diff --git a/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/README.md b/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/README.md deleted file mode 100644 index 462165143ef114cf4e9304336b8219de07f9e4f4..0000000000000000000000000000000000000000 --- a/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 0xJustin Dungeons And Diffusion -emoji: 📊 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/1368565466ki/ZSTRD/transforms.py b/spaces/1368565466ki/ZSTRD/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/ZSTRD/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - 
min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * 
theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Age of Chivalry Hegemony No-cd C A Total Conversion Mod for Age of Empires II.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Age of Chivalry Hegemony No-cd C A Total Conversion Mod for Age of Empires II.md deleted file mode 100644 index bff9ea48a1aae9a5403b9160dda47b9101663d2b..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Age of Chivalry Hegemony No-cd C A Total Conversion Mod for Age of Empires II.md +++ /dev/null @@ -1,177 +0,0 @@ - -

Age of Chivalry Hegemony No-cd C: What is it and why do you need it?

-

If you are a fan of medieval history and strategy games, you may have heard of Age of Chivalry Hegemony, a total conversion mod for Age of Empires II: The Conquerors that focuses on the late medieval period in Western and Central Europe. The mod adds or significantly alters each of the civilizations in the original game, while also introducing many new units, technologies, buildings, maps, and scenarios. The mod aims to provide a more realistic and immersive experience of medieval warfare and politics, while also offering a lot of variety and replay value.

-

Age Of Chivalry Hegemony No-cd C


Download Zip https://byltly.com/2uKz6i



-

However, if you want to play this mod, you may encounter some difficulties, especially if you have an older version of the game or if you want to play online with other players. That's where a no-cd c patch comes in handy. A no-cd c patch is a small file that allows you to run the game without inserting the CD-ROM in your drive, which can save you time and hassle. It also enables you to play the game on newer operating systems that may not support CD-ROMs, such as Windows 10. Moreover, a no-cd c patch can help you avoid compatibility issues with other mods or patches that may conflict with the original game files.

-

In this article, we will show you how to install Age of Chivalry Hegemony No-cd C, how to play it, and how to troubleshoot any problems that may arise. By following these simple steps, you will be able to enjoy this amazing mod without any worries.

-

How to install Age of Chivalry Hegemony No-cd C

-

Installing Age of Chivalry Hegemony No-cd C is not very difficult, but it does require some attention and care. Here are the steps you need to follow:

-
    -
  1. Download the latest version of Age of Chivalry Hegemony from Mod DB. You can find it here: https://www.moddb.com/mods/age-of-chivalry-hegemony/downloads. The latest version at the time of writing this article is 2.03, which was released on December 29, 2018. The file size is about 300 MB.
  2. -
  3. Extract the files to your Age of Empires II: The Conquerors folder. You will need a program like WinRAR or 7-Zip to do this. You can find your game folder by right-clicking on the game icon on your desktop or in your Start menu, then selecting Properties, then Open File Location. Alternatively, you can search for "age2_x1.exe" in your computer. The default location is usually C:\Program Files (x86)\Microsoft Games\Age of Empires II\age2_x1.
  4. -
  5. Download the no-cd c patch from a reliable source. You can find it here: https://www.gamecopyworld.com/games/pc_age_of_empires_2.shtml#Age%20of%20Empires%202:%20The%20Conquerors%20v1.0c%20[ENGLISH]%20No-CD/Fixed%20EXE. The file name is "aoe2tc.nocd.v1.0c.eng.rar". The file size is about 2 MB.
  6. -
  7. Copy and paste the patch file to your Age of Chivalry Hegemony folder. You will need to overwrite the existing "age2_x1.exe" file with the patched one. Make sure you back up the original file before doing this, in case something goes wrong (a small script that automates this step is sketched after this list).
  8. -
  9. Run the game and enjoy. You can launch the game by double-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, or by creating a shortcut on your desktop or in your Start menu. You should see a new splash screen with the mod logo when you start the game.
  10. -
-
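For readers who reinstall the mod or patch often, the backup-and-overwrite in step 7 can be scripted. The following is a minimal Python sketch, not part of the original guide: it assumes the default install path from step 3 and a hypothetical download folder for the patched executable, so adjust both paths to your setup.

import shutil
from pathlib import Path

# Default install folder from step 3 (assumption; change if you installed elsewhere).
game_dir = Path(r"C:\Program Files (x86)\Microsoft Games\Age of Empires II\age2_x1")
# Hypothetical location of the extracted no-cd patch executable.
patched_exe = Path(r"C:\Downloads\aoe2tc_nocd\age2_x1.exe")

original = game_dir / "age2_x1.exe"
backup = game_dir / "age2_x1.exe.bak"

# Keep one copy of the untouched executable so it can be restored later.
if not backup.exists():
    shutil.copy2(original, backup)

# Overwrite the original executable with the patched one.
shutil.copy2(patched_exe, original)
print(f"Patched {original}; original kept as {backup}")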

How to play Age of Chivalry Hegemony No-cd C

-

Playing Age of Chivalry Hegemony No-cd C is similar to playing the original game, but with some significant differences. Here are some things you need to know:

-

New civilizations and units

-

The mod features 22 new civilizations, each with their own unique units, technologies, bonuses, and team bonuses. These civilizations are:

-

Age Of Chivalry Hegemony Crack Download
-How To Play Age Of Chivalry Hegemony Without Cd
-Age Of Chivalry Hegemony Patch 2.03 No-cd
-Age Of Chivalry Hegemony Free Download Full Version
-Age Of Chivalry Hegemony Cd Key Generator
-Age Of Chivalry Hegemony Mods And Cheats
-Age Of Chivalry Hegemony Steam Edition No-cd
-Age Of Chivalry Hegemony Gameplay And Review
-Age Of Chivalry Hegemony Multiplayer No-cd
-Age Of Chivalry Hegemony System Requirements And Compatibility
-Age Of Chivalry Hegemony Tips And Tricks
-Age Of Chivalry Hegemony No-cd C Tutorial And Guide
-Age Of Chivalry Hegemony Best Civilizations And Strategies
-Age Of Chivalry Hegemony Custom Scenarios And Maps
-Age Of Chivalry Hegemony Soundtrack And Music
-Age Of Chivalry Hegemony History And Lore
-Age Of Chivalry Hegemony No-cd C Error And Fix
-Age Of Chivalry Hegemony Update And News
-Age Of Chivalry Hegemony Wiki And Forum
-Age Of Chivalry Hegemony Comparison And Difference
-Age Of Chivalry Hegemony No-cd C Alternative And Similar
-Age Of Chivalry Hegemony Expansion And DLC
-Age Of Chivalry Hegemony Remastered And Enhanced
-Age Of Chivalry Hegemony No-cd C Benefits And Advantages
-Age Of Chivalry Hegemony Features And Highlights
-Age Of Chivalry Hegemony No-cd C Problems And Solutions
-Age Of Chivalry Hegemony Online And Offline Mode
-Age Of Chivalry Hegemony Graphics And Performance
-Age Of Chivalry Hegemony No-cd C Installation And Setup
-Age Of Chivalry Hegemony Support And Contact
-Age Of Chivalry Hegemony No-cd C Test And Evaluation
-Age Of Chivalry Hegemony Trailer And Video
-Age Of Chivalry Hegemony No-cd C Feedback And Rating
-Age Of Chivalry Hegemony FAQ And Q&A
-Age Of Chivalry Hegemony No-cd C Recommendation And Suggestion
-Age Of Chivalry Hegemony Fun Facts And Secrets
-Age Of Chivalry Hegonomy No-cd C Discount And Coupon Code
-Age of chilvary hegonomy no cd c torrent and magnet link
-age of chilvary hegonomy no cd c iso and rar file
-age of chilvary hegonomy no cd c serial and keygen

- -

The mod also adds over 100 new units, including infantry, cavalry, archers, siege weapons, ships, monks, heroes, mercenaries, animals, etc. Some examples are:

- -

New technologies and buildings

-

The mod also adds over 100 new technologies, some of them general and others unique to a civilization. These technologies can improve your units, buildings, economy, or religion. Some examples are:

- -

The mod also adds new buildings, some of them general and others unique to a civilization. These buildings can provide new functions, units, or bonuses. Some examples are:

- -

New maps and scenarios

-

The mod also adds new maps and scenarios, some of them general and others unique to a civilization. These maps and scenarios can provide new challenges, environments, or stories. Some examples are:

- -

How to troubleshoot Age of Chivalry Hegemony No-cd C

-

Although Age of Chivalry Hegemony No-cd C is a well-made mod, it is not perfect. You may encounter some common issues when playing it. Here are some tips on how to fix them:

-

Compatibility issues

-

If you have trouble running the mod on your system or with other mods or patches, you may need to check your compatibility settings. Here are some steps you can try:

-
    -
  1. Run the game as administrator. You can do this by right-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, then selecting Properties, then Compatibility, then Run this program as an administrator.
  2. -
  3. Run the game in compatibility mode. You can do this by right-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, then selecting Properties, then Compatibility, then Run this program in compatibility mode for Windows XP (Service Pack 3) or Windows 7.
  4. Disable visual themes. You can do this by right-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, then selecting Properties, then Compatibility, then checking Disable visual themes and Disable desktop composition.
  5. Use a different resolution. You can do this by right-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, then selecting Properties, then Shortcut, then Target, then adding -w 800 -h 600 (or any other resolution you prefer) at the end of the line.
  6. -
  7. Use a different color mode. You can do this by right-clicking on the "age2_x1.exe" file in your Age of Chivalry Hegemony folder, then selecting Properties, then Compatibility, then Reduced color mode 16-bit (65536 colors).
  8. -
  9. Use a different mod manager. If you have other mods or patches installed for Age of Empires II: The Conquerors, you may need to use a mod manager like AoK Mod Pack Studio or UserPatch to switch between them. You can find them here: https://www.moddb.com/games/age-of-empires-ii-the-conquerors/downloads/aok-mod-pack-studio and https://userpatch.aiscripters.net/.
  10. -
-
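As an alternative to editing a shortcut, the resolution flags from step 5 can be passed by a tiny launcher script. This is a minimal Python sketch under the same assumption as before (the article's default install path); it is a convenience, not something the original guide requires.

import subprocess
from pathlib import Path

# Article's default install folder (assumption; adjust to your setup).
game_dir = Path(r"C:\Program Files (x86)\Microsoft Games\Age of Empires II\age2_x1")

# Start the game at 800x600 using the -w/-h flags described in step 5.
subprocess.run(
    [str(game_dir / "age2_x1.exe"), "-w", "800", "-h", "600"],
    cwd=game_dir,  # run from the game folder so the game finds its data files
)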

Performance issues

-

If you experience lag, crashes, or other performance issues when playing the mod, you may need to optimize your game settings and hardware. Here are some steps you can try:

-
    -
  1. Lower your game settings. You can do this by going to Options, then Graphics, then adjusting the settings to your preference. You may want to lower the resolution, the game speed, the scroll speed, the music volume, the sound volume, or the brightness.
  2. -
  3. Close other programs. You may want to close any other programs that are running in the background, such as browsers, antivirus software, or media players. You can do this by pressing Ctrl+Alt+Delete, then Task Manager, then End Task for any unnecessary programs.
  4. -
  5. Clean your disk space. You may want to delete any unwanted files or programs that are taking up space on your hard drive. You can do this by going to Start, then Control Panel, then Programs and Features, then Uninstall a program for any unnecessary programs. You can also use a disk cleanup tool like CCleaner or Disk Cleanup to remove any temporary files or junk files.
  6. -
  7. Defragment your disk. You may want to defragment your hard drive to improve its performance and speed. You can do this by going to Start, then Computer, then right-clicking on your hard drive, then Properties, then Tools, then Defragment now.
  8. Update your drivers. You can do this by going to Start, then Control Panel, then Device Manager, then right-clicking on your device, then Update driver software. You can also visit the manufacturer's website to download the latest drivers.
-

Bug reports and feedback

-

If you encounter any bugs or errors when playing the mod, or if you have any suggestions or feedback for the mod developers, you can report them or provide them in the following ways:

-
    -
  1. Post on the mod's website or fan wiki. You can find them here: https://www.moddb.com/mods/age-of-chivalry-hegemony and https://ageofchivalry-hegemony.fandom.com/wiki/Age_of_Chivalry:_Hegemony_Wiki. You can post comments, reviews, questions, or bug reports on the mod's page or forum. You can also edit or create articles on the fan wiki to share your knowledge or tips.
  2. -
  3. Post on the mod's social media pages. You can find them here: https://www.facebook.com/AgeofChivalryHegemony and https://twitter.com/AoCHegemony. You can post messages, likes, retweets, or replies on the mod's Facebook or Twitter pages.
  4. -
  5. Email the mod developers. You can find their email addresses here: https://www.moddb.com/mods/age-of-chivalry-hegemony/contact. You can send them an email with your bug report, feedback, or suggestion.
  6. -
-

Conclusion

-

Age of Chivalry Hegemony No-cd C is a great mod for anyone who loves medieval history and strategy games. It offers a lot of new content, features, and gameplay changes that make it more realistic, immersive, and fun. However, it also requires some installation and troubleshooting steps to make it work properly. By following this article, you should be able to install, play, and enjoy this mod without any problems.

-

If you are interested in downloading this mod or supporting the mod developers, you can visit their website here: https://www.moddb.com/mods/age-of-chivalry-hegemony. You can also follow them on their social media pages here: https://www.facebook.com/AgeofChivalryHegemony and https://twitter.com/AoCHegemony. You can also provide them with your bug reports, feedback, or suggestions by posting on their website, fan wiki, social media pages, or email.

-

Thank you for reading this article and I hope you found it useful and informative. If you have any questions or comments, please feel free to leave them below. I would love to hear from you and help you out.

-

FAQs

-

Here are some frequently asked questions about Age of Chivalry Hegemony No-cd C:

-
    -
  1. What are the system requirements for this mod?
  2. -

    The system requirements for this mod are the same as for the original game. You will need a Windows 98/ME/2000/XP/Vista/7/8/10 operating system, a Pentium II 300 MHz processor or equivalent, 64 MB of RAM (128 MB recommended), a DirectX 9.0c compatible video card with 2 MB of VRAM (8 MB recommended), a DirectX 9.0c compatible sound card with speakers or headphones, a 4x CD-ROM drive (not required for no-cd c patch), a keyboard and mouse, and 1 GB of free hard disk space.

    -
  3. Can I play this mod online with other players?
  4. -

    Yes, you can play this mod online with other players who have the same version of the mod and the no-cd c patch installed. You can use platforms like GameRanger or Voobly to find and join online games. However, you may experience some lag or desync issues depending on your internet connection and settings.

    -
  5. Can I play this mod with other mods or patches?
  6. -

    No, you cannot play this mod with other mods or patches that modify the game files or data. This may cause compatibility issues or errors that may prevent the game from running properly. You will need to use a mod manager like AoK Mod Pack Studio or UserPatch to switch between different mods or patches.

    -
  7. Can I play this mod in other languages?
  8. -

    Yes, you can play this mod in other languages besides English. The mod developers have released language packs for Spanish and German that will translate the mod into those languages. You can download them here: https://www.moddb.com/mods/age-of-chivalry-hegemony/downloads/spanish-language-pack-for-age-of-chivalry-hegemony-203 and https://www.moddb.com/mods/age-of-chivalry-hegemony/downloads/german-language-pack-for-age-of-chivalry-hegemony-203. An Italian language pack will be released later.

    -
  9. Can I edit or create my own maps or scenarios for this mod?
  10. -

    Yes, you can edit or create your own maps or scenarios for this mod using the Scenario Editor that comes with the game. You can access it by going to Tools, then Scenario Editor. You can use the new objects and animals that the mod adds to create more detailed and realistic maps or scenarios. You can also share your creations with other players by uploading them to Mod DB or other platforms.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares 9 Forum The Ultimate Online Platform for Space Lovers.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares 9 Forum The Ultimate Online Platform for Space Lovers.md deleted file mode 100644 index e8c5a00567ea3d0db6c67ff530f2eb68b96d9b13..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares 9 Forum The Ultimate Online Platform for Space Lovers.md +++ /dev/null @@ -1,22 +0,0 @@ - -

    Antares 9 Forum: A Community for Space Enthusiasts

    -

    If you are fascinated by the wonders of the universe and want to learn more about the latest discoveries and missions, you might want to join the Antares 9 Forum. This is an online platform where you can interact with other space enthusiasts, share your opinions and insights, and get updates on the Antares 9 project.

    -

    What is Antares 9? It is a proposed mission to send a robotic probe to explore the star system of Antares, which is one of the brightest and most massive stars in the night sky. Antares is also a binary star, meaning it has a companion star orbiting around it. The Antares 9 probe would aim to study both stars and their interactions, as well as search for any planets or other objects in the system.

    -

    antares 9 forum


Download File: https://byltly.com/2uKyIq



    -

    The Antares 9 Forum is a place where you can find out more about this ambitious mission, its goals and challenges, its current status and progress, and its potential benefits for science and humanity. You can also ask questions, share your ideas and suggestions, and participate in polls and surveys. The forum is moderated by a team of experts and enthusiasts who are passionate about space exploration and education.

    -

    Whether you are a professional astronomer, a student, a hobbyist, or just curious about the cosmos, you are welcome to join the Antares 9 Forum. All you need is an email address and a username to register. You can then create your profile, choose your preferences, and start posting. You can also browse through the existing topics, categories, and threads, and join the conversations that interest you.

    -

    The Antares 9 Forum is more than just a website. It is a community of people who share a common interest and passion for space. It is a place where you can learn new things, exchange views, make friends, and have fun. It is also a way to support the Antares 9 project and contribute to its success.

    -

    So what are you waiting for? Join the Antares 9 Forum today and discover the wonders of Antares and beyond!

    - -

    How to Join the Antares 9 Forum

    -

    Joining the Antares 9 Forum is easy and free. All you need is a valid email address and a username of your choice. You can also choose a password and a display name for your profile. Once you register, you will receive a confirmation email with a link to activate your account. After that, you can log in and start posting.

    -

    The Antares 9 Forum has a simple and user-friendly interface. You can navigate through the main menu, which has links to the home page, the forum categories, the search function, the help section, and your profile settings. You can also use the sidebar, which has links to the latest posts, the most popular topics, the recent activity, and the online users.

    -

    The Antares 9 Forum has several categories and subcategories, each with a specific theme and purpose. For example, you can find categories for general discussion, news and updates, technical details, scientific results, educational resources, and more. You can also create your own topics and threads within each category, or reply to existing ones. You can also use tags, emojis, images, videos, and links to enhance your posts.

    -

    - -

    How to Participate in the Antares 9 Forum

    -

    Participating in the Antares 9 Forum is fun and rewarding. You can interact with other members who share your interest and passion for space exploration. You can also learn new things, express your opinions, ask questions, answer questions, give feedback, and more. You can also join various activities and events that are organized by the forum moderators and administrators.

    -

    The Antares 9 Forum has a friendly and respectful atmosphere. You are expected to follow the forum rules and guidelines, which are designed to ensure a positive and productive experience for everyone. You should also respect the opinions and views of other members, even if they differ from yours. You should also avoid spamming, trolling, flaming, bullying, or any other inappropriate behavior that might disrupt the forum or harm other members.

    -

    The Antares 9 Forum also has a system of rewards and recognition for its members. You can earn points and badges for your posts and activities. You can also rank up and gain access to more features and privileges. You can also receive awards and honors for your contributions and achievements. You can also nominate and vote for other members who deserve recognition.

    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CLA Vocals Free Trial How to Download and Use the Best Vocal Plugin.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CLA Vocals Free Trial How to Download and Use the Best Vocal Plugin.md deleted file mode 100644 index 8c366a8d1f8acb63b378f2c1125b338dfbfab256..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/CLA Vocals Free Trial How to Download and Use the Best Vocal Plugin.md +++ /dev/null @@ -1,32 +0,0 @@ - -

    How to Get CLA Vocals Plugin for Free

    -

    CLA Vocals is a plugin created by Waves in collaboration with Grammy award-winning mixer Chris Lord-Alge. It delivers the radio-ready rock vocal sound of Green Day, James Blunt, and Stone Temple Pilots. It is a multi-effect plugin that includes EQ, compression, reverb, delay, and chorus. It works great on all styles of singers and has presets for different genres and vocal types.

    -

    cla vocals free download


    Download File >> https://byltly.com/2uKxFh



    -

    If you want to get CLA Vocals plugin for free, you have a few options. Here are some of them:

    - -

These are some of the ways you can get CLA Vocals plugin for free. However, if you want to support the developers and get the best quality and performance, we recommend buying the plugin from Waves or authorized dealers. CLA Vocals plugin is currently on sale for $29.99 (regular price $249) on the Waves website. This is a great deal that you don't want to miss!

    - -

    Why Use CLA Vocals Plugin?

    -

    CLA Vocals plugin is a powerful tool that can help you achieve professional-sounding vocals in your mixes. Whether you are a beginner or an expert, you can use this plugin to enhance your vocal tracks with ease and speed. Here are some of the benefits of using CLA Vocals plugin:

    - - -

    How to Use CLA Vocals Plugin?

    -

    Using CLA Vocals plugin is very easy and intuitive. Here are some steps to follow:

    -

    -
      -
    1. Insert the plugin on your vocal track. You can use CLA Vocals plugin as an insert effect on your vocal track or as a send effect on a separate bus. Either way, make sure you have enough headroom on your track to avoid clipping or distortion.
    2. -
    3. Select a preset or start from scratch. CLA Vocals plugin has a preset menu that lets you choose from different genres and vocal types. You can use these presets as a starting point or start from scratch by setting all the sliders to zero. You can also save your own presets for future use.
    4. -
    5. Adjust the sliders to shape your vocal sound. CLA Vocals plugin has six sliders that control the main aspects of vocal processing: bass, treble, compression, reverb, delay, and pitch. You can adjust each slider to add or subtract the effect from your vocal sound. You can also use the bypass buttons to turn on or off each effect individually.
    6. -
    7. Listen to the results and fine-tune if needed. Once you have adjusted the sliders to your liking, listen to how your vocal sounds in the mix. You can use the input and output meters to monitor the levels and make sure they are not too high or too low. You can also use the solo buttons to isolate each effect and hear how it affects your vocal sound. If needed, you can fine-tune the sliders until you are happy with the results.
    8. -
    -

    That's it! You have just used CLA Vocals plugin to enhance your vocal tracks. Enjoy!

    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Supreme Ruler 2020 Gold Crack for Free and Conquer the World.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Supreme Ruler 2020 Gold Crack for Free and Conquer the World.md deleted file mode 100644 index ae835d3179927fbb4df55888a4c1783f97d7b708..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Supreme Ruler 2020 Gold Crack for Free and Conquer the World.md +++ /dev/null @@ -1,99 +0,0 @@ -
    -

    Supreme Ruler 2020 Gold Crack Free Download

    -

    Are you a fan of strategy games? Do you want to experience the thrill of leading a nation in a turbulent world? If yes, then you might be interested in Supreme Ruler 2020 Gold, a complex and realistic simulation game that lets you control any country on Earth. But before you can enjoy this game, you need to get a crack for it. In this article, we will tell you everything you need to know about Supreme Ruler 2020 Gold crack free download, including what it is, why you need it, how to get it, and how to play it. So, let's get started!

    -

    supreme ruler 2020 gold crack free download


    DOWNLOAD > https://byltly.com/2uKyjb



    -

    What is Supreme Ruler 2020 Gold?

    -

    Supreme Ruler 2020 Gold is a strategy game developed by BattleGoat Studios and published by Paradox Interactive in 2009. It is an enhanced version of Supreme Ruler 2020, which was released in 2008. The game is set in a near-future scenario where the world is facing economic and political crises, environmental disasters, and regional conflicts. As the leader of any country on Earth, you have to manage your economy, diplomacy, military, and domestic affairs, while dealing with other nations and factions. You can choose from various scenarios and modes, such as sandbox, campaign, multiplayer, or custom.

    -

    The Gold edition of Supreme Ruler 2020 includes the original game and two expansion packs: Global Crisis and Trump Rising. These add new features, content, and challenges to the game, such as new units, technologies, regions, events, leaders, and scenarios. For example, you can play as Donald Trump and try to make America great again or face his wrath as another country. You can also try to prevent or survive a global nuclear war or deal with the aftermath of a zombie apocalypse.

    -

    Why do you need a crack for Supreme Ruler 2020 Gold?

    -

    A crack is a software tool that modifies or bypasses the copy protection or activation system of a game or program. It allows you to use the game or program without paying for it or having a valid license key. In other words, it lets you pirate the game or program.

    -

    There are many reasons why you might want to use a crack for Supreme Ruler 2020 Gold. Some of them are:

    - -

    How to download and install Supreme Ruler 2020 Gold crack for free?

    -

    If you have decided to use a crack for Supreme Ruler 2020 Gold, you need to follow some steps to download and install it. Here are the steps:

    -

    supreme ruler 2020 gold edition full version download
    -how to get supreme ruler 2020 gold for free
    -supreme ruler 2020 gold patch and crack download
    -supreme ruler 2020 gold torrent download with crack
    -supreme ruler 2020 gold activation key generator
    -supreme ruler 2020 gold no cd crack download
    -supreme ruler 2020 gold skidrow crack free download
    -supreme ruler 2020 gold pc game download cracked
    -supreme ruler 2020 gold steam key free download
    -supreme ruler 2020 gold license code crack download
    -supreme ruler 2020 gold direct download link with crack
    -supreme ruler 2020 gold mega.nz download cracked
    -supreme ruler 2020 gold serial number crack free download
    -supreme ruler 2020 gold rar password unlocker crack download
    -supreme ruler 2020 gold iso file download with crack
    -supreme ruler 2020 gold repack download cracked
    -supreme ruler 2020 gold mod apk download with crack
    -supreme ruler 2020 gold cheats and hacks download cracked
    -supreme ruler 2020 gold trainer and crack free download
    -supreme ruler 2020 gold update and crack download
    -supreme ruler 2020 gold dlc and crack free download
    -supreme ruler 2020 gold multiplayer crack download
    -supreme ruler 2020 gold online mode crack free download
    -supreme ruler 2020 gold lan play crack download
    -supreme ruler 2020 gold co-op mode crack free download
    -supreme ruler 2020 gold windows 10 compatible crack download
    -supreme ruler 2020 gold mac os x crack free download
    -supreme ruler 2020 gold linux version crack download
    -supreme ruler 2020 gold android port crack free download
    -supreme ruler 2020 gold ios version crack download
    -supreme ruler 2020 gold switch port crack free download
    -supreme ruler 2020 gold ps4 version crack download
    -supreme ruler 2020 gold xbox one version crack free download
    -supreme ruler 2020 gold vr mode crack free download
    -supreme ruler 2020 gold remastered edition crack download
    -supreme ruler 2020 gold definitive edition crack free download
    -supreme ruler 2020 gold enhanced edition crack download
    -supreme ruler 2020 gold ultimate edition crack free download
    -supreme ruler 2020 gold deluxe edition crack download
    -supreme ruler 2020 gold collector's edition crack download
    -supreme ruler 2020 gold platinum edition crack free download
    -supreme ruler 2020 gold game of the year edition crack download
    -supreme ruler 2020 gold complete edition crack free download
    -supreme ruler 2020 gold all in one edition crack download
    -supreme ruler 2020 gold bundle edition crack free download
    -supreme ruler 2020 gold legacy edition crack download
    -supreme ruler 2020 gold anniversary edition crack free download
    -supreme ruler 2020 gold expansion pack crack free download

    -

    The steps to download the crack

    -
      -
    1. Find a reliable source for downloading the crack. You can use torrent sites, file-sharing platforms, or direct links. Be careful of fake or malicious links that might harm your computer or steal your data.
    2. -
    3. Download the crack file. It might be in a compressed format such as ZIP or RAR. You will need a program like WinRAR or 7-Zip to extract it.
    4. -
    5. Scan the crack file with an antivirus program. This is to make sure that it doesn't contain any viruses or malware that might damage your system or compromise your security.
    6. -
    -
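Beyond the antivirus scan in step 5, it is also worth comparing the file's hash against whatever checksum the download source publishes, so a corrupted or tampered archive is caught before you open it. A minimal Python sketch follows; the file name and expected hash below are placeholders, not real values.

import hashlib
from pathlib import Path

# Placeholder name and checksum -- substitute your actual download and the
# checksum published by the source (both values here are assumptions).
downloaded = Path("sr2020_gold_crack.zip")
expected_sha256 = "0000000000000000000000000000000000000000000000000000000000000000"

sha256 = hashlib.sha256()
with downloaded.open("rb") as f:
    # Hash in 1 MB chunks so large archives never need to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print(f"SHA-256: {digest}")
if digest != expected_sha256:
    raise SystemExit("Checksum mismatch -- do not use this file.")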

    The steps to install the crack

    -
      -
    1. Download and install Supreme Ruler 2020 Gold from its official website or another source. You might need a serial key or an activation code to complete the installation. You can find them online or generate them with a keygen tool.
    2. -
    3. Copy the crack file and paste it into the installation folder of Supreme Ruler 2020 Gold. This is usually located in C:\Program Files (x86)\Supreme Ruler 2020 Gold\. You might need to replace or overwrite an existing file.
    4. -
    5. Run the crack file as an administrator. This will patch or modify the game files and make them compatible with the crack.
    6. -
    7. Enjoy playing Supreme Ruler 2020 Gold without any limitations!
    8. -
    -

    How to play Supreme Ruler 2020 Gold with the crack?

    -

    Now that you have downloaded and installed Supreme Ruler 2020 Gold crack for free, you can start playing it. Here are some tips and tricks for playing the game:

    -

    The tips and tricks for playing the game

    - -

How to use the CJdropshipping app

    -

Now that you have installed the CJdropshipping app on your device, you may be wondering how to use it. Here are some of the main features and functions of the app that you can use to start or grow your dropshipping business:

    -

Import products to your stores

    -

One of the most useful features of the CJdropshipping app is that it lets you import products from CJdropshipping to your online stores, such as Shopify, WooCommerce, eBay, Amazon, etc. You can browse through thousands of products in various categories and niches, such as fashion, electronics, home and garden, beauty, etc. You can also filter products by price, rating, shipping options, and more.

    -

To import products to your stores, you first need to connect your store accounts with the CJdropshipping app. You can do this by tapping the "My Store" icon in the bottom menu and then tapping "Add Store". You will see a list of supported platforms to choose from. Follow the instructions to authorize your store accounts with the CJdropshipping app.

    - -

When you are ready to import products to your stores, tap the "Import" button in the top right corner of the listing page. You will see a list of your connected store accounts to choose from. Select the store account you want to import products to, then tap "Import". Wait a few minutes for the import process to finish. You can check the status of your import by tapping the "Notification" icon in the bottom menu. You can also view your imported products in your store dashboard.

    -

Source products for free

    -

Another great feature of the CJdropshipping app is that it lets you source products for free. This means you can ask CJdropshipping to find and supply the products you want to sell, even if they are not listed on its website. You can also ask them to customize the products to your preferences, such as adding your logo, changing the packaging, etc.

    -

To source products for free, you need to tap the "Source" icon in the bottom menu. You will see a page where you can fill in the details of the product you want to source, such as product name, description, image, link, quantity, price, etc. You can also upload a video or voice message to explain your request in more detail. After completing the details, tap "Submit". You will receive a notification when CJdropshipping has found and verified the product for you. You can then check the product details and price by tapping the "Notification" icon in the bottom menu. You can also chat with CJdropshipping staff by tapping the "Message" icon in the bottom menu.

    -

When you are happy with the product, you can add it to your list and import it to your store as explained above. You can also order a sample of the product before importing it to your store by tapping the "Order Sample" button on the product page. You will only have to pay the shipping cost of the sample.

    -

Access thousands of POD products

    - -

To access POD products, you need to tap the "POD" icon in the bottom menu. You will see a list of categories and subcategories of POD products to choose from, such as clothing, accessories, home and living, etc. You can also search for specific products by typing keywords into the search box.

    -

When you find a product you like, tap it and then tap "Design". You will see a page where you can upload your own design or choose from existing templates. You can also edit your design by adding text, images, shapes, etc. You can preview your design by tapping "Preview". When you are satisfied with your design, tap "Save".

    -

After saving your design, you can add the product to your list and import it to your store as explained above. You can also order a sample of the product before importing it to your store by tapping the "Order Sample" button on the product page. You will only have to pay the shipping cost of the sample.

    -

Conclusion

    -

In conclusion, CJ APK OBB is the pair of files you need to install the CJdropshipping app on your Android device. The app lets you import products from CJdropshipping to your online stores, source products for free, and access thousands of POD products. To download CJ APK OBB from APKCombo, follow these steps:

    -
      -
  1. Visit the APKCombo website and search for the CJdropshipping app.
  2. -
  3. Choose the APK and OBB files that match your device's specifications.
  4. -
  5. Download and install APKCombo Installer.
  6. -
  7. Install CJ APK OBB using APKCombo Installer.
  8. -
    - -

FAQs

    -

What is an APK file?

    -

An APK file is an Android application package file that contains the code and resources of an Android app. It is used to install apps on Android devices.

    -

What is an OBB file?

    -

An OBB file is an additional data file that contains the graphics and media of an Android app. It is used to improve the performance and functionality of an app.

    -

What is an XAPK file?

    -

An XAPK file is a compressed file that contains the APK and OBB files of an Android app. It is used to simplify the installation process of an app.

    -

How do I install XAPK files?

    -

To install XAPK files, you need to use a tool like APKCombo Installer, which can extract and install the APK and OBB files from the XAPK file. You can download APKCombo Installer from here. If you are curious what is inside the bundle, a minimal extraction sketch follows below.

    -
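Because an XAPK is ordinarily just a ZIP archive holding the APK plus OBB data, you can also unpack one yourself. The following is a minimal Python illustration of that extraction step, assuming the usual ZIP packaging; the file name is a placeholder.

import zipfile
from pathlib import Path

# Placeholder file name -- substitute the bundle you actually downloaded.
xapk = Path("cjdropshipping.xapk")
out_dir = Path("xapk_contents")

# XAPK bundles are normally plain ZIP archives, so zipfile can open them.
with zipfile.ZipFile(xapk) as archive:
    archive.extractall(out_dir)

# List the APK and OBB files that were packed inside the bundle.
for path in sorted(out_dir.rglob("*")):
    if path.suffix in (".apk", ".obb"):
        print(path)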

Is APKCombo safe and reliable?

    -

Yes, APKCombo is safe and reliable, as it provides original APK and OBB files from the Google Play Store. It does not modify or alter the files in any way. It also uses SSL encryption to protect your data and privacy.

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Captulos Historias Interactivas Apk.md b/spaces/Benson/text-generation/Examples/Captulos Historias Interactivas Apk.md deleted file mode 100644 index 95deec7f3d819334b18147225aa6e3ba80561cfb..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Captulos Historias Interactivas Apk.md +++ /dev/null @@ -1,65 +0,0 @@ - -


    >Capítulos: Historias interactivas APK Cómo descargar y jugar

    - -

What is Chapters: Interactive Stories?

    -

Chapters: Interactive Stories is a game developed by Crazy Maple Studio Dev that lets you read pages and pages of fun interactive stories that fit your mood. You can choose your story from its top collection of romance, contract-marriage, second-chance, dragon-king, pirate, alpha-wolf, isekai, reality-TV dating, reverse-harem, sci-fi, comedy, and drama series.

    -

chapters interactive stories apk


DOWNLOAD https://bltlly.com/2v6ICZ



    -

A game that lets you choose your story

    -

The best thing about Chapters: Interactive Stories is that you get to make the choices in every story. You can decide on difficult life decisions such as falling in love, discovering secrets, or unraveling deep mysteries. You can also customize your character's name, appearance, style, and personality. Your choices will affect how the story unfolds and how it ends.

    -

A game that offers a variety of genres and stories

    -

Another great thing about Chapters: Interactive Stories is that it offers a wide range of genres and stories for you to enjoy. Whether you like romance, comedy, drama, fantasy, sci-fi, or anything else, you will find something that suits your taste. You can also explore different themes and settings such as contract marriage, second-chance romance, dragon-king romance, pirate adventure, alpha-wolf romance, isekai adventure, reality-TV dating shows, reverse-harem romance, sci-fi thrillers, comedy shows, and drama series. You can also discover new stories and authors every week, as the game is regularly updated with new content.

    -

A game that features stunning graphics and sound effects

    - -

Why should you download Chapters: Interactive Stories APK?

    -

Now that you know what Chapters: Interactive Stories is, you may be wondering why you should download its APK version instead of the official version from the Google Play Store. Well, there are several reasons why downloading the APK version is the better option for you.

    -

To enjoy unlimited access to all stories and features

    -

The first reason is that by downloading the APK version, you can enjoy unlimited access to all the stories and features in the game. You don't have to worry about running out of diamonds or tickets, the in-game currencies you need to unlock premium choices, outfits, and chapters. You can also access all the stories without waiting for them to be released or unlocked. You can play any story you want, any time you want, and make any choice you want.

    -

To play without an internet connection

    -

The second reason is that by downloading the APK version, you can play the game without an internet connection. This means you don't have to worry about your data usage or your wifi signal. You can play anywhere, anytime, even when traveling, commuting, or in a remote area. You can also save your progress and resume your game later without losing anything.

    -

    -

To get regular updates and new content

    -

The third reason is that by downloading the APK version, you can get regular updates and new content from the game developers. You don't have to wait for the official version to be updated or patched. You can get the latest version of the game as soon as it is available, with all the new stories, features, bug fixes, and improvements. You can also enjoy exclusive content that might not be available in the official version.

    -

How to download Chapters: Interactive Stories APK?

    - -

Step 1: Find a reliable source

    -

The first step is to find a reliable source where you can download the APK file of Chapters: Interactive Stories. There are many websites that offer APK files of various games and apps, but not all of them are trustworthy or safe. Some of them may contain malware, viruses, or other harmful software that can damage your device or steal your personal information. Therefore, you need to be careful and choose a reputable source that has positive reviews and ratings from other users.

    -

One of the sources we recommend is APKPure.com, a popular website that provides safe and verified APK files of various games and apps. You can download Chapters: Interactive Stories APK from this website via this link: https://apkpure.com/terschaps-chapsinteractive-stories/com.mars.avgchapters/download?from=details.

    -

Step 2: Download the APK file

    -

The second step is to download the APK file of Chapters: Interactive Stories from the source you have chosen. To do this, follow these instructions:

    -
      -
  • Open the web browser on your Android device and go to the link we provided above.
  • -
  • Click the green "Download APK" button on the page.
  • -
  • Wait for the download to start and finish.
  • -
  • You may see a warning message on your screen saying that this type of file can harm your device. Ignore this message and click "OK" or "Download anyway".
  • -
  • You may also see a pop-up message asking you to allow downloads from unknown sources. If you see this message, go to your device settings and enable the option to allow downloads from unknown sources.
  • -
    -

Step 3: Install the APK file

    - -
      -
  • Go to your device's file manager and locate the downloaded APK file. It should be in your download folder or in the folder you have specified for downloads.
  • -
  • Tap the APK file and click "Install".
  • -
  • Wait for the installation to complete.
  • -
  • You may see a pop-up message asking you to grant permissions to the app. If you see this message, click "Allow" or "Accept". (If you would rather install from a computer, see the adb sketch after this list.)
  • -
    -
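If your device has USB debugging enabled, the same APK can also be installed from a computer with adb instead of tapping through the file manager. This is an alternative route, not what the article describes; here is a minimal Python sketch, assuming adb is on your PATH and using a placeholder APK path.

import subprocess
from pathlib import Path

# Placeholder path -- substitute the APK you downloaded in step 2.
apk = Path("chapters.apk")

# Requires a connected device with USB debugging enabled and adb installed.
result = subprocess.run(
    ["adb", "install", "-r", str(apk)],  # -r replaces an existing install
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)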

Step 4: Launch the game and start playing

    -

The fourth and final step is to launch the game and start playing. To do this, follow these instructions:

    -
      -
  • Go to your device's app drawer and look for the Chapters: Interactive Stories icon. It should be a pink book with a white C.
  • -
  • Tap the icon and wait for the game to load.
  • -
  • Choose your preferred language and accept the terms of service and privacy policy.
  • -
  • Create your profile and customize your character.
  • -
  • Select a story you want to play and enjoy!
  • -
    -

Conclusion

    -

Chapters: Interactive Stories is a fun and exciting game that lets you read and play interactive stories that fit your mood. You can choose from a variety of genres and themes, make choices that affect the story, customize your character, and enjoy stunning graphics and sound effects. You can also download its APK version to get unlimited access to all stories and features, play without an internet connection, and get regular updates and new content. To download Chapters: Interactive Stories APK, you just need to follow four simple steps: find a reliable source, download the APK file, install the APK file, and launch the game. We hope this article has helped you learn how to download and play Chapters: Interactive Stories APK. Have fun!

    -

    FAQs

    -

Here are some frequently asked questions about Chapters: Interactive Stories APK:


    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/search.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/search.py deleted file mode 100644 index 03ed925b246dd551ec2ef45095ed6cad00fd2745..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/search.py +++ /dev/null @@ -1,174 +0,0 @@ -import logging -import shutil -import sys -import textwrap -import xmlrpc.client -from collections import OrderedDict -from optparse import Values -from typing import TYPE_CHECKING, Dict, List, Optional - -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.cli.base_command import Command -from pip._internal.cli.req_command import SessionCommandMixin -from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS -from pip._internal.exceptions import CommandError -from pip._internal.metadata import get_default_environment -from pip._internal.models.index import PyPI -from pip._internal.network.xmlrpc import PipXmlrpcTransport -from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import write_output - -if TYPE_CHECKING: - from typing import TypedDict - - class TransformedHit(TypedDict): - name: str - summary: str - versions: List[str] - - -logger = logging.getLogger(__name__) - - -class SearchCommand(Command, SessionCommandMixin): - """Search for PyPI packages whose name or summary contains .""" - - usage = """ - %prog [options] """ - ignore_require_venv = True - - def add_options(self) -> None: - self.cmd_opts.add_option( - "-i", - "--index", - dest="index", - metavar="URL", - default=PyPI.pypi_url, - help="Base URL of Python Package Index (default %default)", - ) - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options: Values, args: List[str]) -> int: - if not args: - raise CommandError("Missing required argument (search query).") - query = args - pypi_hits = self.search(query, options) - hits = transform_hits(pypi_hits) - - terminal_width = None - if sys.stdout.isatty(): - terminal_width = shutil.get_terminal_size()[0] - - print_results(hits, terminal_width=terminal_width) - if pypi_hits: - return SUCCESS - return NO_MATCHES_FOUND - - def search(self, query: List[str], options: Values) -> List[Dict[str, str]]: - index_url = options.index - - session = self.get_default_session(options) - - transport = PipXmlrpcTransport(index_url, session) - pypi = xmlrpc.client.ServerProxy(index_url, transport) - try: - hits = pypi.search({"name": query, "summary": query}, "or") - except xmlrpc.client.Fault as fault: - message = "XMLRPC request failed [code: {code}]\n{string}".format( - code=fault.faultCode, - string=fault.faultString, - ) - raise CommandError(message) - assert isinstance(hits, list) - return hits - - -def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]: - """ - The list from pypi is really a list of versions. We want a list of - packages with the list of versions stored inline. This converts the - list from pypi into one we can use. 
- """ - packages: Dict[str, "TransformedHit"] = OrderedDict() - for hit in hits: - name = hit["name"] - summary = hit["summary"] - version = hit["version"] - - if name not in packages.keys(): - packages[name] = { - "name": name, - "summary": summary, - "versions": [version], - } - else: - packages[name]["versions"].append(version) - - # if this is the highest version, replace summary and score - if version == highest_version(packages[name]["versions"]): - packages[name]["summary"] = summary - - return list(packages.values()) - - -def print_dist_installation_info(name: str, latest: str) -> None: - env = get_default_environment() - dist = env.get_distribution(name) - if dist is not None: - with indent_log(): - if dist.version == latest: - write_output("INSTALLED: %s (latest)", dist.version) - else: - write_output("INSTALLED: %s", dist.version) - if parse_version(latest).pre: - write_output( - "LATEST: %s (pre-release; install" - " with `pip install --pre`)", - latest, - ) - else: - write_output("LATEST: %s", latest) - - -def print_results( - hits: List["TransformedHit"], - name_column_width: Optional[int] = None, - terminal_width: Optional[int] = None, -) -> None: - if not hits: - return - if name_column_width is None: - name_column_width = ( - max( - [ - len(hit["name"]) + len(highest_version(hit.get("versions", ["-"]))) - for hit in hits - ] - ) - + 4 - ) - - for hit in hits: - name = hit["name"] - summary = hit["summary"] or "" - latest = highest_version(hit.get("versions", ["-"])) - if terminal_width is not None: - target_width = terminal_width - name_column_width - 5 - if target_width > 10: - # wrap and indent summary to fit terminal - summary_lines = textwrap.wrap(summary, target_width) - summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines) - - name_latest = f"{name} ({latest})" - line = f"{name_latest:{name_column_width}} - {summary}" - try: - write_output(line) - print_dist_installation_info(name, latest) - except UnicodeEncodeError: - pass - - -def highest_version(versions: List[str]) -> str: - return max(versions, key=parse_version) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py deleted file mode 100644 index 4900ccc160a1dbf4de3a01c234735c21dd4417d6..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euctwfreq.py +++ /dev/null @@ -1,388 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# EUCTW frequency table -# Converted from big5 work -# by Taiwan's Mandarin Promotion Council -# - -# 128 --> 0.42261 -# 256 --> 0.57851 -# 512 --> 0.74851 -# 1024 --> 0.89384 -# 2048 --> 0.97583 -# -# Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 -# Random Distribution Ration = 512/(5401-512)=0.105 -# -# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR - -EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 - -# Char to FreqOrder table -EUCTW_TABLE_SIZE = 5376 - -# fmt: off -EUCTW_CHAR_TO_FREQ_ORDER = ( - 1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110, # 2742 - 3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643, # 2758 - 1198, 3869, 3313, 4060, 410, 2211, 302, 590, 361, 1963, 8, 204, 58, 4296, 7311, 1931, # 2774 - 63, 7312, 7313, 317, 1614, 75, 222, 159, 4061, 2412, 1480, 7314, 3500, 3068, 224, 2809, # 2790 - 3616, 3, 10, 3870, 1471, 29, 2774, 1135, 2852, 1939, 873, 130, 3242, 1123, 312, 7315, # 2806 - 4297, 2051, 507, 252, 682, 7316, 142, 1914, 124, 206, 2932, 34, 3501, 3173, 64, 604, # 2822 - 7317, 2494, 1976, 1977, 155, 1990, 645, 641, 1606, 7318, 3405, 337, 72, 406, 7319, 80, # 2838 - 630, 238, 3174, 1509, 263, 939, 1092, 2644, 756, 1440, 1094, 3406, 449, 69, 2969, 591, # 2854 - 179, 2095, 471, 115, 2034, 1843, 60, 50, 2970, 134, 806, 1868, 734, 2035, 3407, 180, # 2870 - 995, 1607, 156, 537, 2893, 688, 7320, 319, 1305, 779, 2144, 514, 2374, 298, 4298, 359, # 2886 - 2495, 90, 2707, 1338, 663, 11, 906, 1099, 2545, 20, 2436, 182, 532, 1716, 7321, 732, # 2902 - 1376, 4062, 1311, 1420, 3175, 25, 2312, 1056, 113, 399, 382, 1949, 242, 3408, 2467, 529, # 2918 - 3243, 475, 1447, 3617, 7322, 117, 21, 656, 810, 1297, 2295, 2329, 3502, 7323, 126, 4063, # 2934 - 706, 456, 150, 613, 4299, 71, 1118, 2036, 4064, 145, 3069, 85, 835, 486, 2114, 1246, # 2950 - 1426, 428, 727, 1285, 1015, 800, 106, 623, 303, 1281, 7324, 2127, 2354, 347, 3736, 221, # 2966 - 3503, 3110, 7325, 1955, 1153, 4065, 83, 296, 1199, 3070, 192, 624, 93, 7326, 822, 1897, # 2982 - 2810, 3111, 795, 2064, 991, 1554, 1542, 1592, 27, 43, 2853, 859, 139, 1456, 860, 4300, # 2998 - 437, 712, 3871, 164, 2392, 3112, 695, 211, 3017, 2096, 195, 3872, 1608, 3504, 3505, 3618, # 3014 - 3873, 234, 811, 2971, 2097, 3874, 2229, 1441, 3506, 1615, 2375, 668, 2076, 1638, 305, 228, # 3030 - 1664, 4301, 467, 415, 7327, 262, 2098, 1593, 239, 108, 300, 200, 1033, 512, 1247, 2077, # 3046 - 7328, 7329, 2173, 3176, 3619, 2673, 593, 845, 1062, 3244, 88, 1723, 2037, 3875, 1950, 212, # 3062 - 266, 152, 149, 468, 1898, 4066, 4302, 77, 187, 7330, 3018, 37, 5, 2972, 7331, 3876, # 3078 - 7332, 7333, 39, 2517, 4303, 2894, 3177, 2078, 55, 148, 74, 4304, 545, 483, 1474, 1029, # 3094 - 1665, 217, 1869, 1531, 3113, 1104, 2645, 4067, 24, 172, 3507, 900, 3877, 3508, 3509, 4305, # 3110 - 32, 1408, 2811, 1312, 329, 487, 2355, 2247, 2708, 784, 2674, 4, 3019, 3314, 1427, 1788, # 3126 - 188, 109, 499, 7334, 3620, 1717, 1789, 888, 1217, 3020, 4306, 7335, 3510, 7336, 3315, 1520, # 3142 - 3621, 3878, 196, 1034, 775, 7337, 7338, 929, 1815, 249, 439, 38, 7339, 1063, 7340, 794, # 3158 - 3879, 1435, 2296, 46, 178, 3245, 2065, 7341, 2376, 7342, 214, 1709, 4307, 804, 35, 707, # 3174 - 324, 3622, 1601, 2546, 140, 
459, 4068, 7343, 7344, 1365, 839, 272, 978, 2257, 2572, 3409, # 3190 - 2128, 1363, 3623, 1423, 697, 100, 3071, 48, 70, 1231, 495, 3114, 2193, 7345, 1294, 7346, # 3206 - 2079, 462, 586, 1042, 3246, 853, 256, 988, 185, 2377, 3410, 1698, 434, 1084, 7347, 3411, # 3222 - 314, 2615, 2775, 4308, 2330, 2331, 569, 2280, 637, 1816, 2518, 757, 1162, 1878, 1616, 3412, # 3238 - 287, 1577, 2115, 768, 4309, 1671, 2854, 3511, 2519, 1321, 3737, 909, 2413, 7348, 4069, 933, # 3254 - 3738, 7349, 2052, 2356, 1222, 4310, 765, 2414, 1322, 786, 4311, 7350, 1919, 1462, 1677, 2895, # 3270 - 1699, 7351, 4312, 1424, 2437, 3115, 3624, 2590, 3316, 1774, 1940, 3413, 3880, 4070, 309, 1369, # 3286 - 1130, 2812, 364, 2230, 1653, 1299, 3881, 3512, 3882, 3883, 2646, 525, 1085, 3021, 902, 2000, # 3302 - 1475, 964, 4313, 421, 1844, 1415, 1057, 2281, 940, 1364, 3116, 376, 4314, 4315, 1381, 7, # 3318 - 2520, 983, 2378, 336, 1710, 2675, 1845, 321, 3414, 559, 1131, 3022, 2742, 1808, 1132, 1313, # 3334 - 265, 1481, 1857, 7352, 352, 1203, 2813, 3247, 167, 1089, 420, 2814, 776, 792, 1724, 3513, # 3350 - 4071, 2438, 3248, 7353, 4072, 7354, 446, 229, 333, 2743, 901, 3739, 1200, 1557, 4316, 2647, # 3366 - 1920, 395, 2744, 2676, 3740, 4073, 1835, 125, 916, 3178, 2616, 4317, 7355, 7356, 3741, 7357, # 3382 - 7358, 7359, 4318, 3117, 3625, 1133, 2547, 1757, 3415, 1510, 2313, 1409, 3514, 7360, 2145, 438, # 3398 - 2591, 2896, 2379, 3317, 1068, 958, 3023, 461, 311, 2855, 2677, 4074, 1915, 3179, 4075, 1978, # 3414 - 383, 750, 2745, 2617, 4076, 274, 539, 385, 1278, 1442, 7361, 1154, 1964, 384, 561, 210, # 3430 - 98, 1295, 2548, 3515, 7362, 1711, 2415, 1482, 3416, 3884, 2897, 1257, 129, 7363, 3742, 642, # 3446 - 523, 2776, 2777, 2648, 7364, 141, 2231, 1333, 68, 176, 441, 876, 907, 4077, 603, 2592, # 3462 - 710, 171, 3417, 404, 549, 18, 3118, 2393, 1410, 3626, 1666, 7365, 3516, 4319, 2898, 4320, # 3478 - 7366, 2973, 368, 7367, 146, 366, 99, 871, 3627, 1543, 748, 807, 1586, 1185, 22, 2258, # 3494 - 379, 3743, 3180, 7368, 3181, 505, 1941, 2618, 1991, 1382, 2314, 7369, 380, 2357, 218, 702, # 3510 - 1817, 1248, 3418, 3024, 3517, 3318, 3249, 7370, 2974, 3628, 930, 3250, 3744, 7371, 59, 7372, # 3526 - 585, 601, 4078, 497, 3419, 1112, 1314, 4321, 1801, 7373, 1223, 1472, 2174, 7374, 749, 1836, # 3542 - 690, 1899, 3745, 1772, 3885, 1476, 429, 1043, 1790, 2232, 2116, 917, 4079, 447, 1086, 1629, # 3558 - 7375, 556, 7376, 7377, 2020, 1654, 844, 1090, 105, 550, 966, 1758, 2815, 1008, 1782, 686, # 3574 - 1095, 7378, 2282, 793, 1602, 7379, 3518, 2593, 4322, 4080, 2933, 2297, 4323, 3746, 980, 2496, # 3590 - 544, 353, 527, 4324, 908, 2678, 2899, 7380, 381, 2619, 1942, 1348, 7381, 1341, 1252, 560, # 3606 - 3072, 7382, 3420, 2856, 7383, 2053, 973, 886, 2080, 143, 4325, 7384, 7385, 157, 3886, 496, # 3622 - 4081, 57, 840, 540, 2038, 4326, 4327, 3421, 2117, 1445, 970, 2259, 1748, 1965, 2081, 4082, # 3638 - 3119, 1234, 1775, 3251, 2816, 3629, 773, 1206, 2129, 1066, 2039, 1326, 3887, 1738, 1725, 4083, # 3654 - 279, 3120, 51, 1544, 2594, 423, 1578, 2130, 2066, 173, 4328, 1879, 7386, 7387, 1583, 264, # 3670 - 610, 3630, 4329, 2439, 280, 154, 7388, 7389, 7390, 1739, 338, 1282, 3073, 693, 2857, 1411, # 3686 - 1074, 3747, 2440, 7391, 4330, 7392, 7393, 1240, 952, 2394, 7394, 2900, 1538, 2679, 685, 1483, # 3702 - 4084, 2468, 1436, 953, 4085, 2054, 4331, 671, 2395, 79, 4086, 2441, 3252, 608, 567, 2680, # 3718 - 3422, 4087, 4088, 1691, 393, 1261, 1791, 2396, 7395, 4332, 7396, 7397, 7398, 7399, 1383, 1672, # 3734 - 3748, 3182, 1464, 522, 1119, 661, 1150, 216, 675, 4333, 3888, 1432, 
3519, 609, 4334, 2681, # 3750 - 2397, 7400, 7401, 7402, 4089, 3025, 0, 7403, 2469, 315, 231, 2442, 301, 3319, 4335, 2380, # 3766 - 7404, 233, 4090, 3631, 1818, 4336, 4337, 7405, 96, 1776, 1315, 2082, 7406, 257, 7407, 1809, # 3782 - 3632, 2709, 1139, 1819, 4091, 2021, 1124, 2163, 2778, 1777, 2649, 7408, 3074, 363, 1655, 3183, # 3798 - 7409, 2975, 7410, 7411, 7412, 3889, 1567, 3890, 718, 103, 3184, 849, 1443, 341, 3320, 2934, # 3814 - 1484, 7413, 1712, 127, 67, 339, 4092, 2398, 679, 1412, 821, 7414, 7415, 834, 738, 351, # 3830 - 2976, 2146, 846, 235, 1497, 1880, 418, 1992, 3749, 2710, 186, 1100, 2147, 2746, 3520, 1545, # 3846 - 1355, 2935, 2858, 1377, 583, 3891, 4093, 2573, 2977, 7416, 1298, 3633, 1078, 2549, 3634, 2358, # 3862 - 78, 3750, 3751, 267, 1289, 2099, 2001, 1594, 4094, 348, 369, 1274, 2194, 2175, 1837, 4338, # 3878 - 1820, 2817, 3635, 2747, 2283, 2002, 4339, 2936, 2748, 144, 3321, 882, 4340, 3892, 2749, 3423, # 3894 - 4341, 2901, 7417, 4095, 1726, 320, 7418, 3893, 3026, 788, 2978, 7419, 2818, 1773, 1327, 2859, # 3910 - 3894, 2819, 7420, 1306, 4342, 2003, 1700, 3752, 3521, 2359, 2650, 787, 2022, 506, 824, 3636, # 3926 - 534, 323, 4343, 1044, 3322, 2023, 1900, 946, 3424, 7421, 1778, 1500, 1678, 7422, 1881, 4344, # 3942 - 165, 243, 4345, 3637, 2521, 123, 683, 4096, 764, 4346, 36, 3895, 1792, 589, 2902, 816, # 3958 - 626, 1667, 3027, 2233, 1639, 1555, 1622, 3753, 3896, 7423, 3897, 2860, 1370, 1228, 1932, 891, # 3974 - 2083, 2903, 304, 4097, 7424, 292, 2979, 2711, 3522, 691, 2100, 4098, 1115, 4347, 118, 662, # 3990 - 7425, 611, 1156, 854, 2381, 1316, 2861, 2, 386, 515, 2904, 7426, 7427, 3253, 868, 2234, # 4006 - 1486, 855, 2651, 785, 2212, 3028, 7428, 1040, 3185, 3523, 7429, 3121, 448, 7430, 1525, 7431, # 4022 - 2164, 4348, 7432, 3754, 7433, 4099, 2820, 3524, 3122, 503, 818, 3898, 3123, 1568, 814, 676, # 4038 - 1444, 306, 1749, 7434, 3755, 1416, 1030, 197, 1428, 805, 2821, 1501, 4349, 7435, 7436, 7437, # 4054 - 1993, 7438, 4350, 7439, 7440, 2195, 13, 2779, 3638, 2980, 3124, 1229, 1916, 7441, 3756, 2131, # 4070 - 7442, 4100, 4351, 2399, 3525, 7443, 2213, 1511, 1727, 1120, 7444, 7445, 646, 3757, 2443, 307, # 4086 - 7446, 7447, 1595, 3186, 7448, 7449, 7450, 3639, 1113, 1356, 3899, 1465, 2522, 2523, 7451, 519, # 4102 - 7452, 128, 2132, 92, 2284, 1979, 7453, 3900, 1512, 342, 3125, 2196, 7454, 2780, 2214, 1980, # 4118 - 3323, 7455, 290, 1656, 1317, 789, 827, 2360, 7456, 3758, 4352, 562, 581, 3901, 7457, 401, # 4134 - 4353, 2248, 94, 4354, 1399, 2781, 7458, 1463, 2024, 4355, 3187, 1943, 7459, 828, 1105, 4101, # 4150 - 1262, 1394, 7460, 4102, 605, 4356, 7461, 1783, 2862, 7462, 2822, 819, 2101, 578, 2197, 2937, # 4166 - 7463, 1502, 436, 3254, 4103, 3255, 2823, 3902, 2905, 3425, 3426, 7464, 2712, 2315, 7465, 7466, # 4182 - 2332, 2067, 23, 4357, 193, 826, 3759, 2102, 699, 1630, 4104, 3075, 390, 1793, 1064, 3526, # 4198 - 7467, 1579, 3076, 3077, 1400, 7468, 4105, 1838, 1640, 2863, 7469, 4358, 4359, 137, 4106, 598, # 4214 - 3078, 1966, 780, 104, 974, 2938, 7470, 278, 899, 253, 402, 572, 504, 493, 1339, 7471, # 4230 - 3903, 1275, 4360, 2574, 2550, 7472, 3640, 3029, 3079, 2249, 565, 1334, 2713, 863, 41, 7473, # 4246 - 7474, 4361, 7475, 1657, 2333, 19, 463, 2750, 4107, 606, 7476, 2981, 3256, 1087, 2084, 1323, # 4262 - 2652, 2982, 7477, 1631, 1623, 1750, 4108, 2682, 7478, 2864, 791, 2714, 2653, 2334, 232, 2416, # 4278 - 7479, 2983, 1498, 7480, 2654, 2620, 755, 1366, 3641, 3257, 3126, 2025, 1609, 119, 1917, 3427, # 4294 - 862, 1026, 4109, 7481, 3904, 3760, 4362, 3905, 4363, 2260, 1951, 2470, 7482, 1125, 
817, 4110, # 4310 - 4111, 3906, 1513, 1766, 2040, 1487, 4112, 3030, 3258, 2824, 3761, 3127, 7483, 7484, 1507, 7485, # 4326 - 2683, 733, 40, 1632, 1106, 2865, 345, 4113, 841, 2524, 230, 4364, 2984, 1846, 3259, 3428, # 4342 - 7486, 1263, 986, 3429, 7487, 735, 879, 254, 1137, 857, 622, 1300, 1180, 1388, 1562, 3907, # 4358 - 3908, 2939, 967, 2751, 2655, 1349, 592, 2133, 1692, 3324, 2985, 1994, 4114, 1679, 3909, 1901, # 4374 - 2185, 7488, 739, 3642, 2715, 1296, 1290, 7489, 4115, 2198, 2199, 1921, 1563, 2595, 2551, 1870, # 4390 - 2752, 2986, 7490, 435, 7491, 343, 1108, 596, 17, 1751, 4365, 2235, 3430, 3643, 7492, 4366, # 4406 - 294, 3527, 2940, 1693, 477, 979, 281, 2041, 3528, 643, 2042, 3644, 2621, 2782, 2261, 1031, # 4422 - 2335, 2134, 2298, 3529, 4367, 367, 1249, 2552, 7493, 3530, 7494, 4368, 1283, 3325, 2004, 240, # 4438 - 1762, 3326, 4369, 4370, 836, 1069, 3128, 474, 7495, 2148, 2525, 268, 3531, 7496, 3188, 1521, # 4454 - 1284, 7497, 1658, 1546, 4116, 7498, 3532, 3533, 7499, 4117, 3327, 2684, 1685, 4118, 961, 1673, # 4470 - 2622, 190, 2005, 2200, 3762, 4371, 4372, 7500, 570, 2497, 3645, 1490, 7501, 4373, 2623, 3260, # 4486 - 1956, 4374, 584, 1514, 396, 1045, 1944, 7502, 4375, 1967, 2444, 7503, 7504, 4376, 3910, 619, # 4502 - 7505, 3129, 3261, 215, 2006, 2783, 2553, 3189, 4377, 3190, 4378, 763, 4119, 3763, 4379, 7506, # 4518 - 7507, 1957, 1767, 2941, 3328, 3646, 1174, 452, 1477, 4380, 3329, 3130, 7508, 2825, 1253, 2382, # 4534 - 2186, 1091, 2285, 4120, 492, 7509, 638, 1169, 1824, 2135, 1752, 3911, 648, 926, 1021, 1324, # 4550 - 4381, 520, 4382, 997, 847, 1007, 892, 4383, 3764, 2262, 1871, 3647, 7510, 2400, 1784, 4384, # 4566 - 1952, 2942, 3080, 3191, 1728, 4121, 2043, 3648, 4385, 2007, 1701, 3131, 1551, 30, 2263, 4122, # 4582 - 7511, 2026, 4386, 3534, 7512, 501, 7513, 4123, 594, 3431, 2165, 1821, 3535, 3432, 3536, 3192, # 4598 - 829, 2826, 4124, 7514, 1680, 3132, 1225, 4125, 7515, 3262, 4387, 4126, 3133, 2336, 7516, 4388, # 4614 - 4127, 7517, 3912, 3913, 7518, 1847, 2383, 2596, 3330, 7519, 4389, 374, 3914, 652, 4128, 4129, # 4630 - 375, 1140, 798, 7520, 7521, 7522, 2361, 4390, 2264, 546, 1659, 138, 3031, 2445, 4391, 7523, # 4646 - 2250, 612, 1848, 910, 796, 3765, 1740, 1371, 825, 3766, 3767, 7524, 2906, 2554, 7525, 692, # 4662 - 444, 3032, 2624, 801, 4392, 4130, 7526, 1491, 244, 1053, 3033, 4131, 4132, 340, 7527, 3915, # 4678 - 1041, 2987, 293, 1168, 87, 1357, 7528, 1539, 959, 7529, 2236, 721, 694, 4133, 3768, 219, # 4694 - 1478, 644, 1417, 3331, 2656, 1413, 1401, 1335, 1389, 3916, 7530, 7531, 2988, 2362, 3134, 1825, # 4710 - 730, 1515, 184, 2827, 66, 4393, 7532, 1660, 2943, 246, 3332, 378, 1457, 226, 3433, 975, # 4726 - 3917, 2944, 1264, 3537, 674, 696, 7533, 163, 7534, 1141, 2417, 2166, 713, 3538, 3333, 4394, # 4742 - 3918, 7535, 7536, 1186, 15, 7537, 1079, 1070, 7538, 1522, 3193, 3539, 276, 1050, 2716, 758, # 4758 - 1126, 653, 2945, 3263, 7539, 2337, 889, 3540, 3919, 3081, 2989, 903, 1250, 4395, 3920, 3434, # 4774 - 3541, 1342, 1681, 1718, 766, 3264, 286, 89, 2946, 3649, 7540, 1713, 7541, 2597, 3334, 2990, # 4790 - 7542, 2947, 2215, 3194, 2866, 7543, 4396, 2498, 2526, 181, 387, 1075, 3921, 731, 2187, 3335, # 4806 - 7544, 3265, 310, 313, 3435, 2299, 770, 4134, 54, 3034, 189, 4397, 3082, 3769, 3922, 7545, # 4822 - 1230, 1617, 1849, 355, 3542, 4135, 4398, 3336, 111, 4136, 3650, 1350, 3135, 3436, 3035, 4137, # 4838 - 2149, 3266, 3543, 7546, 2784, 3923, 3924, 2991, 722, 2008, 7547, 1071, 247, 1207, 2338, 2471, # 4854 - 1378, 4399, 2009, 864, 1437, 1214, 4400, 373, 3770, 1142, 2216, 667, 4401, 
442, 2753, 2555, # 4870 - 3771, 3925, 1968, 4138, 3267, 1839, 837, 170, 1107, 934, 1336, 1882, 7548, 7549, 2118, 4139, # 4886 - 2828, 743, 1569, 7550, 4402, 4140, 582, 2384, 1418, 3437, 7551, 1802, 7552, 357, 1395, 1729, # 4902 - 3651, 3268, 2418, 1564, 2237, 7553, 3083, 3772, 1633, 4403, 1114, 2085, 4141, 1532, 7554, 482, # 4918 - 2446, 4404, 7555, 7556, 1492, 833, 1466, 7557, 2717, 3544, 1641, 2829, 7558, 1526, 1272, 3652, # 4934 - 4142, 1686, 1794, 416, 2556, 1902, 1953, 1803, 7559, 3773, 2785, 3774, 1159, 2316, 7560, 2867, # 4950 - 4405, 1610, 1584, 3036, 2419, 2754, 443, 3269, 1163, 3136, 7561, 7562, 3926, 7563, 4143, 2499, # 4966 - 3037, 4406, 3927, 3137, 2103, 1647, 3545, 2010, 1872, 4144, 7564, 4145, 431, 3438, 7565, 250, # 4982 - 97, 81, 4146, 7566, 1648, 1850, 1558, 160, 848, 7567, 866, 740, 1694, 7568, 2201, 2830, # 4998 - 3195, 4147, 4407, 3653, 1687, 950, 2472, 426, 469, 3196, 3654, 3655, 3928, 7569, 7570, 1188, # 5014 - 424, 1995, 861, 3546, 4148, 3775, 2202, 2685, 168, 1235, 3547, 4149, 7571, 2086, 1674, 4408, # 5030 - 3337, 3270, 220, 2557, 1009, 7572, 3776, 670, 2992, 332, 1208, 717, 7573, 7574, 3548, 2447, # 5046 - 3929, 3338, 7575, 513, 7576, 1209, 2868, 3339, 3138, 4409, 1080, 7577, 7578, 7579, 7580, 2527, # 5062 - 3656, 3549, 815, 1587, 3930, 3931, 7581, 3550, 3439, 3777, 1254, 4410, 1328, 3038, 1390, 3932, # 5078 - 1741, 3933, 3778, 3934, 7582, 236, 3779, 2448, 3271, 7583, 7584, 3657, 3780, 1273, 3781, 4411, # 5094 - 7585, 308, 7586, 4412, 245, 4413, 1851, 2473, 1307, 2575, 430, 715, 2136, 2449, 7587, 270, # 5110 - 199, 2869, 3935, 7588, 3551, 2718, 1753, 761, 1754, 725, 1661, 1840, 4414, 3440, 3658, 7589, # 5126 - 7590, 587, 14, 3272, 227, 2598, 326, 480, 2265, 943, 2755, 3552, 291, 650, 1883, 7591, # 5142 - 1702, 1226, 102, 1547, 62, 3441, 904, 4415, 3442, 1164, 4150, 7592, 7593, 1224, 1548, 2756, # 5158 - 391, 498, 1493, 7594, 1386, 1419, 7595, 2055, 1177, 4416, 813, 880, 1081, 2363, 566, 1145, # 5174 - 4417, 2286, 1001, 1035, 2558, 2599, 2238, 394, 1286, 7596, 7597, 2068, 7598, 86, 1494, 1730, # 5190 - 3936, 491, 1588, 745, 897, 2948, 843, 3340, 3937, 2757, 2870, 3273, 1768, 998, 2217, 2069, # 5206 - 397, 1826, 1195, 1969, 3659, 2993, 3341, 284, 7599, 3782, 2500, 2137, 2119, 1903, 7600, 3938, # 5222 - 2150, 3939, 4151, 1036, 3443, 1904, 114, 2559, 4152, 209, 1527, 7601, 7602, 2949, 2831, 2625, # 5238 - 2385, 2719, 3139, 812, 2560, 7603, 3274, 7604, 1559, 737, 1884, 3660, 1210, 885, 28, 2686, # 5254 - 3553, 3783, 7605, 4153, 1004, 1779, 4418, 7606, 346, 1981, 2218, 2687, 4419, 3784, 1742, 797, # 5270 - 1642, 3940, 1933, 1072, 1384, 2151, 896, 3941, 3275, 3661, 3197, 2871, 3554, 7607, 2561, 1958, # 5286 - 4420, 2450, 1785, 7608, 7609, 7610, 3942, 4154, 1005, 1308, 3662, 4155, 2720, 4421, 4422, 1528, # 5302 - 2600, 161, 1178, 4156, 1982, 987, 4423, 1101, 4157, 631, 3943, 1157, 3198, 2420, 1343, 1241, # 5318 - 1016, 2239, 2562, 372, 877, 2339, 2501, 1160, 555, 1934, 911, 3944, 7611, 466, 1170, 169, # 5334 - 1051, 2907, 2688, 3663, 2474, 2994, 1182, 2011, 2563, 1251, 2626, 7612, 992, 2340, 3444, 1540, # 5350 - 2721, 1201, 2070, 2401, 1996, 2475, 7613, 4424, 528, 1922, 2188, 1503, 1873, 1570, 2364, 3342, # 5366 - 3276, 7614, 557, 1073, 7615, 1827, 3445, 2087, 2266, 3140, 3039, 3084, 767, 3085, 2786, 4425, # 5382 - 1006, 4158, 4426, 2341, 1267, 2176, 3664, 3199, 778, 3945, 3200, 2722, 1597, 2657, 7616, 4427, # 5398 - 7617, 3446, 7618, 7619, 7620, 3277, 2689, 1433, 3278, 131, 95, 1504, 3946, 723, 4159, 3141, # 5414 - 1841, 3555, 2758, 2189, 3947, 2027, 2104, 3665, 7621, 
2995, 3948, 1218, 7622, 3343, 3201, 3949, # 5430 - 4160, 2576, 248, 1634, 3785, 912, 7623, 2832, 3666, 3040, 3786, 654, 53, 7624, 2996, 7625, # 5446 - 1688, 4428, 777, 3447, 1032, 3950, 1425, 7626, 191, 820, 2120, 2833, 971, 4429, 931, 3202, # 5462 - 135, 664, 783, 3787, 1997, 772, 2908, 1935, 3951, 3788, 4430, 2909, 3203, 282, 2723, 640, # 5478 - 1372, 3448, 1127, 922, 325, 3344, 7627, 7628, 711, 2044, 7629, 7630, 3952, 2219, 2787, 1936, # 5494 - 3953, 3345, 2220, 2251, 3789, 2300, 7631, 4431, 3790, 1258, 3279, 3954, 3204, 2138, 2950, 3955, # 5510 - 3956, 7632, 2221, 258, 3205, 4432, 101, 1227, 7633, 3280, 1755, 7634, 1391, 3281, 7635, 2910, # 5526 - 2056, 893, 7636, 7637, 7638, 1402, 4161, 2342, 7639, 7640, 3206, 3556, 7641, 7642, 878, 1325, # 5542 - 1780, 2788, 4433, 259, 1385, 2577, 744, 1183, 2267, 4434, 7643, 3957, 2502, 7644, 684, 1024, # 5558 - 4162, 7645, 472, 3557, 3449, 1165, 3282, 3958, 3959, 322, 2152, 881, 455, 1695, 1152, 1340, # 5574 - 660, 554, 2153, 4435, 1058, 4436, 4163, 830, 1065, 3346, 3960, 4437, 1923, 7646, 1703, 1918, # 5590 - 7647, 932, 2268, 122, 7648, 4438, 947, 677, 7649, 3791, 2627, 297, 1905, 1924, 2269, 4439, # 5606 - 2317, 3283, 7650, 7651, 4164, 7652, 4165, 84, 4166, 112, 989, 7653, 547, 1059, 3961, 701, # 5622 - 3558, 1019, 7654, 4167, 7655, 3450, 942, 639, 457, 2301, 2451, 993, 2951, 407, 851, 494, # 5638 - 4440, 3347, 927, 7656, 1237, 7657, 2421, 3348, 573, 4168, 680, 921, 2911, 1279, 1874, 285, # 5654 - 790, 1448, 1983, 719, 2167, 7658, 7659, 4441, 3962, 3963, 1649, 7660, 1541, 563, 7661, 1077, # 5670 - 7662, 3349, 3041, 3451, 511, 2997, 3964, 3965, 3667, 3966, 1268, 2564, 3350, 3207, 4442, 4443, # 5686 - 7663, 535, 1048, 1276, 1189, 2912, 2028, 3142, 1438, 1373, 2834, 2952, 1134, 2012, 7664, 4169, # 5702 - 1238, 2578, 3086, 1259, 7665, 700, 7666, 2953, 3143, 3668, 4170, 7667, 4171, 1146, 1875, 1906, # 5718 - 4444, 2601, 3967, 781, 2422, 132, 1589, 203, 147, 273, 2789, 2402, 898, 1786, 2154, 3968, # 5734 - 3969, 7668, 3792, 2790, 7669, 7670, 4445, 4446, 7671, 3208, 7672, 1635, 3793, 965, 7673, 1804, # 5750 - 2690, 1516, 3559, 1121, 1082, 1329, 3284, 3970, 1449, 3794, 65, 1128, 2835, 2913, 2759, 1590, # 5766 - 3795, 7674, 7675, 12, 2658, 45, 976, 2579, 3144, 4447, 517, 2528, 1013, 1037, 3209, 7676, # 5782 - 3796, 2836, 7677, 3797, 7678, 3452, 7679, 2602, 614, 1998, 2318, 3798, 3087, 2724, 2628, 7680, # 5798 - 2580, 4172, 599, 1269, 7681, 1810, 3669, 7682, 2691, 3088, 759, 1060, 489, 1805, 3351, 3285, # 5814 - 1358, 7683, 7684, 2386, 1387, 1215, 2629, 2252, 490, 7685, 7686, 4173, 1759, 2387, 2343, 7687, # 5830 - 4448, 3799, 1907, 3971, 2630, 1806, 3210, 4449, 3453, 3286, 2760, 2344, 874, 7688, 7689, 3454, # 5846 - 3670, 1858, 91, 2914, 3671, 3042, 3800, 4450, 7690, 3145, 3972, 2659, 7691, 3455, 1202, 1403, # 5862 - 3801, 2954, 2529, 1517, 2503, 4451, 3456, 2504, 7692, 4452, 7693, 2692, 1885, 1495, 1731, 3973, # 5878 - 2365, 4453, 7694, 2029, 7695, 7696, 3974, 2693, 1216, 237, 2581, 4174, 2319, 3975, 3802, 4454, # 5894 - 4455, 2694, 3560, 3457, 445, 4456, 7697, 7698, 7699, 7700, 2761, 61, 3976, 3672, 1822, 3977, # 5910 - 7701, 687, 2045, 935, 925, 405, 2660, 703, 1096, 1859, 2725, 4457, 3978, 1876, 1367, 2695, # 5926 - 3352, 918, 2105, 1781, 2476, 334, 3287, 1611, 1093, 4458, 564, 3146, 3458, 3673, 3353, 945, # 5942 - 2631, 2057, 4459, 7702, 1925, 872, 4175, 7703, 3459, 2696, 3089, 349, 4176, 3674, 3979, 4460, # 5958 - 3803, 4177, 3675, 2155, 3980, 4461, 4462, 4178, 4463, 2403, 2046, 782, 3981, 400, 251, 4179, # 5974 - 1624, 7704, 7705, 277, 3676, 299, 
1265, 476, 1191, 3804, 2121, 4180, 4181, 1109, 205, 7706, # 5990 - 2582, 1000, 2156, 3561, 1860, 7707, 7708, 7709, 4464, 7710, 4465, 2565, 107, 2477, 2157, 3982, # 6006 - 3460, 3147, 7711, 1533, 541, 1301, 158, 753, 4182, 2872, 3562, 7712, 1696, 370, 1088, 4183, # 6022 - 4466, 3563, 579, 327, 440, 162, 2240, 269, 1937, 1374, 3461, 968, 3043, 56, 1396, 3090, # 6038 - 2106, 3288, 3354, 7713, 1926, 2158, 4467, 2998, 7714, 3564, 7715, 7716, 3677, 4468, 2478, 7717, # 6054 - 2791, 7718, 1650, 4469, 7719, 2603, 7720, 7721, 3983, 2661, 3355, 1149, 3356, 3984, 3805, 3985, # 6070 - 7722, 1076, 49, 7723, 951, 3211, 3289, 3290, 450, 2837, 920, 7724, 1811, 2792, 2366, 4184, # 6086 - 1908, 1138, 2367, 3806, 3462, 7725, 3212, 4470, 1909, 1147, 1518, 2423, 4471, 3807, 7726, 4472, # 6102 - 2388, 2604, 260, 1795, 3213, 7727, 7728, 3808, 3291, 708, 7729, 3565, 1704, 7730, 3566, 1351, # 6118 - 1618, 3357, 2999, 1886, 944, 4185, 3358, 4186, 3044, 3359, 4187, 7731, 3678, 422, 413, 1714, # 6134 - 3292, 500, 2058, 2345, 4188, 2479, 7732, 1344, 1910, 954, 7733, 1668, 7734, 7735, 3986, 2404, # 6150 - 4189, 3567, 3809, 4190, 7736, 2302, 1318, 2505, 3091, 133, 3092, 2873, 4473, 629, 31, 2838, # 6166 - 2697, 3810, 4474, 850, 949, 4475, 3987, 2955, 1732, 2088, 4191, 1496, 1852, 7737, 3988, 620, # 6182 - 3214, 981, 1242, 3679, 3360, 1619, 3680, 1643, 3293, 2139, 2452, 1970, 1719, 3463, 2168, 7738, # 6198 - 3215, 7739, 7740, 3361, 1828, 7741, 1277, 4476, 1565, 2047, 7742, 1636, 3568, 3093, 7743, 869, # 6214 - 2839, 655, 3811, 3812, 3094, 3989, 3000, 3813, 1310, 3569, 4477, 7744, 7745, 7746, 1733, 558, # 6230 - 4478, 3681, 335, 1549, 3045, 1756, 4192, 3682, 1945, 3464, 1829, 1291, 1192, 470, 2726, 2107, # 6246 - 2793, 913, 1054, 3990, 7747, 1027, 7748, 3046, 3991, 4479, 982, 2662, 3362, 3148, 3465, 3216, # 6262 - 3217, 1946, 2794, 7749, 571, 4480, 7750, 1830, 7751, 3570, 2583, 1523, 2424, 7752, 2089, 984, # 6278 - 4481, 3683, 1959, 7753, 3684, 852, 923, 2795, 3466, 3685, 969, 1519, 999, 2048, 2320, 1705, # 6294 - 7754, 3095, 615, 1662, 151, 597, 3992, 2405, 2321, 1049, 275, 4482, 3686, 4193, 568, 3687, # 6310 - 3571, 2480, 4194, 3688, 7755, 2425, 2270, 409, 3218, 7756, 1566, 2874, 3467, 1002, 769, 2840, # 6326 - 194, 2090, 3149, 3689, 2222, 3294, 4195, 628, 1505, 7757, 7758, 1763, 2177, 3001, 3993, 521, # 6342 - 1161, 2584, 1787, 2203, 2406, 4483, 3994, 1625, 4196, 4197, 412, 42, 3096, 464, 7759, 2632, # 6358 - 4484, 3363, 1760, 1571, 2875, 3468, 2530, 1219, 2204, 3814, 2633, 2140, 2368, 4485, 4486, 3295, # 6374 - 1651, 3364, 3572, 7760, 7761, 3573, 2481, 3469, 7762, 3690, 7763, 7764, 2271, 2091, 460, 7765, # 6390 - 4487, 7766, 3002, 962, 588, 3574, 289, 3219, 2634, 1116, 52, 7767, 3047, 1796, 7768, 7769, # 6406 - 7770, 1467, 7771, 1598, 1143, 3691, 4198, 1984, 1734, 1067, 4488, 1280, 3365, 465, 4489, 1572, # 6422 - 510, 7772, 1927, 2241, 1812, 1644, 3575, 7773, 4490, 3692, 7774, 7775, 2663, 1573, 1534, 7776, # 6438 - 7777, 4199, 536, 1807, 1761, 3470, 3815, 3150, 2635, 7778, 7779, 7780, 4491, 3471, 2915, 1911, # 6454 - 2796, 7781, 3296, 1122, 377, 3220, 7782, 360, 7783, 7784, 4200, 1529, 551, 7785, 2059, 3693, # 6470 - 1769, 2426, 7786, 2916, 4201, 3297, 3097, 2322, 2108, 2030, 4492, 1404, 136, 1468, 1479, 672, # 6486 - 1171, 3221, 2303, 271, 3151, 7787, 2762, 7788, 2049, 678, 2727, 865, 1947, 4493, 7789, 2013, # 6502 - 3995, 2956, 7790, 2728, 2223, 1397, 3048, 3694, 4494, 4495, 1735, 2917, 3366, 3576, 7791, 3816, # 6518 - 509, 2841, 2453, 2876, 3817, 7792, 7793, 3152, 3153, 4496, 4202, 2531, 4497, 2304, 1166, 1010, # 
6534 - 552, 681, 1887, 7794, 7795, 2957, 2958, 3996, 1287, 1596, 1861, 3154, 358, 453, 736, 175, # 6550 - 478, 1117, 905, 1167, 1097, 7796, 1853, 1530, 7797, 1706, 7798, 2178, 3472, 2287, 3695, 3473, # 6566 - 3577, 4203, 2092, 4204, 7799, 3367, 1193, 2482, 4205, 1458, 2190, 2205, 1862, 1888, 1421, 3298, # 6582 - 2918, 3049, 2179, 3474, 595, 2122, 7800, 3997, 7801, 7802, 4206, 1707, 2636, 223, 3696, 1359, # 6598 - 751, 3098, 183, 3475, 7803, 2797, 3003, 419, 2369, 633, 704, 3818, 2389, 241, 7804, 7805, # 6614 - 7806, 838, 3004, 3697, 2272, 2763, 2454, 3819, 1938, 2050, 3998, 1309, 3099, 2242, 1181, 7807, # 6630 - 1136, 2206, 3820, 2370, 1446, 4207, 2305, 4498, 7808, 7809, 4208, 1055, 2605, 484, 3698, 7810, # 6646 - 3999, 625, 4209, 2273, 3368, 1499, 4210, 4000, 7811, 4001, 4211, 3222, 2274, 2275, 3476, 7812, # 6662 - 7813, 2764, 808, 2606, 3699, 3369, 4002, 4212, 3100, 2532, 526, 3370, 3821, 4213, 955, 7814, # 6678 - 1620, 4214, 2637, 2427, 7815, 1429, 3700, 1669, 1831, 994, 928, 7816, 3578, 1260, 7817, 7818, # 6694 - 7819, 1948, 2288, 741, 2919, 1626, 4215, 2729, 2455, 867, 1184, 362, 3371, 1392, 7820, 7821, # 6710 - 4003, 4216, 1770, 1736, 3223, 2920, 4499, 4500, 1928, 2698, 1459, 1158, 7822, 3050, 3372, 2877, # 6726 - 1292, 1929, 2506, 2842, 3701, 1985, 1187, 2071, 2014, 2607, 4217, 7823, 2566, 2507, 2169, 3702, # 6742 - 2483, 3299, 7824, 3703, 4501, 7825, 7826, 666, 1003, 3005, 1022, 3579, 4218, 7827, 4502, 1813, # 6758 - 2253, 574, 3822, 1603, 295, 1535, 705, 3823, 4219, 283, 858, 417, 7828, 7829, 3224, 4503, # 6774 - 4504, 3051, 1220, 1889, 1046, 2276, 2456, 4004, 1393, 1599, 689, 2567, 388, 4220, 7830, 2484, # 6790 - 802, 7831, 2798, 3824, 2060, 1405, 2254, 7832, 4505, 3825, 2109, 1052, 1345, 3225, 1585, 7833, # 6806 - 809, 7834, 7835, 7836, 575, 2730, 3477, 956, 1552, 1469, 1144, 2323, 7837, 2324, 1560, 2457, # 6822 - 3580, 3226, 4005, 616, 2207, 3155, 2180, 2289, 7838, 1832, 7839, 3478, 4506, 7840, 1319, 3704, # 6838 - 3705, 1211, 3581, 1023, 3227, 1293, 2799, 7841, 7842, 7843, 3826, 607, 2306, 3827, 762, 2878, # 6854 - 1439, 4221, 1360, 7844, 1485, 3052, 7845, 4507, 1038, 4222, 1450, 2061, 2638, 4223, 1379, 4508, # 6870 - 2585, 7846, 7847, 4224, 1352, 1414, 2325, 2921, 1172, 7848, 7849, 3828, 3829, 7850, 1797, 1451, # 6886 - 7851, 7852, 7853, 7854, 2922, 4006, 4007, 2485, 2346, 411, 4008, 4009, 3582, 3300, 3101, 4509, # 6902 - 1561, 2664, 1452, 4010, 1375, 7855, 7856, 47, 2959, 316, 7857, 1406, 1591, 2923, 3156, 7858, # 6918 - 1025, 2141, 3102, 3157, 354, 2731, 884, 2224, 4225, 2407, 508, 3706, 726, 3583, 996, 2428, # 6934 - 3584, 729, 7859, 392, 2191, 1453, 4011, 4510, 3707, 7860, 7861, 2458, 3585, 2608, 1675, 2800, # 6950 - 919, 2347, 2960, 2348, 1270, 4511, 4012, 73, 7862, 7863, 647, 7864, 3228, 2843, 2255, 1550, # 6966 - 1346, 3006, 7865, 1332, 883, 3479, 7866, 7867, 7868, 7869, 3301, 2765, 7870, 1212, 831, 1347, # 6982 - 4226, 4512, 2326, 3830, 1863, 3053, 720, 3831, 4513, 4514, 3832, 7871, 4227, 7872, 7873, 4515, # 6998 - 7874, 7875, 1798, 4516, 3708, 2609, 4517, 3586, 1645, 2371, 7876, 7877, 2924, 669, 2208, 2665, # 7014 - 2429, 7878, 2879, 7879, 7880, 1028, 3229, 7881, 4228, 2408, 7882, 2256, 1353, 7883, 7884, 4518, # 7030 - 3158, 518, 7885, 4013, 7886, 4229, 1960, 7887, 2142, 4230, 7888, 7889, 3007, 2349, 2350, 3833, # 7046 - 516, 1833, 1454, 4014, 2699, 4231, 4519, 2225, 2610, 1971, 1129, 3587, 7890, 2766, 7891, 2961, # 7062 - 1422, 577, 1470, 3008, 1524, 3373, 7892, 7893, 432, 4232, 3054, 3480, 7894, 2586, 1455, 2508, # 7078 - 2226, 1972, 1175, 7895, 1020, 2732, 4015, 
3481, 4520, 7896, 2733, 7897, 1743, 1361, 3055, 3482, # 7094 - 2639, 4016, 4233, 4521, 2290, 895, 924, 4234, 2170, 331, 2243, 3056, 166, 1627, 3057, 1098, # 7110 - 7898, 1232, 2880, 2227, 3374, 4522, 657, 403, 1196, 2372, 542, 3709, 3375, 1600, 4235, 3483, # 7126 - 7899, 4523, 2767, 3230, 576, 530, 1362, 7900, 4524, 2533, 2666, 3710, 4017, 7901, 842, 3834, # 7142 - 7902, 2801, 2031, 1014, 4018, 213, 2700, 3376, 665, 621, 4236, 7903, 3711, 2925, 2430, 7904, # 7158 - 2431, 3302, 3588, 3377, 7905, 4237, 2534, 4238, 4525, 3589, 1682, 4239, 3484, 1380, 7906, 724, # 7174 - 2277, 600, 1670, 7907, 1337, 1233, 4526, 3103, 2244, 7908, 1621, 4527, 7909, 651, 4240, 7910, # 7190 - 1612, 4241, 2611, 7911, 2844, 7912, 2734, 2307, 3058, 7913, 716, 2459, 3059, 174, 1255, 2701, # 7206 - 4019, 3590, 548, 1320, 1398, 728, 4020, 1574, 7914, 1890, 1197, 3060, 4021, 7915, 3061, 3062, # 7222 - 3712, 3591, 3713, 747, 7916, 635, 4242, 4528, 7917, 7918, 7919, 4243, 7920, 7921, 4529, 7922, # 7238 - 3378, 4530, 2432, 451, 7923, 3714, 2535, 2072, 4244, 2735, 4245, 4022, 7924, 1764, 4531, 7925, # 7254 - 4246, 350, 7926, 2278, 2390, 2486, 7927, 4247, 4023, 2245, 1434, 4024, 488, 4532, 458, 4248, # 7270 - 4025, 3715, 771, 1330, 2391, 3835, 2568, 3159, 2159, 2409, 1553, 2667, 3160, 4249, 7928, 2487, # 7286 - 2881, 2612, 1720, 2702, 4250, 3379, 4533, 7929, 2536, 4251, 7930, 3231, 4252, 2768, 7931, 2015, # 7302 - 2736, 7932, 1155, 1017, 3716, 3836, 7933, 3303, 2308, 201, 1864, 4253, 1430, 7934, 4026, 7935, # 7318 - 7936, 7937, 7938, 7939, 4254, 1604, 7940, 414, 1865, 371, 2587, 4534, 4535, 3485, 2016, 3104, # 7334 - 4536, 1708, 960, 4255, 887, 389, 2171, 1536, 1663, 1721, 7941, 2228, 4027, 2351, 2926, 1580, # 7350 - 7942, 7943, 7944, 1744, 7945, 2537, 4537, 4538, 7946, 4539, 7947, 2073, 7948, 7949, 3592, 3380, # 7366 - 2882, 4256, 7950, 4257, 2640, 3381, 2802, 673, 2703, 2460, 709, 3486, 4028, 3593, 4258, 7951, # 7382 - 1148, 502, 634, 7952, 7953, 1204, 4540, 3594, 1575, 4541, 2613, 3717, 7954, 3718, 3105, 948, # 7398 - 3232, 121, 1745, 3837, 1110, 7955, 4259, 3063, 2509, 3009, 4029, 3719, 1151, 1771, 3838, 1488, # 7414 - 4030, 1986, 7956, 2433, 3487, 7957, 7958, 2093, 7959, 4260, 3839, 1213, 1407, 2803, 531, 2737, # 7430 - 2538, 3233, 1011, 1537, 7960, 2769, 4261, 3106, 1061, 7961, 3720, 3721, 1866, 2883, 7962, 2017, # 7446 - 120, 4262, 4263, 2062, 3595, 3234, 2309, 3840, 2668, 3382, 1954, 4542, 7963, 7964, 3488, 1047, # 7462 - 2704, 1266, 7965, 1368, 4543, 2845, 649, 3383, 3841, 2539, 2738, 1102, 2846, 2669, 7966, 7967, # 7478 - 1999, 7968, 1111, 3596, 2962, 7969, 2488, 3842, 3597, 2804, 1854, 3384, 3722, 7970, 7971, 3385, # 7494 - 2410, 2884, 3304, 3235, 3598, 7972, 2569, 7973, 3599, 2805, 4031, 1460, 856, 7974, 3600, 7975, # 7510 - 2885, 2963, 7976, 2886, 3843, 7977, 4264, 632, 2510, 875, 3844, 1697, 3845, 2291, 7978, 7979, # 7526 - 4544, 3010, 1239, 580, 4545, 4265, 7980, 914, 936, 2074, 1190, 4032, 1039, 2123, 7981, 7982, # 7542 - 7983, 3386, 1473, 7984, 1354, 4266, 3846, 7985, 2172, 3064, 4033, 915, 3305, 4267, 4268, 3306, # 7558 - 1605, 1834, 7986, 2739, 398, 3601, 4269, 3847, 4034, 328, 1912, 2847, 4035, 3848, 1331, 4270, # 7574 - 3011, 937, 4271, 7987, 3602, 4036, 4037, 3387, 2160, 4546, 3388, 524, 742, 538, 3065, 1012, # 7590 - 7988, 7989, 3849, 2461, 7990, 658, 1103, 225, 3850, 7991, 7992, 4547, 7993, 4548, 7994, 3236, # 7606 - 1243, 7995, 4038, 963, 2246, 4549, 7996, 2705, 3603, 3161, 7997, 7998, 2588, 2327, 7999, 4550, # 7622 - 8000, 8001, 8002, 3489, 3307, 957, 3389, 2540, 2032, 1930, 2927, 2462, 870, 2018, 
3604, 1746, # 7638 - 2770, 2771, 2434, 2463, 8003, 3851, 8004, 3723, 3107, 3724, 3490, 3390, 3725, 8005, 1179, 3066, # 7654 - 8006, 3162, 2373, 4272, 3726, 2541, 3163, 3108, 2740, 4039, 8007, 3391, 1556, 2542, 2292, 977, # 7670 - 2887, 2033, 4040, 1205, 3392, 8008, 1765, 3393, 3164, 2124, 1271, 1689, 714, 4551, 3491, 8009, # 7686 - 2328, 3852, 533, 4273, 3605, 2181, 617, 8010, 2464, 3308, 3492, 2310, 8011, 8012, 3165, 8013, # 7702 - 8014, 3853, 1987, 618, 427, 2641, 3493, 3394, 8015, 8016, 1244, 1690, 8017, 2806, 4274, 4552, # 7718 - 8018, 3494, 8019, 8020, 2279, 1576, 473, 3606, 4275, 3395, 972, 8021, 3607, 8022, 3067, 8023, # 7734 - 8024, 4553, 4554, 8025, 3727, 4041, 4042, 8026, 153, 4555, 356, 8027, 1891, 2888, 4276, 2143, # 7750 - 408, 803, 2352, 8028, 3854, 8029, 4277, 1646, 2570, 2511, 4556, 4557, 3855, 8030, 3856, 4278, # 7766 - 8031, 2411, 3396, 752, 8032, 8033, 1961, 2964, 8034, 746, 3012, 2465, 8035, 4279, 3728, 698, # 7782 - 4558, 1892, 4280, 3608, 2543, 4559, 3609, 3857, 8036, 3166, 3397, 8037, 1823, 1302, 4043, 2706, # 7798 - 3858, 1973, 4281, 8038, 4282, 3167, 823, 1303, 1288, 1236, 2848, 3495, 4044, 3398, 774, 3859, # 7814 - 8039, 1581, 4560, 1304, 2849, 3860, 4561, 8040, 2435, 2161, 1083, 3237, 4283, 4045, 4284, 344, # 7830 - 1173, 288, 2311, 454, 1683, 8041, 8042, 1461, 4562, 4046, 2589, 8043, 8044, 4563, 985, 894, # 7846 - 8045, 3399, 3168, 8046, 1913, 2928, 3729, 1988, 8047, 2110, 1974, 8048, 4047, 8049, 2571, 1194, # 7862 - 425, 8050, 4564, 3169, 1245, 3730, 4285, 8051, 8052, 2850, 8053, 636, 4565, 1855, 3861, 760, # 7878 - 1799, 8054, 4286, 2209, 1508, 4566, 4048, 1893, 1684, 2293, 8055, 8056, 8057, 4287, 4288, 2210, # 7894 - 479, 8058, 8059, 832, 8060, 4049, 2489, 8061, 2965, 2490, 3731, 990, 3109, 627, 1814, 2642, # 7910 - 4289, 1582, 4290, 2125, 2111, 3496, 4567, 8062, 799, 4291, 3170, 8063, 4568, 2112, 1737, 3013, # 7926 - 1018, 543, 754, 4292, 3309, 1676, 4569, 4570, 4050, 8064, 1489, 8065, 3497, 8066, 2614, 2889, # 7942 - 4051, 8067, 8068, 2966, 8069, 8070, 8071, 8072, 3171, 4571, 4572, 2182, 1722, 8073, 3238, 3239, # 7958 - 1842, 3610, 1715, 481, 365, 1975, 1856, 8074, 8075, 1962, 2491, 4573, 8076, 2126, 3611, 3240, # 7974 - 433, 1894, 2063, 2075, 8077, 602, 2741, 8078, 8079, 8080, 8081, 8082, 3014, 1628, 3400, 8083, # 7990 - 3172, 4574, 4052, 2890, 4575, 2512, 8084, 2544, 2772, 8085, 8086, 8087, 3310, 4576, 2891, 8088, # 8006 - 4577, 8089, 2851, 4578, 4579, 1221, 2967, 4053, 2513, 8090, 8091, 8092, 1867, 1989, 8093, 8094, # 8022 - 8095, 1895, 8096, 8097, 4580, 1896, 4054, 318, 8098, 2094, 4055, 4293, 8099, 8100, 485, 8101, # 8038 - 938, 3862, 553, 2670, 116, 8102, 3863, 3612, 8103, 3498, 2671, 2773, 3401, 3311, 2807, 8104, # 8054 - 3613, 2929, 4056, 1747, 2930, 2968, 8105, 8106, 207, 8107, 8108, 2672, 4581, 2514, 8109, 3015, # 8070 - 890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941, # 8086 - 2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118, # 8102 -) -# fmt: on diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py deleted file mode 100644 index 19e4aa97cc138e4bd39bebf6c49ff1955cb00437..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py +++ /dev/null @@ -1,518 +0,0 @@ -""" -TLS with SNI_-support for Python 2. 
Follow these instructions if you would -like to verify TLS certificates in Python 2. Note, the default libraries do -*not* do certificate checking; you need to do additional work to validate -certificates yourself. - -This needs the following packages installed: - -* `pyOpenSSL`_ (tested with 16.0.0) -* `cryptography`_ (minimum 1.3.4, from pyopenssl) -* `idna`_ (minimum 2.0, from cryptography) - -However, pyopenssl depends on cryptography, which depends on idna, so while we -use all three directly here we end up having relatively few packages required. - -You can install them with the following command: - -.. code-block:: bash - - $ python -m pip install pyopenssl cryptography idna - -To activate certificate checking, call -:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code -before you begin making HTTP requests. This can be done in a ``sitecustomize`` -module, or at any other time before your application begins using ``urllib3``, -like this: - -.. code-block:: python - - try: - import pip._vendor.urllib3.contrib.pyopenssl as pyopenssl - pyopenssl.inject_into_urllib3() - except ImportError: - pass - -Now you can use :mod:`urllib3` as you normally would, and it will support SNI -when the required modules are installed. - -Activating this module also has the positive side effect of disabling SSL/TLS -compression in Python 2 (see `CRIME attack`_). - -.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication -.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) -.. _pyopenssl: https://www.pyopenssl.org -.. _cryptography: https://cryptography.io -.. _idna: https://github.com/kjd/idna -""" -from __future__ import absolute_import - -import OpenSSL.crypto -import OpenSSL.SSL -from cryptography import x509 -from cryptography.hazmat.backends.openssl import backend as openssl_backend - -try: - from cryptography.x509 import UnsupportedExtension -except ImportError: - # UnsupportedExtension is gone in cryptography >= 2.1.0 - class UnsupportedExtension(Exception): - pass - - -from io import BytesIO -from socket import error as SocketError -from socket import timeout - -try: # Platform-specific: Python 2 - from socket import _fileobject -except ImportError: # Platform-specific: Python 3 - _fileobject = None - from ..packages.backports.makefile import backport_makefile - -import logging -import ssl -import sys -import warnings - -from .. import util -from ..packages import six -from ..util.ssl_ import PROTOCOL_TLS_CLIENT - -warnings.warn( - "'urllib3.contrib.pyopenssl' module is deprecated and will be removed " - "in a future release of urllib3 2.x. Read more in this issue: " - "https://github.com/urllib3/urllib3/issues/2680", - category=DeprecationWarning, - stacklevel=2, -) - -__all__ = ["inject_into_urllib3", "extract_from_urllib3"] - -# SNI always works. -HAS_SNI = True - -# Map from urllib3 to PyOpenSSL compatible parameter-values. 
-_openssl_versions = { - util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD, - PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD, - ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD, -} - -if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"): - _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD - -if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"): - _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD - -if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"): - _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD - - -_stdlib_to_openssl_verify = { - ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE, - ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER, - ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER - + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, -} -_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items()) - -# OpenSSL will only write 16K at a time -SSL_WRITE_BLOCKSIZE = 16384 - -orig_util_HAS_SNI = util.HAS_SNI -orig_util_SSLContext = util.ssl_.SSLContext - - -log = logging.getLogger(__name__) - - -def inject_into_urllib3(): - "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support." - - _validate_dependencies_met() - - util.SSLContext = PyOpenSSLContext - util.ssl_.SSLContext = PyOpenSSLContext - util.HAS_SNI = HAS_SNI - util.ssl_.HAS_SNI = HAS_SNI - util.IS_PYOPENSSL = True - util.ssl_.IS_PYOPENSSL = True - - -def extract_from_urllib3(): - "Undo monkey-patching by :func:`inject_into_urllib3`." - - util.SSLContext = orig_util_SSLContext - util.ssl_.SSLContext = orig_util_SSLContext - util.HAS_SNI = orig_util_HAS_SNI - util.ssl_.HAS_SNI = orig_util_HAS_SNI - util.IS_PYOPENSSL = False - util.ssl_.IS_PYOPENSSL = False - - -def _validate_dependencies_met(): - """ - Verifies that PyOpenSSL's package-level dependencies have been met. - Throws `ImportError` if they are not met. - """ - # Method added in `cryptography==1.1`; not available in older versions - from cryptography.x509.extensions import Extensions - - if getattr(Extensions, "get_extension_for_class", None) is None: - raise ImportError( - "'cryptography' module missing required functionality. " - "Try upgrading to v1.3.4 or newer." - ) - - # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509 - # attribute is only present on those versions. - from OpenSSL.crypto import X509 - - x509 = X509() - if getattr(x509, "_x509", None) is None: - raise ImportError( - "'pyOpenSSL' module missing required functionality. " - "Try upgrading to v0.14 or newer." - ) - - -def _dnsname_to_stdlib(name): - """ - Converts a dNSName SubjectAlternativeName field to the form used by the - standard library on the given Python version. - - Cryptography produces a dNSName as a unicode string that was idna-decoded - from ASCII bytes. We need to idna-encode that string to get it back, and - then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib - uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). - - If the name cannot be idna-encoded then we return None signalling that - the name given should be skipped. - """ - - def idna_encode(name): - """ - Borrowed wholesale from the Python Cryptography Project. It turns out - that we can't just safely call `idna.encode`: it can explode for - wildcard names. This avoids that problem. 
- """ - from pip._vendor import idna - - try: - for prefix in [u"*.", u"."]: - if name.startswith(prefix): - name = name[len(prefix) :] - return prefix.encode("ascii") + idna.encode(name) - return idna.encode(name) - except idna.core.IDNAError: - return None - - # Don't send IPv6 addresses through the IDNA encoder. - if ":" in name: - return name - - name = idna_encode(name) - if name is None: - return None - elif sys.version_info >= (3, 0): - name = name.decode("utf-8") - return name - - -def get_subj_alt_name(peer_cert): - """ - Given an PyOpenSSL certificate, provides all the subject alternative names. - """ - # Pass the cert to cryptography, which has much better APIs for this. - if hasattr(peer_cert, "to_cryptography"): - cert = peer_cert.to_cryptography() - else: - der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert) - cert = x509.load_der_x509_certificate(der, openssl_backend) - - # We want to find the SAN extension. Ask Cryptography to locate it (it's - # faster than looping in Python) - try: - ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value - except x509.ExtensionNotFound: - # No such extension, return the empty list. - return [] - except ( - x509.DuplicateExtension, - UnsupportedExtension, - x509.UnsupportedGeneralNameType, - UnicodeError, - ) as e: - # A problem has been found with the quality of the certificate. Assume - # no SAN field is present. - log.warning( - "A problem was encountered with the certificate that prevented " - "urllib3 from finding the SubjectAlternativeName field. This can " - "affect certificate validation. The error was %s", - e, - ) - return [] - - # We want to return dNSName and iPAddress fields. We need to cast the IPs - # back to strings because the match_hostname function wants them as - # strings. - # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8 - # decoded. This is pretty frustrating, but that's what the standard library - # does with certificates, and so we need to attempt to do the same. - # We also want to skip over names which cannot be idna encoded. - names = [ - ("DNS", name) - for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName)) - if name is not None - ] - names.extend( - ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress) - ) - - return names - - -class WrappedSocket(object): - """API-compatibility wrapper for Python OpenSSL's Connection-class. - - Note: _makefile_refs, _drop() and _reuse() are needed for the garbage - collector of pypy. 
- """ - - def __init__(self, connection, socket, suppress_ragged_eofs=True): - self.connection = connection - self.socket = socket - self.suppress_ragged_eofs = suppress_ragged_eofs - self._makefile_refs = 0 - self._closed = False - - def fileno(self): - return self.socket.fileno() - - # Copy-pasted from Python 3.5 source code - def _decref_socketios(self): - if self._makefile_refs > 0: - self._makefile_refs -= 1 - if self._closed: - self.close() - - def recv(self, *args, **kwargs): - try: - data = self.connection.recv(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): - return b"" - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return b"" - else: - raise - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout("The read operation timed out") - else: - return self.recv(*args, **kwargs) - - # TLS 1.3 post-handshake authentication - except OpenSSL.SSL.Error as e: - raise ssl.SSLError("read error: %r" % e) - else: - return data - - def recv_into(self, *args, **kwargs): - try: - return self.connection.recv_into(*args, **kwargs) - except OpenSSL.SSL.SysCallError as e: - if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"): - return 0 - else: - raise SocketError(str(e)) - except OpenSSL.SSL.ZeroReturnError: - if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN: - return 0 - else: - raise - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(self.socket, self.socket.gettimeout()): - raise timeout("The read operation timed out") - else: - return self.recv_into(*args, **kwargs) - - # TLS 1.3 post-handshake authentication - except OpenSSL.SSL.Error as e: - raise ssl.SSLError("read error: %r" % e) - - def settimeout(self, timeout): - return self.socket.settimeout(timeout) - - def _send_until_done(self, data): - while True: - try: - return self.connection.send(data) - except OpenSSL.SSL.WantWriteError: - if not util.wait_for_write(self.socket, self.socket.gettimeout()): - raise timeout() - continue - except OpenSSL.SSL.SysCallError as e: - raise SocketError(str(e)) - - def sendall(self, data): - total_sent = 0 - while total_sent < len(data): - sent = self._send_until_done( - data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE] - ) - total_sent += sent - - def shutdown(self): - # FIXME rethrow compatible exceptions should we ever use this - self.connection.shutdown() - - def close(self): - if self._makefile_refs < 1: - try: - self._closed = True - return self.connection.close() - except OpenSSL.SSL.Error: - return - else: - self._makefile_refs -= 1 - - def getpeercert(self, binary_form=False): - x509 = self.connection.get_peer_certificate() - - if not x509: - return x509 - - if binary_form: - return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509) - - return { - "subject": ((("commonName", x509.get_subject().CN),),), - "subjectAltName": get_subj_alt_name(x509), - } - - def version(self): - return self.connection.get_protocol_version_name() - - def _reuse(self): - self._makefile_refs += 1 - - def _drop(self): - if self._makefile_refs < 1: - self.close() - else: - self._makefile_refs -= 1 - - -if _fileobject: # Platform-specific: Python 2 - - def makefile(self, mode, bufsize=-1): - self._makefile_refs += 1 - return _fileobject(self, mode, bufsize, close=True) - -else: # Platform-specific: Python 3 - makefile = 
backport_makefile - -WrappedSocket.makefile = makefile - - -class PyOpenSSLContext(object): - """ - I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible - for translating the interface of the standard library ``SSLContext`` object - to calls into PyOpenSSL. - """ - - def __init__(self, protocol): - self.protocol = _openssl_versions[protocol] - self._ctx = OpenSSL.SSL.Context(self.protocol) - self._options = 0 - self.check_hostname = False - - @property - def options(self): - return self._options - - @options.setter - def options(self, value): - self._options = value - self._ctx.set_options(value) - - @property - def verify_mode(self): - return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()] - - @verify_mode.setter - def verify_mode(self, value): - self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback) - - def set_default_verify_paths(self): - self._ctx.set_default_verify_paths() - - def set_ciphers(self, ciphers): - if isinstance(ciphers, six.text_type): - ciphers = ciphers.encode("utf-8") - self._ctx.set_cipher_list(ciphers) - - def load_verify_locations(self, cafile=None, capath=None, cadata=None): - if cafile is not None: - cafile = cafile.encode("utf-8") - if capath is not None: - capath = capath.encode("utf-8") - try: - self._ctx.load_verify_locations(cafile, capath) - if cadata is not None: - self._ctx.load_verify_locations(BytesIO(cadata)) - except OpenSSL.SSL.Error as e: - raise ssl.SSLError("unable to load trusted certificates: %r" % e) - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._ctx.use_certificate_chain_file(certfile) - if password is not None: - if not isinstance(password, six.binary_type): - password = password.encode("utf-8") - self._ctx.set_passwd_cb(lambda *_: password) - self._ctx.use_privatekey_file(keyfile or certfile) - - def set_alpn_protocols(self, protocols): - protocols = [six.ensure_binary(p) for p in protocols] - return self._ctx.set_alpn_protos(protocols) - - def wrap_socket( - self, - sock, - server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, - ): - cnx = OpenSSL.SSL.Connection(self._ctx, sock) - - if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 - server_hostname = server_hostname.encode("utf-8") - - if server_hostname is not None: - cnx.set_tlsext_host_name(server_hostname) - - cnx.set_connect_state() - - while True: - try: - cnx.do_handshake() - except OpenSSL.SSL.WantReadError: - if not util.wait_for_read(sock, sock.gettimeout()): - raise timeout("select timed out") - continue - except OpenSSL.SSL.Error as e: - raise ssl.SSLError("bad handshake: %r" % e) - break - - return WrappedSocket(cnx, sock) - - -def _verify_callback(cnx, x509, err_no, err_depth, return_code): - return err_no == 0 diff --git a/spaces/Branon/Proxy/greeting.md b/spaces/Branon/Proxy/greeting.md deleted file mode 100644 index 21c7de8be9398f4b356ffe7d75838fa166b4d5a6..0000000000000000000000000000000000000000 --- a/spaces/Branon/Proxy/greeting.md +++ /dev/null @@ -1 +0,0 @@ -lol \ No newline at end of file diff --git a/spaces/Burcin/ExtractiveSummarizer/README.md b/spaces/Burcin/ExtractiveSummarizer/README.md deleted file mode 100644 index 17abde9f3685703139286445e86fda4cfcde94ba..0000000000000000000000000000000000000000 --- a/spaces/Burcin/ExtractiveSummarizer/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: ExtractiveSummarizer -emoji: 📊 -colorFrom: red -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# 
Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/CVPR/Bamboo_ViT-B16_demo/timmvit.py b/spaces/CVPR/Bamboo_ViT-B16_demo/timmvit.py deleted file mode 100644 index 52c78c32ebb2016910addd66f361033c212085b5..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Bamboo_ViT-B16_demo/timmvit.py +++ /dev/null @@ -1,79 +0,0 @@ -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -import timm -import torch -import copy -import torch.nn as nn -import torchvision -import json -from timm.models.hub import download_cached_file -from PIL import Image - - - -class MyViT(nn.Module): - def __init__(self, num_classes=115217, pretrain_path=None, enable_fc=False): - super().__init__() - print('initializing ViT model as backbone using ckpt:', pretrain_path) - self.model = timm.create_model('vit_base_patch16_224',checkpoint_path=pretrain_path,num_classes=num_classes)# pretrained=True) - # def forward_features(self, x): - # x = self.model.patch_embed(x) - # cls_token = self.model.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks - # if self.model.dist_token is None: - # x = torch.cat((cls_token, x), dim=1) - # else: - # x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1) - - # x = self.model.pos_drop(x + self.model.pos_embed) - # x = self.model.blocks(x) - # x = self.model.norm(x) - - # return self.model.pre_logits(x[:, 0]) - - - def forward(self, x): - x = self.model.forward(x) - return x - - -def timmvit(**kwargs): - default_kwargs={} - default_kwargs.update(**kwargs) - return MyViT(**default_kwargs) - - -def build_transforms(input_size, center_crop=True): - transform = torchvision.transforms.Compose([ - torchvision.transforms.Resize(input_size * 8 // 7), - torchvision.transforms.CenterCrop(input_size), - torchvision.transforms.ToTensor(), - torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - ]) - return transform - -def pil_loader(filepath): - with Image.open(filepath) as img: - img = img.convert('RGB') - return img - -def test_build(): - with open('/mnt/lustre/yhzhang/bamboo/Bamboo_ViT-B16_demo/trainid2name.json') as f: - id2name = 
json.load(f) - img = pil_loader('/mnt/lustre/yhzhang/bamboo/Bamboo_ViT-B16_demo/142520422_6ad756ddf6_w_d.jpg') - eval_transforms = build_transforms(224) - img_t = eval_transforms(img) - img_t = img_t[None, :] - model = MyViT(pretrain_path='/mnt/lustre/yhzhang/bamboo/Bamboo_ViT-B16_demo/Bamboo_v0-1_ViT-B16.pth.tar.convert') - # image = torch.rand(1, 3, 224, 224) - output = model(img_t) - # import pdb;pdb.set_trace() - prediction = output.softmax(-1).flatten() - _,top5_idx = torch.topk(prediction, 5) - # import pdb;pdb.set_trace() - print({id2name[str(i)][0]: float(prediction[i]) for i in top5_idx.tolist()}) - -if __name__ == '__main__': - test_build() diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/rotated_boxes.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/rotated_boxes.py deleted file mode 100644 index 2e5968d9ef857b567fb929b90ae263833b58bc89..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/rotated_boxes.py +++ /dev/null @@ -1,498 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import math -from typing import Iterator, List, Union -import torch - -from detectron2.layers import cat -from detectron2.layers.rotated_boxes import pairwise_iou_rotated - -from .boxes import Boxes - - -class RotatedBoxes(Boxes): - """ - This structure stores a list of rotated boxes as a Nx5 torch.Tensor. - It supports some common methods about boxes - (`area`, `clip`, `nonempty`, etc), - and also behaves like a Tensor - (support indexing, `to(device)`, `.device`, and iteration over all boxes) - """ - - def __init__(self, tensor: torch.Tensor): - """ - Args: - tensor (Tensor[float]): a Nx5 matrix. Each row is - (x_center, y_center, width, height, angle), - in which angle is represented in degrees. - While there's no strict range restriction for it, - the recommended principal range is between [-180, 180) degrees. - - Assume we have a horizontal box B = (x_center, y_center, width, height), - where width is along the x-axis and height is along the y-axis. - The rotated box B_rot (x_center, y_center, width, height, angle) - can be seen as: - - 1. When angle == 0: - B_rot == B - 2. When angle > 0: - B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; - 3. When angle < 0: - B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. - - Mathematically, since the right-handed coordinate system for image space - is (y, x), where y is top->down and x is left->right, the 4 vertices of the - rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from - the vertices of the horizontal rectangle (y_i, x_i) (i = 1, 2, 3, 4) - in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, - (y_c, x_c) is the center of the rectangle): - - .. math:: - - yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, - - xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, - - which is the standard rigid-body rotation transformation. - - Intuitively, the angle is - (1) the rotation angle from y-axis in image space - to the height vector (top->down in the box's local coordinate system) - of the box in CCW, and - (2) the rotation angle from x-axis in image space - to the width vector (left->right in the box's local coordinate system) - of the box in CCW. 
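        As a minimal usage sketch (assuming ``RotatedBoxes`` is importable from
        ``detectron2.structures``, as elsewhere in this codebase):

        .. code:: python

            import torch
            from detectron2.structures import RotatedBoxes

            # the same box centered at (5, 3) with width 4 and height 2,
            # once axis-aligned and once rotated 90 degrees CCW
            boxes = RotatedBoxes(torch.tensor([
                [5.0, 3.0, 4.0, 2.0, 0.0],
                [5.0, 3.0, 4.0, 2.0, 90.0],
            ]))
            assert boxes.area().tolist() == [8.0, 8.0]  # area is width * height either way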
- - More intuitively, consider the following horizontal box ABCD represented - in (x1, y1, x2, y2): (3, 2, 7, 4), - covering the [3, 7] x [2, 4] region of the continuous coordinate system - which looks like this: - - .. code:: none - - O--------> x - | - | A---B - | | | - | D---C - | - v y - - Note that each capital letter represents one 0-dimensional geometric point - instead of a 'square pixel' here. - - In the example above, using (x, y) to represent a point we have: - - .. math:: - - O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) - - We name vector AB = vector DC as the width vector in box's local coordinate system, and - vector AD = vector BC as the height vector in box's local coordinate system. Initially, - when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis - in the image space, respectively. - - For better illustration, we denote the center of the box as E, - - .. code:: none - - O--------> x - | - | A---B - | | E | - | D---C - | - v y - - where the center E = ((3+7)/2, (2+4)/2) = (5, 3). - - Also, - - .. math:: - - width = |AB| = |CD| = 7 - 3 = 4, - height = |AD| = |BC| = 4 - 2 = 2. - - Therefore, the corresponding representation for the same shape in rotated box in - (x_center, y_center, width, height, angle) format is: - - (5, 3, 4, 2, 0), - - Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees - CCW (counter-clockwise) by definition. It looks like this: - - .. code:: none - - O--------> x - | B-C - | | | - | |E| - | | | - | A-D - v y - - The center E is still located at the same point (5, 3), while the vertices - ABCD are rotated by 90 degrees CCW with regard to E: - A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) - - Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to - vector AD or vector BC (the top->down height vector in box's local coordinate system), - or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right - width vector in box's local coordinate system). - - .. math:: - - width = |AB| = |CD| = 5 - 1 = 4, - height = |AD| = |BC| = 6 - 4 = 2. - - Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) - by definition? It looks like this: - - .. code:: none - - O--------> x - | D-A - | | | - | |E| - | | | - | C-B - v y - - The center E is still located at the same point (5, 3), while the vertices - ABCD are rotated by 90 degrees CW with regard to E: - A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) - - .. math:: - - width = |AB| = |CD| = 5 - 1 = 4, - height = |AD| = |BC| = 6 - 4 = 2. - - This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU - will be 1. However, these two will generate different RoI Pooling results and - should not be treated as an identical box. - - On the other hand, it's easy to see that (X, Y, W, H, A) is identical to - (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be - identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is - equivalent to rotating the same shape 90 degrees CW. - - We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): - - .. code:: none - - O--------> x - | - | C---D - | | E | - | B---A - | - v y - - .. math:: - - A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), - - width = |AB| = |CD| = 7 - 3 = 4, - height = |AD| = |BC| = 4 - 2 = 2. - - Finally, this is a very inaccurate (heavily quantized) illustration of - how (5, 3, 4, 2, 60) looks like in case anyone wonders: - - .. 
code:: none - - O--------> x - | B\ - | / C - | /E / - | A / - | `D - v y - - It's still a rectangle with center of (5, 3), width of 4 and height of 2, - but its angle (and thus orientation) is somewhere between - (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). - """ - device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") - tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) - if tensor.numel() == 0: - tensor = torch.zeros(0, 5, dtype=torch.float32, device=device) - assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() - - self.tensor = tensor - - def clone(self) -> "RotatedBoxes": - """ - Clone the RotatedBoxes. - - Returns: - RotatedBoxes - """ - return RotatedBoxes(self.tensor.clone()) - - def to(self, device: str) -> "RotatedBoxes": - return RotatedBoxes(self.tensor.to(device)) - - def area(self) -> torch.Tensor: - """ - Computes the area of all the boxes. - - Returns: - torch.Tensor: a vector with areas of each box. - """ - box = self.tensor - area = box[:, 2] * box[:, 3] - return area - - def normalize_angles(self) -> None: - """ - Restrict angles to the range of [-180, 180) degrees - """ - self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 - - def clip(self, box_size: Boxes.BoxSizeType, clip_angle_threshold: float = 1.0) -> None: - """ - Clip (in place) the boxes by limiting x coordinates to the range [0, width] - and y coordinates to the range [0, height]. - - For RRPN: - Only clip boxes that are almost horizontal with a tolerance of - clip_angle_threshold to maintain backward compatibility. - - Rotated boxes beyond this threshold are not clipped for two reasons: - - 1. There are potentially multiple ways to clip a rotated box to make it - fit within the image. - 2. It's tricky to make the entire rectangular box fit within the image - and still be able to not leave out pixels of interest. - - Therefore we rely on ops like RoIAlignRotated to safely handle this. - - Args: - box_size (height, width): The clipping box's size. - clip_angle_threshold: - Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), - we do the clipping as horizontal boxes. - """ - h, w = box_size - - # normalize angles to be within [-180, 180) degrees, as normalize_angles() guarantees - self.normalize_angles() - - idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] - - # convert to (x1, y1, x2, y2) - x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 - y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 - x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 - y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 - - # clip - x1.clamp_(min=0, max=w) - y1.clamp_(min=0, max=h) - x2.clamp_(min=0, max=w) - y2.clamp_(min=0, max=h) - - # convert back to (xc, yc, w, h) - self.tensor[idx, 0] = (x1 + x2) / 2.0 - self.tensor[idx, 1] = (y1 + y2) / 2.0 - # make sure widths and heights do not increase due to numerical errors - self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) - self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) - - def nonempty(self, threshold: int = 0) -> torch.Tensor: - """ - Find boxes that are non-empty. - A box is considered empty if either of its sides is no larger than threshold. - - Returns: - Tensor: a binary vector which represents - whether each box is empty (False) or non-empty (True). 
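            For example, a box with width 4 and height 2 is non-empty for any
            threshold below 2, and ``boxes[boxes.nonempty()]`` keeps only the
            non-degenerate boxes.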
- """ - box = self.tensor - widths = box[:, 2] - heights = box[:, 3] - keep = (widths > threshold) & (heights > threshold) - return keep - - def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "RotatedBoxes": - """ - Returns: - RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. - - The following usage are allowed: - - 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. - 2. `new_boxes = boxes[2:10]`: return a slice of boxes. - 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor - with `length = len(boxes)`. Nonzero elements in the vector will be selected. - - Note that the returned RotatedBoxes might share storage with this RotatedBoxes, - subject to Pytorch's indexing semantics. - """ - if isinstance(item, int): - return RotatedBoxes(self.tensor[item].view(1, -1)) - b = self.tensor[item] - assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( - item - ) - return RotatedBoxes(b) - - def __len__(self) -> int: - return self.tensor.shape[0] - - def __repr__(self) -> str: - return "RotatedBoxes(" + str(self.tensor) + ")" - - def inside_box(self, box_size: Boxes.BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor: - """ - Args: - box_size (height, width): Size of the reference box covering - [0, width] x [0, height] - boundary_threshold (int): Boxes that extend beyond the reference box - boundary by more than boundary_threshold are considered "outside". - - For RRPN, it might not be necessary to call this function since it's common - for rotated box to extend to outside of the image boundaries - (the clip function only clips the near-horizontal boxes) - - Returns: - a binary vector, indicating whether each box is inside the reference box. - """ - height, width = box_size - - cnt_x = self.tensor[..., 0] - cnt_y = self.tensor[..., 1] - half_w = self.tensor[..., 2] / 2.0 - half_h = self.tensor[..., 3] / 2.0 - a = self.tensor[..., 4] - c = torch.abs(torch.cos(a * math.pi / 180.0)) - s = torch.abs(torch.sin(a * math.pi / 180.0)) - # This basically computes the horizontal bounding rectangle of the rotated box - max_rect_dx = c * half_w + s * half_h - max_rect_dy = c * half_h + s * half_w - - inds_inside = ( - (cnt_x - max_rect_dx >= -boundary_threshold) - & (cnt_y - max_rect_dy >= -boundary_threshold) - & (cnt_x + max_rect_dx < width + boundary_threshold) - & (cnt_y + max_rect_dy < height + boundary_threshold) - ) - - return inds_inside - - def get_centers(self) -> torch.Tensor: - """ - Returns: - The box centers in a Nx2 array of (x, y). - """ - return self.tensor[:, :2] - - def scale(self, scale_x: float, scale_y: float) -> None: - """ - Scale the rotated box with horizontal and vertical scaling factors - Note: when scale_factor_x != scale_factor_y, - the rotated box does not preserve the rectangular shape when the angle - is not a multiple of 90 degrees under resize transformation. - Instead, the shape is a parallelogram (that has skew) - Here we make an approximation by fitting a rotated rectangle to the parallelogram. 
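        For example, scaling a 45-degree box with (scale_x, scale_y) = (2, 1)
        multiplies its width by sqrt((2 * cos45)^2 + (1 * sin45)^2) = sqrt(2.5) ~= 1.58
        rather than by 2: the width of the best-fitting rotated rectangle, not of
        the sheared parallelogram (see the derivation in the comments below).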
- """ - self.tensor[:, 0] *= scale_x - self.tensor[:, 1] *= scale_y - theta = self.tensor[:, 4] * math.pi / 180.0 - c = torch.cos(theta) - s = torch.sin(theta) - - # In image space, y is top->down and x is left->right - # Consider the local coordinate system for the rotated box, - # where the box center is located at (0, 0), and the four vertices ABCD are - # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) - # the midpoint of the left edge AD of the rotated box E is: - # E = (A+D)/2 = (-w / 2, 0) - # the midpoint of the top edge AB of the rotated box F is: - # F(0, -h / 2) - # To get the old coordinates in the global system, apply the rotation transformation - # (Note: the right-handed coordinate system for image space is yOx): - # (old_x, old_y) = (s * y + c * x, c * y - s * x) - # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) - # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) - # After applying the scaling factor (sfx, sfy): - # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) - # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) - # The new width after the scaling transformation becomes: - - # w(new) = |E(new) - O| * 2 - # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 - # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w - # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] - # - # For example, - # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; - # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y - self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) - - # h(new) = |F(new) - O| * 2 - # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 - # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h - # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] - # - # For example, - # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; - # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x - self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) - - # The angle is the rotation angle from y-axis in image space to the height - # vector (top->down in the box's local coordinate system) of the box in CCW. - # - # angle(new) = angle_yOx(O - F(new)) - # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) - # = atan2(sfx * s * h / 2, sfy * c * h / 2) - # = atan2(sfx * s, sfy * c) - # - # For example, - # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) - self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi - - @staticmethod - def cat(boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": # type: ignore - """ - Concatenates a list of RotatedBoxes into a single RotatedBoxes - - Arguments: - boxes_list (list[RotatedBoxes]) - - Returns: - RotatedBoxes: the concatenated RotatedBoxes - """ - assert isinstance(boxes_list, (list, tuple)) - assert len(boxes_list) > 0 - assert all(isinstance(box, RotatedBoxes) for box in boxes_list) - - cat_boxes = type(boxes_list[0])(cat([b.tensor for b in boxes_list], dim=0)) - return cat_boxes - - @property - def device(self) -> torch.device: - return self.tensor.device - - def __iter__(self) -> Iterator[torch.Tensor]: - """ - Yield a box as a Tensor of shape (5,) at a time. - """ - yield from self.tensor - - -def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor: - """ - Given two lists of rotated boxes of size N and M, - compute the IoU (intersection over union) - between __all__ N x M pairs of boxes. 
- The box order must be (x_center, y_center, width, height, angle). - - Args: - boxes1, boxes2 (RotatedBoxes): - two `RotatedBoxes`. Contains N & M rotated boxes, respectively. - - Returns: - Tensor: IoU, sized [N,M]. - """ - - return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/spaces/CVPR/LIVE/pybind11/tests/test_multiple_inheritance.py b/spaces/CVPR/LIVE/pybind11/tests/test_multiple_inheritance.py deleted file mode 100644 index 7a0259d2148f14aafeac67a43d3c906a0b5719d0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/tests/test_multiple_inheritance.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - -import env # noqa: F401 - -from pybind11_tests import ConstructorStats -from pybind11_tests import multiple_inheritance as m - - -def test_multiple_inheritance_cpp(): - mt = m.MIType(3, 4) - - assert mt.foo() == 3 - assert mt.bar() == 4 - - -@pytest.mark.skipif("env.PYPY and env.PY2") -@pytest.mark.xfail("env.PYPY and not env.PY2") -def test_multiple_inheritance_mix1(): - class Base1: - def __init__(self, i): - self.i = i - - def foo(self): - return self.i - - class MITypePy(Base1, m.Base2): - def __init__(self, i, j): - Base1.__init__(self, i) - m.Base2.__init__(self, j) - - mt = MITypePy(3, 4) - - assert mt.foo() == 3 - assert mt.bar() == 4 - - -def test_multiple_inheritance_mix2(): - class Base2: - def __init__(self, i): - self.i = i - - def bar(self): - return self.i - - class MITypePy(m.Base1, Base2): - def __init__(self, i, j): - m.Base1.__init__(self, i) - Base2.__init__(self, j) - - mt = MITypePy(3, 4) - - assert mt.foo() == 3 - assert mt.bar() == 4 - - -@pytest.mark.skipif("env.PYPY and env.PY2") -@pytest.mark.xfail("env.PYPY and not env.PY2") -def test_multiple_inheritance_python(): - - class MI1(m.Base1, m.Base2): - def __init__(self, i, j): - m.Base1.__init__(self, i) - m.Base2.__init__(self, j) - - class B1(object): - def v(self): - return 1 - - class MI2(B1, m.Base1, m.Base2): - def __init__(self, i, j): - B1.__init__(self) - m.Base1.__init__(self, i) - m.Base2.__init__(self, j) - - class MI3(MI2): - def __init__(self, i, j): - MI2.__init__(self, i, j) - - class MI4(MI3, m.Base2): - def __init__(self, i, j): - MI3.__init__(self, i, j) - # This should be ignored (Base2 is already initialized via MI2): - m.Base2.__init__(self, i + 100) - - class MI5(m.Base2, B1, m.Base1): - def __init__(self, i, j): - B1.__init__(self) - m.Base1.__init__(self, i) - m.Base2.__init__(self, j) - - class MI6(m.Base2, B1): - def __init__(self, i): - m.Base2.__init__(self, i) - B1.__init__(self) - - class B2(B1): - def v(self): - return 2 - - class B3(object): - def v(self): - return 3 - - class B4(B3, B2): - def v(self): - return 4 - - class MI7(B4, MI6): - def __init__(self, i): - B4.__init__(self) - MI6.__init__(self, i) - - class MI8(MI6, B3): - def __init__(self, i): - MI6.__init__(self, i) - B3.__init__(self) - - class MI8b(B3, MI6): - def __init__(self, i): - B3.__init__(self) - MI6.__init__(self, i) - - mi1 = MI1(1, 2) - assert mi1.foo() == 1 - assert mi1.bar() == 2 - - mi2 = MI2(3, 4) - assert mi2.v() == 1 - assert mi2.foo() == 3 - assert mi2.bar() == 4 - - mi3 = MI3(5, 6) - assert mi3.v() == 1 - assert mi3.foo() == 5 - assert mi3.bar() == 6 - - mi4 = MI4(7, 8) - assert mi4.v() == 1 - assert mi4.foo() == 7 - assert mi4.bar() == 8 - - mi5 = MI5(10, 11) - assert mi5.v() == 1 - assert mi5.foo() == 10 - assert mi5.bar() == 11 - - mi6 = MI6(12) - assert mi6.v() == 1 - assert mi6.bar() == 12 - - mi7 = MI7(13) - assert mi7.v() == 4 
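    # v() == 4 because MI7's MRO reaches B4 (whose v() returns 4) before B1,
    # which is only inherited indirectly via MI6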
- assert mi7.bar() == 13 - - mi8 = MI8(14) - assert mi8.v() == 1 - assert mi8.bar() == 14 - - mi8b = MI8b(15) - assert mi8b.v() == 3 - assert mi8b.bar() == 15 - - -def test_multiple_inheritance_python_many_bases(): - - class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4): - def __init__(self): - m.BaseN1.__init__(self, 1) - m.BaseN2.__init__(self, 2) - m.BaseN3.__init__(self, 3) - m.BaseN4.__init__(self, 4) - - class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8): - def __init__(self): - m.BaseN5.__init__(self, 5) - m.BaseN6.__init__(self, 6) - m.BaseN7.__init__(self, 7) - m.BaseN8.__init__(self, 8) - - class MIMany916(m.BaseN9, m.BaseN10, m.BaseN11, m.BaseN12, m.BaseN13, m.BaseN14, m.BaseN15, - m.BaseN16): - def __init__(self): - m.BaseN9.__init__(self, 9) - m.BaseN10.__init__(self, 10) - m.BaseN11.__init__(self, 11) - m.BaseN12.__init__(self, 12) - m.BaseN13.__init__(self, 13) - m.BaseN14.__init__(self, 14) - m.BaseN15.__init__(self, 15) - m.BaseN16.__init__(self, 16) - - class MIMany19(MIMany14, MIMany58, m.BaseN9): - def __init__(self): - MIMany14.__init__(self) - MIMany58.__init__(self) - m.BaseN9.__init__(self, 9) - - class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17): - def __init__(self): - MIMany14.__init__(self) - MIMany58.__init__(self) - MIMany916.__init__(self) - m.BaseN17.__init__(self, 17) - - # Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch: - a = MIMany14() - for i in range(1, 4): - assert getattr(a, "f" + str(i))() == 2 * i - - # Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch: - b = MIMany916() - for i in range(9, 16): - assert getattr(b, "f" + str(i))() == 2 * i - - # Inherits from 9: requires >= 2 pointers worth of holder flags - c = MIMany19() - for i in range(1, 9): - assert getattr(c, "f" + str(i))() == 2 * i - - # Inherits from 17: requires >= 3 pointers worth of holder flags - d = MIMany117() - for i in range(1, 17): - assert getattr(d, "f" + str(i))() == 2 * i - - -def test_multiple_inheritance_virtbase(): - - class MITypePy(m.Base12a): - def __init__(self, i, j): - m.Base12a.__init__(self, i, j) - - mt = MITypePy(3, 4) - assert mt.bar() == 4 - assert m.bar_base2a(mt) == 4 - assert m.bar_base2a_sharedptr(mt) == 4 - - -def test_mi_static_properties(): - """Mixing bases with and without static properties should be possible - and the result should be independent of base definition order""" - - for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()): - assert d.vanilla() == "Vanilla" - assert d.static_func1() == "WithStatic1" - assert d.static_func2() == "WithStatic2" - assert d.static_func() == d.__class__.__name__ - - m.WithStatic1.static_value1 = 1 - m.WithStatic2.static_value2 = 2 - assert d.static_value1 == 1 - assert d.static_value2 == 2 - assert d.static_value == 12 - - d.static_value1 = 0 - assert d.static_value1 == 0 - d.static_value2 = 0 - assert d.static_value2 == 0 - d.static_value = 0 - assert d.static_value == 0 - - -# Requires PyPy 6+ -def test_mi_dynamic_attributes(): - """Mixing bases with and without dynamic attribute support""" - - for d in (m.VanillaDictMix1(), m.VanillaDictMix2()): - d.dynamic = 1 - assert d.dynamic == 1 - - -def test_mi_unaligned_base(): - """Returning an offset (non-first MI) base class pointer should recognize the instance""" - - n_inst = ConstructorStats.detail_reg_inst() - - c = m.I801C() - d = m.I801D() - # + 4 below because we have the two instances, and each instance has offset base I801B2 - assert ConstructorStats.detail_reg_inst() == n_inst + 4 
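    # each i801b*_ helper below returns an offset (non-first) base-class pointer
    # into the same C++ object; pybind11 should map it back to the existing
    # Python instance rather than creating a new one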
- b1c = m.i801b1_c(c) - assert b1c is c - b2c = m.i801b2_c(c) - assert b2c is c - b1d = m.i801b1_d(d) - assert b1d is d - b2d = m.i801b2_d(d) - assert b2d is d - - assert ConstructorStats.detail_reg_inst() == n_inst + 4 # no extra instances - del c, b1c, b2c - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - del d, b1d, b2d - assert ConstructorStats.detail_reg_inst() == n_inst - - -def test_mi_base_return(): - """Tests returning an offset (non-first MI) base class pointer to a derived instance""" - - n_inst = ConstructorStats.detail_reg_inst() - - c1 = m.i801c_b1() - assert type(c1) is m.I801C - assert c1.a == 1 - assert c1.b == 2 - - d1 = m.i801d_b1() - assert type(d1) is m.I801D - assert d1.a == 1 - assert d1.b == 2 - - assert ConstructorStats.detail_reg_inst() == n_inst + 4 - - c2 = m.i801c_b2() - assert type(c2) is m.I801C - assert c2.a == 1 - assert c2.b == 2 - - d2 = m.i801d_b2() - assert type(d2) is m.I801D - assert d2.a == 1 - assert d2.b == 2 - - assert ConstructorStats.detail_reg_inst() == n_inst + 8 - - del c2 - assert ConstructorStats.detail_reg_inst() == n_inst + 6 - del c1, d1, d2 - assert ConstructorStats.detail_reg_inst() == n_inst - - # Returning an unregistered derived type with a registered base; we won't - # pick up the derived type, obviously, but should still work (as an object - # of whatever type was returned). - e1 = m.i801e_c() - assert type(e1) is m.I801C - assert e1.a == 1 - assert e1.b == 2 - - e2 = m.i801e_b2() - assert type(e2) is m.I801B2 - assert e2.b == 2 - - -def test_diamond_inheritance(): - """Tests that diamond inheritance works as expected (issue #959)""" - - # Issue #959: this shouldn't segfault: - d = m.D() - - # Make sure all the various distinct pointers are all recognized as registered instances: - assert d is d.c0() - assert d is d.c1() - assert d is d.b() - assert d is d.c0().b() - assert d is d.c1().b() - assert d is d.c0().c1().b().c0().b() diff --git a/spaces/CVPR/LIVE/pydiffvg/parse_svg.py b/spaces/CVPR/LIVE/pydiffvg/parse_svg.py deleted file mode 100644 index fb1f3fc286074f3cd82b37baffbdd00440b72a8a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pydiffvg/parse_svg.py +++ /dev/null @@ -1,583 +0,0 @@ -import torch -import xml.etree.ElementTree as etree -import numpy as np -import diffvg -import os -import pydiffvg -import svgpathtools -import svgpathtools.parser -import re -import warnings -import cssutils -import logging -import matplotlib.colors -cssutils.log.setLevel(logging.ERROR) - -def remove_namespaces(s): - """ - {...} ... -> ... 
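        e.g. '{http://www.w3.org/2000/svg}path' -> 'path'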
- """ - return re.sub('{.*}', '', s) - -def parse_style(s, defs): - style_dict = {} - for e in s.split(';'): - key_value = e.split(':') - if len(key_value) == 2: - key = key_value[0].strip() - value = key_value[1].strip() - if key == 'fill' or key == 'stroke': - # Special case: convert colors into tensor in definitions so - # that different shapes can share the same color - value = parse_color(value, defs) - style_dict[key] = value - return style_dict - -def parse_hex(s): - """ - Hex to tuple - """ - s = s.lstrip('#') - if len(s) == 3: - s = s[0] + s[0] + s[1] + s[1] + s[2] + s[2] - rgb = tuple(int(s[i:i+2], 16) for i in (0, 2, 4)) - # sRGB to RGB - # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) - return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 1.0) - -def parse_int(s): - """ - trim alphabets - """ - return int(float(''.join(i for i in s if (not i.isalpha())))) - -def parse_color(s, defs): - if s is None: - return None - if isinstance(s, torch.Tensor): - return s - s = s.lstrip(' ') - color = torch.tensor([0.0, 0.0, 0.0, 1.0]) - if s[0] == '#': - color[:3] = parse_hex(s) - elif s[:3] == 'url': - # url(#id) - color = defs[s[4:-1].lstrip('#')] - elif s == 'none': - color = None - elif s[:4] == 'rgb(': - rgb = s[4:-1].split(',') - color = torch.tensor([int(rgb[0]) / 255.0, int(rgb[1]) / 255.0, int(rgb[2]) / 255.0, 1.0]) - elif s == 'none': - return None - else: - try : - rgba = matplotlib.colors.to_rgba(s) - color = torch.tensor(rgba) - except ValueError : - warnings.warn('Unknown color command ' + s) - return color - -# https://github.com/mathandy/svgpathtools/blob/7ebc56a831357379ff22216bec07e2c12e8c5bc6/svgpathtools/parser.py -def _parse_transform_substr(transform_substr): - type_str, value_str = transform_substr.split('(') - value_str = value_str.replace(',', ' ') - values = list(map(float, filter(None, value_str.split(' ')))) - - transform = np.identity(3) - if 'matrix' in type_str: - transform[0:2, 0:3] = np.array([values[0:6:2], values[1:6:2]]) - elif 'translate' in transform_substr: - transform[0, 2] = values[0] - if len(values) > 1: - transform[1, 2] = values[1] - elif 'scale' in transform_substr: - x_scale = values[0] - y_scale = values[1] if (len(values) > 1) else x_scale - transform[0, 0] = x_scale - transform[1, 1] = y_scale - elif 'rotate' in transform_substr: - angle = values[0] * np.pi / 180.0 - if len(values) == 3: - offset = values[1:3] - else: - offset = (0, 0) - tf_offset = np.identity(3) - tf_offset[0:2, 2:3] = np.array([[offset[0]], [offset[1]]]) - tf_rotate = np.identity(3) - tf_rotate[0:2, 0:2] = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) - tf_offset_neg = np.identity(3) - tf_offset_neg[0:2, 2:3] = np.array([[-offset[0]], [-offset[1]]]) - - transform = tf_offset.dot(tf_rotate).dot(tf_offset_neg) - elif 'skewX' in transform_substr: - transform[0, 1] = np.tan(values[0] * np.pi / 180.0) - elif 'skewY' in transform_substr: - transform[1, 0] = np.tan(values[0] * np.pi / 180.0) - else: - # Return an identity matrix if the type of transform is unknown, and warn the user - warnings.warn('Unknown SVG transform type: {0}'.format(type_str)) - return transform - -def parse_transform(transform_str): - """ - Converts a valid SVG transformation string into a 3x3 matrix. 
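    e.g. 'translate(10 20) rotate(45)' becomes the product of the corresponding
    translation and rotation matrices.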
- If the string is empty or null, this returns a 3x3 identity matrix - """ - if not transform_str: - return np.identity(3) - elif not isinstance(transform_str, str): - raise TypeError('Must provide a string to parse') - - total_transform = np.identity(3) - transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty - for substr in transform_substrs: - total_transform = total_transform.dot(_parse_transform_substr(substr)) - - return torch.from_numpy(total_transform).type(torch.float32) - -def parse_linear_gradient(node, transform, defs): - begin = torch.tensor([0.0, 0.0]) - end = torch.tensor([0.0, 0.0]) - offsets = [] - stop_colors = [] - # Inherit from parent - for key in node.attrib: - if remove_namespaces(key) == 'href': - value = node.attrib[key] - parent = defs[value.lstrip('#')] - begin = parent.begin - end = parent.end - offsets = parent.offsets - stop_colors = parent.stop_colors - - for attrib in node.attrib: - attrib = remove_namespaces(attrib) - if attrib == 'x1': - begin[0] = float(node.attrib['x1']) - elif attrib == 'y1': - begin[1] = float(node.attrib['y1']) - elif attrib == 'x2': - end[0] = float(node.attrib['x2']) - elif attrib == 'y2': - end[1] = float(node.attrib['y2']) - elif attrib == 'gradientTransform': - transform = transform @ parse_transform(node.attrib['gradientTransform']) - - begin = transform @ torch.cat((begin, torch.ones([1]))) - begin = begin / begin[2] - begin = begin[:2] - end = transform @ torch.cat((end, torch.ones([1]))) - end = end / end[2] - end = end[:2] - - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'stop': - offset = float(child.attrib['offset']) - color = [0.0, 0.0, 0.0, 1.0] - if 'stop-color' in child.attrib: - c = parse_color(child.attrib['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in child.attrib: - color[3] = float(child.attrib['stop-opacity']) - if 'style' in child.attrib: - style = parse_style(child.attrib['style'], defs) - if 'stop-color' in style: - c = parse_color(style['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in style: - color[3] = float(style['stop-opacity']) - offsets.append(offset) - stop_colors.append(color) - if isinstance(offsets, list): - offsets = torch.tensor(offsets) - if isinstance(stop_colors, list): - stop_colors = torch.tensor(stop_colors) - - return pydiffvg.LinearGradient(begin, end, offsets, stop_colors) - - -def parse_radial_gradient(node, transform, defs): - begin = torch.tensor([0.0, 0.0]) - end = torch.tensor([0.0, 0.0]) - center = torch.tensor([0.0, 0.0]) - radius = torch.tensor([0.0, 0.0]) - offsets = [] - stop_colors = [] - # Inherit from parent - for key in node.attrib: - if remove_namespaces(key) == 'href': - value = node.attrib[key] - parent = defs[value.lstrip('#')] - begin = parent.begin - end = parent.end - offsets = parent.offsets - stop_colors = parent.stop_colors - - for attrib in node.attrib: - attrib = remove_namespaces(attrib) - if attrib == 'cx': - center[0] = float(node.attrib['cx']) - elif attrib == 'cy': - center[1] = float(node.attrib['cy']) - elif attrib == 'fx': - radius[0] = float(node.attrib['fx']) - elif attrib == 'fy': - radius[1] = float(node.attrib['fy']) - elif attrib == 'fr': - radius[0] = float(node.attrib['fr']) - radius[1] = float(node.attrib['fr']) - elif attrib == 'gradientTransform': - transform = transform @ parse_transform(node.attrib['gradientTransform']) - - # TODO: this is incorrect - center = transform @ torch.cat((center, torch.ones([1]))) - center 
= center / center[2] - center = center[:2] - - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'stop': - offset = float(child.attrib['offset']) - color = [0.0, 0.0, 0.0, 1.0] - if 'stop-color' in child.attrib: - c = parse_color(child.attrib['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in child.attrib: - color[3] = float(child.attrib['stop-opacity']) - if 'style' in child.attrib: - style = parse_style(child.attrib['style'], defs) - if 'stop-color' in style: - c = parse_color(style['stop-color'], defs) - color[:3] = [c[0], c[1], c[2]] - if 'stop-opacity' in style: - color[3] = float(style['stop-opacity']) - offsets.append(offset) - stop_colors.append(color) - if isinstance(offsets, list): - offsets = torch.tensor(offsets) - if isinstance(stop_colors, list): - stop_colors = torch.tensor(stop_colors) - - return pydiffvg.RadialGradient(begin, end, offsets, stop_colors) - -def parse_stylesheet(node, transform, defs): - # collect CSS classes - sheet = cssutils.parseString(node.text) - for rule in sheet: - if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): - name = rule.selectorText - if len(name) >= 2 and name[0] == '.': - defs[name[1:]] = parse_style(rule.style.getCssText(), defs) - return defs - -def parse_defs(node, transform, defs): - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'linearGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) - elif tag == 'radialGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) - elif tag == 'style': - defs = parse_stylesheet(child, transform, defs) - return defs - -def parse_common_attrib(node, transform, fill_color, defs): - attribs = {} - if 'class' in node.attrib: - attribs.update(defs[node.attrib['class']]) - attribs.update(node.attrib) - - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - - stroke_color = None - stroke_width = torch.tensor(0.5) - use_even_odd_rule = False - - new_transform = transform - if 'transform' in attribs: - new_transform = transform @ parse_transform(attribs['transform']) - if 'fill' in attribs: - fill_color = parse_color(attribs['fill'], defs) - fill_opacity = 1.0 - if 'fill-opacity' in attribs: - fill_opacity *= float(attribs['fill-opacity']) - if 'opacity' in attribs: - fill_opacity *= float(attribs['opacity']) - # Ignore opacity if the color is a gradient - if isinstance(fill_color, torch.Tensor): - fill_color[3] = fill_opacity - - if 'fill-rule' in attribs: - if attribs['fill-rule'] == "evenodd": - use_even_odd_rule = True - elif attribs['fill-rule'] == "nonzero": - use_even_odd_rule = False - else: - warnings.warn('Unknown fill-rule: {}'.format(attribs['fill-rule'])) - - if 'stroke' in attribs: - stroke_color = parse_color(attribs['stroke'], defs) - - if 'stroke-width' in attribs: - stroke_width = attribs['stroke-width'] - if stroke_width[-2:] == 'px': - stroke_width = stroke_width[:-2] - stroke_width = torch.tensor(float(stroke_width) / 2.0) - - if 'style' in attribs: - style = parse_style(attribs['style'], defs) - if 'fill' in style: - fill_color = parse_color(style['fill'], defs) - fill_opacity = 1.0 - if 'fill-opacity' in style: - fill_opacity *= float(style['fill-opacity']) - if 'opacity' in style: - fill_opacity *= float(style['opacity']) - if 'fill-rule' in style: - if style['fill-rule'] == "evenodd": - use_even_odd_rule = True - elif style['fill-rule'] == "nonzero": - use_even_odd_rule = False - else: - 
warnings.warn('Unknown fill-rule: {}'.format(style['fill-rule'])) - # Ignore opacity if the color is a gradient - if isinstance(fill_color, torch.Tensor): - fill_color[3] = fill_opacity - if 'stroke' in style: - if style['stroke'] != 'none': - stroke_color = parse_color(style['stroke'], defs) - # Ignore opacity if the color is a gradient - if isinstance(stroke_color, torch.Tensor): - if 'stroke-opacity' in style: - stroke_color[3] = float(style['stroke-opacity']) - if 'opacity' in style: - stroke_color[3] *= float(style['opacity']) - if 'stroke-width' in style: - stroke_width = style['stroke-width'] - if stroke_width[-2:] == 'px': - stroke_width = stroke_width[:-2] - stroke_width = torch.tensor(float(stroke_width) / 2.0) - - if isinstance(fill_color, pydiffvg.LinearGradient): - fill_color.begin = new_transform @ torch.cat((fill_color.begin, torch.ones([1]))) - fill_color.begin = fill_color.begin / fill_color.begin[2] - fill_color.begin = fill_color.begin[:2] - fill_color.end = new_transform @ torch.cat((fill_color.end, torch.ones([1]))) - fill_color.end = fill_color.end / fill_color.end[2] - fill_color.end = fill_color.end[:2] - if isinstance(stroke_color, pydiffvg.LinearGradient): - stroke_color.begin = new_transform @ torch.cat((stroke_color.begin, torch.ones([1]))) - stroke_color.begin = stroke_color.begin / stroke_color.begin[2] - stroke_color.begin = stroke_color.begin[:2] - stroke_color.end = new_transform @ torch.cat((stroke_color.end, torch.ones([1]))) - stroke_color.end = stroke_color.end / stroke_color.end[2] - stroke_color.end = stroke_color.end[:2] - if 'filter' in style: - print('*** WARNING ***: Ignoring filter for path with id "{}"'.format(name)) - - return new_transform, fill_color, stroke_color, stroke_width, use_even_odd_rule - -def is_shape(tag): - # 'ellipse' is included so the ellipse branch in parse_shape is reachable - return tag == 'path' or tag == 'polygon' or tag == 'line' or tag == 'circle' or tag == 'ellipse' or tag == 'rect' - -def parse_shape(node, transform, fill_color, shapes, shape_groups, defs): - tag = remove_namespaces(node.tag) - new_transform, new_fill_color, stroke_color, stroke_width, use_even_odd_rule = \ - parse_common_attrib(node, transform, fill_color, defs) - if tag == 'path': - d = node.attrib['d'] - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - force_closing = new_fill_color is not None - paths = pydiffvg.from_svg_path(d, new_transform, force_closing) - for idx, path in enumerate(paths): - assert(path.points.shape[1] == 2) - path.stroke_width = stroke_width - path.source_id = name - path.id = "{}-{}".format(name,idx) if len(paths)>1 else name - prev_shapes_size = len(shapes) - shapes = shapes + paths - shape_ids = torch.tensor(list(range(prev_shapes_size, len(shapes)))) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - id = name)) - elif tag == 'polygon': - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - force_closing = new_fill_color is not None - pts = node.attrib['points'].strip() - pts = pts.split(' ') - pts = [[float(y) for y in re.split(',| ', x)] for x in pts if x] - pts = torch.tensor(pts, dtype=torch.float32).view(-1, 2) - polygon = pydiffvg.Polygon(pts, force_closing) - polygon.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(polygon) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform, - id = name)) - elif tag == 'line': - x1 = float(node.attrib['x1']) - y1 = float(node.attrib['y1']) - x2 = float(node.attrib['x2']) - y2 = float(node.attrib['y2']) - p1 = torch.tensor([x1, y1]) - p2 = torch.tensor([x2, y2]) - points = torch.stack((p1, p2)) - line = pydiffvg.Polygon(points, False) - line.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(line) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'circle': - radius = float(node.attrib['r']) - cx = float(node.attrib['cx']) - cy = float(node.attrib['cy']) - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - center = torch.tensor([cx, cy]) - circle = pydiffvg.Circle(radius = torch.tensor(radius), - center = center) - circle.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(circle) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'ellipse': - rx = float(node.attrib['rx']) - ry = float(node.attrib['ry']) - cx = float(node.attrib['cx']) - cy = float(node.attrib['cy']) - name = '' - if 'id' in node.attrib: - name = node.attrib['id'] - center = torch.tensor([cx, cy]) - # an ellipse has two radii; pydiffvg.Ellipse expects them as a 2-vector - ellipse = pydiffvg.Ellipse(radius = torch.tensor([rx, ry]), - center = center) - ellipse.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(ellipse) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - elif tag == 'rect': - x = 0.0 - y = 0.0 - if 'x' in node.attrib: - x = float(node.attrib['x']) - if 'y' in node.attrib: - y = float(node.attrib['y']) - w = float(node.attrib['width']) - h = float(node.attrib['height']) - p_min = torch.tensor([x, y]) - p_max = torch.tensor([x + w, y + h]) - rect = pydiffvg.Rect(p_min = p_min, p_max = p_max) - rect.stroke_width = stroke_width - shape_ids = torch.tensor([len(shapes)]) - shapes.append(rect) - shape_groups.append(pydiffvg.ShapeGroup(\ - shape_ids = shape_ids, - fill_color = new_fill_color, - stroke_color = stroke_color, - use_even_odd_rule = use_even_odd_rule, - shape_to_canvas = new_transform)) - return shapes, shape_groups - -def parse_group(node, transform, fill_color, shapes, shape_groups, defs): - if 'transform' in node.attrib: - transform = transform @ parse_transform(node.attrib['transform']) - if 'fill' in node.attrib: - fill_color = parse_color(node.attrib['fill'], defs) - for child in node: - tag = remove_namespaces(child.tag) - if is_shape(tag): - shapes, shape_groups = parse_shape(\ - child, transform, fill_color, shapes, shape_groups, defs) - elif tag == 'g': - shapes, shape_groups = parse_group(\ - child, transform, fill_color, shapes, shape_groups, defs) - return shapes, shape_groups - -def parse_scene(node): - canvas_width = -1 - canvas_height = -1 - defs = {} - shapes = [] - shape_groups = [] - fill_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) - transform = torch.eye(3) - if 'viewBox' in node.attrib: - view_box_array = node.attrib['viewBox'].split() - canvas_width = parse_int(view_box_array[2]) - canvas_height = parse_int(view_box_array[3]) - else: - if 
'width' in node.attrib: - canvas_width = parse_int(node.attrib['width']) - else: - print('Warning: Can\'t find canvas width.') - if 'height' in node.attrib: - canvas_height = parse_int(node.attrib['height']) - else: - print('Warning: Can\'t find canvas height.') - for child in node: - tag = remove_namespaces(child.tag) - if tag == 'defs': - defs = parse_defs(child, transform, defs) - elif tag == 'style': - defs = parse_stylesheet(child, transform, defs) - elif tag == 'linearGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) - elif tag == 'radialGradient': - if 'id' in child.attrib: - defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) - elif is_shape(tag): - shapes, shape_groups = parse_shape(\ - child, transform, fill_color, shapes, shape_groups, defs) - elif tag == 'g': - shapes, shape_groups = parse_group(\ - child, transform, fill_color, shapes, shape_groups, defs) - return canvas_width, canvas_height, shapes, shape_groups - -def svg_to_scene(filename): - """ - Load from a SVG file and convert to PyTorch tensors. - """ - - tree = etree.parse(filename) - root = tree.getroot() - cwd = os.getcwd() - if (os.path.dirname(filename) != ''): - os.chdir(os.path.dirname(filename)) - ret = parse_scene(root) - os.chdir(cwd) - return ret diff --git a/spaces/CVPR/LIVE/thrust/thrust/mr/disjoint_pool.h b/spaces/CVPR/LIVE/thrust/thrust/mr/disjoint_pool.h deleted file mode 100644 index 898e499c807dc48a35c7dafe3da00d2885b62396..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/mr/disjoint_pool.h +++ /dev/null @@ -1,489 +0,0 @@ -/* - * Copyright 2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file disjoint_pool.h - * \brief A caching and pooling memory resource adaptor which uses separate upstream resources for memory allocation - * and bookkeeping. - */ - -#pragma once - -#include - -#include -#include -#include - -#include -#include -#include - -#include - -namespace thrust -{ -namespace mr -{ - -/** \addtogroup memory_resources Memory Resources - * \ingroup memory_management_classes - * \{ - */ - -/*! A memory resource adaptor allowing for pooling and caching allocations from \p Upstream, using \p Bookkeeper for - * management of that cached and pooled memory, allowing to cache portions of memory inaccessible from the host. - * - * On a typical memory resource, calls to \p allocate and \p deallocate actually allocate and deallocate memory. Pooling - * memory resources only allocate and deallocate memory from an external resource (the upstream memory resource) when - * there's no suitable memory currently cached; otherwise, they use memory they have acquired beforehand, to make - * memory allocation faster and more efficient. - * - * The disjoint version of the pool resources uses a separate upstream memory resource, \p Bookkeeper, to allocate memory - * necessary to manage the cached memory. 
There may be many reasons to do that; the canonical one is that \p Upstream - * allocates memory that is inaccessible to the code of the pool resource, which means that it cannot embed the necessary - * information in memory obtained from \p Upstream; for instance, \p Upstream can be a CUDA non-managed memory - * resource, or a CUDA managed memory resource whose memory we would prefer to not migrate back and forth between - * host and device when executing bookkeeping code. - * - * This is not the only case where it makes sense to use a disjoint pool resource, though. In a multi-core environment - * it may be beneficial to avoid stealing cache lines from other cores by writing over bookkeeping information - * embedded in an allocated block of memory. In such a case, one can imagine wanting to use a disjoint pool where - * both the upstream and the bookkeeper are of the same type, to allocate memory consistently, but separately for - * those two purposes. - * - * \tparam Upstream the type of memory resources that will be used for allocating memory blocks to be handed off to the user - * \tparam Bookkeeper the type of memory resources that will be used for allocating bookkeeping memory - */ -template<typename Upstream, typename Bookkeeper> -class disjoint_unsynchronized_pool_resource THRUST_FINAL - : public memory_resource<typename Upstream::pointer>, - private validator2<Upstream, Bookkeeper> -{ -public: - /*! Get the default options for a disjoint pool. These are meant to be a sensible set of values for many use cases, - * and as such, may be tuned in the future. This function is exposed so that creating a set of options that are - * just a slight departure from the defaults is easy. - */ - static pool_options get_default_options() - { - pool_options ret; - - ret.min_blocks_per_chunk = 16; - ret.min_bytes_per_chunk = 1024; - ret.max_blocks_per_chunk = static_cast<std::size_t>(1) << 20; - ret.max_bytes_per_chunk = static_cast<std::size_t>(1) << 30; - - ret.smallest_block_size = THRUST_MR_DEFAULT_ALIGNMENT; - ret.largest_block_size = static_cast<std::size_t>(1) << 20; - - ret.alignment = THRUST_MR_DEFAULT_ALIGNMENT; - - ret.cache_oversized = true; - - ret.cached_size_cutoff_factor = 16; - ret.cached_alignment_cutoff_factor = 16; - - return ret; - } - - /*! Constructor. - * - * \param upstream the upstream memory resource for allocations - * \param bookkeeper the upstream memory resource for bookkeeping - * \param options pool options to use - */ - disjoint_unsynchronized_pool_resource(Upstream * upstream, Bookkeeper * bookkeeper, - pool_options options = get_default_options()) - : m_upstream(upstream), - m_bookkeeper(bookkeeper), - m_options(options), - m_smallest_block_log2(detail::log2_ri(m_options.smallest_block_size)), - m_pools(m_bookkeeper), - m_allocated(m_bookkeeper), - m_cached_oversized(m_bookkeeper), - m_oversized(m_bookkeeper) - { - assert(m_options.validate()); - - pointer_vector free(m_bookkeeper); - pool p(free); - m_pools.resize(detail::log2_ri(m_options.largest_block_size) - m_smallest_block_log2 + 1, p); - } - - // TODO: C++11: use delegating constructors - - /*! Constructor. Upstream and bookkeeping resources are obtained by calling \p get_global_resource for their types. 
- * - * \param options pool options to use - */ - disjoint_unsynchronized_pool_resource(pool_options options = get_default_options()) - : m_upstream(get_global_resource<Upstream>()), - m_bookkeeper(get_global_resource<Bookkeeper>()), - m_options(options), - m_smallest_block_log2(detail::log2_ri(m_options.smallest_block_size)), - m_pools(m_bookkeeper), - m_allocated(m_bookkeeper), - m_cached_oversized(m_bookkeeper), - m_oversized(m_bookkeeper) - { - assert(m_options.validate()); - - pointer_vector free(m_bookkeeper); - pool p(free); - m_pools.resize(detail::log2_ri(m_options.largest_block_size) - m_smallest_block_log2 + 1, p); - } - - /*! Destructor. Releases all held memory to upstream. - */ - ~disjoint_unsynchronized_pool_resource() - { - release(); - } - -private: - typedef typename Upstream::pointer void_ptr; - typedef typename thrust::detail::pointer_traits<void_ptr>::template rebind<char>::other char_ptr; - - struct chunk_descriptor - { - std::size_t size; - void_ptr pointer; - }; - - typedef thrust::host_vector< - chunk_descriptor, - allocator<chunk_descriptor, Bookkeeper> - > chunk_vector; - - struct oversized_block_descriptor - { - std::size_t size; - std::size_t alignment; - void_ptr pointer; - - __host__ __device__ - bool operator==(const oversized_block_descriptor & other) const - { - return size == other.size && alignment == other.alignment && pointer == other.pointer; - } - - __host__ __device__ - bool operator<(const oversized_block_descriptor & other) const - { - return size < other.size || (size == other.size && alignment < other.alignment); - } - }; - - struct equal_pointers - { - public: - __host__ __device__ - equal_pointers(void_ptr p) : p(p) - { - } - - __host__ __device__ - bool operator()(const oversized_block_descriptor & desc) const - { - return desc.pointer == p; - } - - private: - void_ptr p; - }; - - struct matching_alignment - { - public: - __host__ __device__ - matching_alignment(std::size_t requested) : requested(requested) - { - } - - __host__ __device__ - bool operator()(const oversized_block_descriptor & desc) const - { - return desc.alignment >= requested; - } - - private: - std::size_t requested; - }; - - typedef thrust::host_vector< - oversized_block_descriptor, - allocator<oversized_block_descriptor, Bookkeeper> - > oversized_block_vector; - - typedef thrust::host_vector< - void_ptr, - allocator<void_ptr, Bookkeeper> - > pointer_vector; - - struct pool - { - __host__ - pool(const pointer_vector & free) - : free_blocks(free), - previous_allocated_count(0) - { - } - - __host__ - pool(const pool & other) - : free_blocks(other.free_blocks), - previous_allocated_count(other.previous_allocated_count) - { - } - -#if THRUST_CPP_DIALECT >= 2011 - pool & operator=(const pool &) = default; -#endif - - __host__ - ~pool() {} - - pointer_vector free_blocks; - std::size_t previous_allocated_count; - }; - - typedef thrust::host_vector< - pool, - allocator<pool, Bookkeeper> - > pool_vector; - - Upstream * m_upstream; - Bookkeeper * m_bookkeeper; - - pool_options m_options; - std::size_t m_smallest_block_log2; - - // buckets containing free lists for each pooled size - pool_vector m_pools; - // list of all allocations from upstream for the above - chunk_vector m_allocated; - // list of all cached oversized/overaligned blocks that have been returned to the pool to cache - oversized_block_vector m_cached_oversized; - // list of all oversized/overaligned allocations from upstream - oversized_block_vector m_oversized; - -public: - /*! Releases all held memory to upstream. 
- */ - void release() - { - // reset the buckets - for (std::size_t i = 0; i < m_pools.size(); ++i) - { - m_pools[i].free_blocks.clear(); - m_pools[i].previous_allocated_count = 0; - } - - // deallocate memory allocated for the buckets - for (std::size_t i = 0; i < m_allocated.size(); ++i) - { - m_upstream->do_deallocate( - m_allocated[i].pointer, - m_allocated[i].size, - m_options.alignment); - } - - // deallocate cached oversized/overaligned memory - for (std::size_t i = 0; i < m_oversized.size(); ++i) - { - m_upstream->do_deallocate( - m_oversized[i].pointer, - m_oversized[i].size, - m_oversized[i].alignment); - } - - m_allocated.clear(); - m_oversized.clear(); - m_cached_oversized.clear(); - } - - THRUST_NODISCARD virtual void_ptr do_allocate(std::size_t bytes, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE - { - bytes = (std::max)(bytes, m_options.smallest_block_size); - assert(detail::is_power_of_2(alignment)); - - // an oversized and/or overaligned allocation requested; needs to be allocated separately - if (bytes > m_options.largest_block_size || alignment > m_options.alignment) - { - oversized_block_descriptor oversized; - oversized.size = bytes; - oversized.alignment = alignment; - - if (m_options.cache_oversized && !m_cached_oversized.empty()) - { - typename oversized_block_vector::iterator it = thrust::lower_bound( - thrust::seq, - m_cached_oversized.begin(), - m_cached_oversized.end(), - oversized); - - // if the size is bigger than the requested size by a factor - // bigger than or equal to the specified cutoff for size, - // allocate a new block - if (it != m_cached_oversized.end()) - { - std::size_t size_factor = (*it).size / bytes; - if (size_factor >= m_options.cached_size_cutoff_factor) - { - it = m_cached_oversized.end(); - } - } - - if (it != m_cached_oversized.end() && (*it).alignment < alignment) - { - it = find_if(it + 1, m_cached_oversized.end(), matching_alignment(alignment)); - } - - // if the alignment is bigger than the requested one by a factor - // bigger than or equal to the specified cutoff for alignment, - // allocate a new block - if (it != m_cached_oversized.end()) - { - std::size_t alignment_factor = (*it).alignment / alignment; - if (alignment_factor >= m_options.cached_alignment_cutoff_factor) - { - it = m_cached_oversized.end(); - } - } - - if (it != m_cached_oversized.end()) - { - oversized.pointer = (*it).pointer; - m_cached_oversized.erase(it); - return oversized.pointer; - } - } - - // no fitting cached block found; allocate a new one that's just up to the specs - oversized.pointer = m_upstream->do_allocate(bytes, alignment); - m_oversized.push_back(oversized); - - return oversized.pointer; - } - - // the request is NOT for oversized and/or overaligned memory - // allocate a block from an appropriate bucket - std::size_t bytes_log2 = thrust::detail::log2_ri(bytes); - std::size_t bucket_idx = bytes_log2 - m_smallest_block_log2; - pool & bucket = m_pools[bucket_idx]; - - // if the free list of the bucket has no elements, allocate a new chunk - // and split it into blocks pushed to the free list - if (bucket.free_blocks.empty()) - { - std::size_t bucket_size = static_cast(1) << bytes_log2; - - std::size_t n = bucket.previous_allocated_count; - if (n == 0) - { - n = m_options.min_blocks_per_chunk; - if (n < (m_options.min_bytes_per_chunk >> bytes_log2)) - { - n = m_options.min_bytes_per_chunk >> bytes_log2; - } - } - else - { - n = n * 3 / 2; - if (n > (m_options.max_bytes_per_chunk >> bytes_log2)) - { - n = 
- }
- if (n > m_options.max_blocks_per_chunk)
- {
- n = m_options.max_blocks_per_chunk;
- }
- }
-
- bytes = n << bytes_log2;
-
- assert(n >= m_options.min_blocks_per_chunk);
- assert(n <= m_options.max_blocks_per_chunk);
- assert(bytes >= m_options.min_bytes_per_chunk);
- assert(bytes <= m_options.max_bytes_per_chunk);
-
- chunk_descriptor allocated;
- allocated.size = bytes;
- allocated.pointer = m_upstream->do_allocate(bytes, m_options.alignment);
- m_allocated.push_back(allocated);
- bucket.previous_allocated_count = n;
-
- for (std::size_t i = 0; i < n; ++i)
- {
- bucket.free_blocks.push_back(
- static_cast<void_ptr>(
- static_cast<char_ptr>(allocated.pointer) + i * bucket_size
- )
- );
- }
- }
-
- // allocate a block from the back of the bucket's free list
- void_ptr ret = bucket.free_blocks.back();
- bucket.free_blocks.pop_back();
- return ret;
- }
-
- virtual void do_deallocate(void_ptr p, std::size_t n, std::size_t alignment = THRUST_MR_DEFAULT_ALIGNMENT) THRUST_OVERRIDE
- {
- n = (std::max)(n, m_options.smallest_block_size);
- assert(detail::is_power_of_2(alignment));
-
- // verify that the pointer is at least as aligned as claimed
- assert(reinterpret_cast<detail::intptr_t>(detail::pointer_traits<void_ptr>::get(p)) % alignment == 0);
-
- // the deallocated block is oversized and/or overaligned
- if (n > m_options.largest_block_size || alignment > m_options.alignment)
- {
- typename oversized_block_vector::iterator it = find_if(m_oversized.begin(), m_oversized.end(), equal_pointers(p));
- assert(it != m_oversized.end());
-
- oversized_block_descriptor oversized = *it;
-
- if (m_options.cache_oversized)
- {
- typename oversized_block_vector::iterator position = lower_bound(m_cached_oversized.begin(), m_cached_oversized.end(), oversized);
- m_cached_oversized.insert(position, oversized);
- return;
- }
-
- m_oversized.erase(it);
-
- m_upstream->do_deallocate(p, oversized.size, oversized.alignment);
-
- return;
- }
-
- // push the block to the back of the appropriate bucket's free list
- std::size_t n_log2 = thrust::detail::log2_ri(n);
- std::size_t bucket_idx = n_log2 - m_smallest_block_log2;
- pool & bucket = m_pools[bucket_idx];
-
- bucket.free_blocks.push_back(p);
- }
-};
-
-/*! \}
- */
-
-} // end mr
-} // end thrust
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/memory.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/memory.h
deleted file mode 100644
index 18b31e758de483d77fc1c84f515e4117575ce852..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/memory.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2008-2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*! \file thrust/system/cpp/memory.h
- * \brief Managing memory associated with Thrust's standard C++ system.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/cpp/memory_resource.h>
-#include <thrust/memory.h>
-#include <thrust/detail/type_traits.h>
-#include <thrust/mr/allocator.h>
-#include <thrust/pair.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace cpp
-{
-/*! Allocates an area of memory available to Thrust's cpp system.
- * \param n Number of bytes to allocate.
- * \return A cpp::pointer<void> pointing to the beginning of the newly
- * allocated memory. A null cpp::pointer<void> is returned if
- * an error occurs.
- * \note The cpp::pointer<void> returned by this function must be
- * deallocated with \p cpp::free.
- * \see cpp::free
- * \see std::malloc
- */
-inline pointer<void> malloc(std::size_t n);
-
-/*! Allocates a typed area of memory available to Thrust's cpp system.
- * \param n Number of elements to allocate.
- * \return A cpp::pointer<T> pointing to the beginning of the newly
- * allocated elements. A null cpp::pointer<T> is returned if
- * an error occurs.
- * \note The cpp::pointer<T> returned by this function must be
- * deallocated with \p cpp::free.
- * \see cpp::free
- * \see std::malloc
- */
-template<typename T>
-inline pointer<T> malloc(std::size_t n);
-
-/*! Deallocates an area of memory previously allocated by cpp::malloc.
- * \param ptr A cpp::pointer<void> pointing to the beginning of an area
- * of memory previously allocated with cpp::malloc.
- * \see cpp::malloc
- * \see std::free
- */
-inline void free(pointer<void> ptr);
-
-/*! \p cpp::allocator is the default allocator used by the \p cpp system's containers such as
- * cpp::vector if no user-specified allocator is provided. \p cpp::allocator allocates
- * (deallocates) storage with \p cpp::malloc (\p cpp::free).
- */
-template<typename T>
-using allocator = thrust::mr::stateless_resource_allocator<T, memory_resource>;
-
-} // end cpp
-
-} // end system
-
-/*! \namespace thrust::cpp
- * \brief \p thrust::cpp is a top-level alias for thrust::system::cpp.
- */
-namespace cpp
-{
-
-using thrust::system::cpp::malloc;
-using thrust::system::cpp::free;
-using thrust::system::cpp::allocator;
-
-} // end cpp
-
-} // end thrust
-
-#include <thrust/system/cpp/detail/memory.inl>
diff --git a/spaces/CVPR/SPOTER_Sign_Language_Recognition/README.md b/spaces/CVPR/SPOTER_Sign_Language_Recognition/README.md
deleted file mode 100644
index f062a0a1cdddc38463632b63bb59e6abc325476d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/SPOTER_Sign_Language_Recognition/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Spoter Sign language recognition demo
-emoji: 🧏
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.0.6
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/mask_point_head.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/mask_point_head.py
deleted file mode 100644
index fb92903a9488a44b984a489a354d838cc88f8ad4..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/mask_point_head.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
-
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule, normal_init
-from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
-
-from mmdet.models.builder import HEADS, build_loss
-
-
-@HEADS.register_module()
-class MaskPointHead(nn.Module):
- """A mask point head used in PointRend.
-
- ``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
- nn.Conv1d) to predict the logit of input points. The fine-grained feature
- and coarse feature are concatenated together for prediction.
-
- Args:
- num_fcs (int): Number of fc layers in the head. Default: 3.
- in_channels (int): Number of input channels. Default: 256.
- fc_channels (int): Number of fc channels. Default: 256.
- num_classes (int): Number of classes for logits. Default: 80.
- class_agnostic (bool): Whether to use class-agnostic classification.
- If so, the output channels of logits will be 1. Default: False.
- coarse_pred_each_layer (bool): Whether to concatenate the coarse feature
- with the output of each fc layer. Default: True.
- conv_cfg (dict | None): Dictionary to construct and config conv layer.
- Default: dict(type='Conv1d').
- norm_cfg (dict | None): Dictionary to construct and config norm layer.
- Default: None.
- loss_point (dict): Dictionary to construct and config loss layer of
- point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
- loss_weight=1.0).
- """
-
- def __init__(self,
- num_classes,
- num_fcs=3,
- in_channels=256,
- fc_channels=256,
- class_agnostic=False,
- coarse_pred_each_layer=True,
- conv_cfg=dict(type='Conv1d'),
- norm_cfg=None,
- act_cfg=dict(type='ReLU'),
- loss_point=dict(
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
- super().__init__()
- self.num_fcs = num_fcs
- self.in_channels = in_channels
- self.fc_channels = fc_channels
- self.num_classes = num_classes
- self.class_agnostic = class_agnostic
- self.coarse_pred_each_layer = coarse_pred_each_layer
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.loss_point = build_loss(loss_point)
-
- fc_in_channels = in_channels + num_classes
- self.fcs = nn.ModuleList()
- for _ in range(num_fcs):
- fc = ConvModule(
- fc_in_channels,
- fc_channels,
- kernel_size=1,
- stride=1,
- padding=0,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
- self.fcs.append(fc)
- fc_in_channels = fc_channels
- fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
-
- out_channels = 1 if self.class_agnostic else self.num_classes
- self.fc_logits = nn.Conv1d(
- fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def init_weights(self):
- """Initialize last classification layer of MaskPointHead, conv layers
- are already initialized by ConvModule."""
- normal_init(self.fc_logits, std=0.001)
-
- def forward(self, fine_grained_feats, coarse_feats):
- """Classify each point based on fine-grained and coarse feats.
-
- Args:
- fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
- shape (num_rois, in_channels, num_points).
- coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
- shape (num_rois, num_classes, num_points).
-
- Returns:
- Tensor: Point classification results,
- shape (num_rois, num_classes, num_points).
- """
-
- x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
- for fc in self.fcs:
- x = fc(x)
- if self.coarse_pred_each_layer:
- x = torch.cat((x, coarse_feats), dim=1)
- return self.fc_logits(x)
-
- def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
- cfg):
- """Get training targets of MaskPointHead for all images.
-
- Args:
- rois (Tensor): Region of Interest, shape (num_rois, 5).
- rel_roi_points: Points coordinates relative to RoI, shape
- (num_rois, num_points, 2).
- sampling_results (:obj:`SamplingResult`): Sampling result after
- sampling and assignment.
- gt_masks (Tensor): Ground truth segmentation masks of
- corresponding boxes, shape (num_rois, height, width).
- cfg (dict): Training cfg.
-
- Returns:
- Tensor: Point target, shape (num_rois, num_points).
- """
-
- num_imgs = len(sampling_results)
- rois_list = []
- rel_roi_points_list = []
- for batch_ind in range(num_imgs):
- inds = (rois[:, 0] == batch_ind)
- rois_list.append(rois[inds])
- rel_roi_points_list.append(rel_roi_points[inds])
- pos_assigned_gt_inds_list = [
- res.pos_assigned_gt_inds for res in sampling_results
- ]
- cfg_list = [cfg for _ in range(num_imgs)]
-
- point_targets = map(self._get_target_single, rois_list,
- rel_roi_points_list, pos_assigned_gt_inds_list,
- gt_masks, cfg_list)
- point_targets = list(point_targets)
-
- if len(point_targets) > 0:
- point_targets = torch.cat(point_targets)
-
- return point_targets
-
- def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
- gt_masks, cfg):
- """Get training target of MaskPointHead for each image."""
- num_pos = rois.size(0)
- num_points = cfg.num_points
- if num_pos > 0:
- gt_masks_th = (
- gt_masks.to_tensor(rois.dtype, rois.device).index_select(
- 0, pos_assigned_gt_inds))
- gt_masks_th = gt_masks_th.unsqueeze(1)
- rel_img_points = rel_roi_point_to_rel_img_point(
- rois, rel_roi_points, gt_masks_th.shape[2:])
- point_targets = point_sample(gt_masks_th,
- rel_img_points).squeeze(1)
- else:
- point_targets = rois.new_zeros((0, num_points))
- return point_targets
-
- def loss(self, point_pred, point_targets, labels):
- """Calculate loss for MaskPointHead.
-
- Args:
- point_pred (Tensor): Point prediction result, shape
- (num_rois, num_classes, num_points).
- point_targets (Tensor): Point targets, shape (num_rois, num_points).
- labels (Tensor): Class label of corresponding boxes,
- shape (num_rois, )
-
- Returns:
- dict[str, Tensor]: a dictionary of point loss components
- """
-
- loss = dict()
- if self.class_agnostic:
- loss_point = self.loss_point(point_pred, point_targets,
- torch.zeros_like(labels))
- else:
- loss_point = self.loss_point(point_pred, point_targets, labels)
- loss['loss_point'] = loss_point
- return loss
-
- def _get_uncertainty(self, mask_pred, labels):
- """Estimate uncertainty based on pred logits.
-
- We estimate uncertainty as L1 distance between 0.0 and the logits
- prediction in 'mask_pred' for the foreground class in `classes`.
-
- Args:
- mask_pred (Tensor): mask prediction logits, shape (num_rois,
- num_classes, mask_height, mask_width).
-
- labels (list[Tensor]): Either predicted or ground truth label for
- each predicted mask, of length num_rois.
-
- Returns:
- scores (Tensor): Uncertainty scores with the most uncertain
- locations having the highest uncertainty score,
- shape (num_rois, 1, mask_height, mask_width)
- """
- if mask_pred.shape[1] == 1:
- gt_class_logits = mask_pred.clone()
- else:
- inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
- gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
- return -torch.abs(gt_class_logits)
-
- def get_roi_rel_points_train(self, mask_pred, labels, cfg):
- """Get ``num_points`` most uncertain points with random points during
- train.
-
- Sample points in [0, 1] x [0, 1] coordinate space based on their
- uncertainty. The uncertainties are calculated for each point using
- the '_get_uncertainty()' function that takes point's logit prediction as
- input.
-
- Args:
- mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
- mask_height, mask_width) for class-specific or class-agnostic
- prediction.
- labels (list): The ground truth class for each instance.
- cfg (dict): Training config of point head.
-
- Returns:
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
- that contains the coordinates of sampled points.
- """
- num_points = cfg.num_points
- oversample_ratio = cfg.oversample_ratio
- importance_sample_ratio = cfg.importance_sample_ratio
- assert oversample_ratio >= 1
- assert 0 <= importance_sample_ratio <= 1
- batch_size = mask_pred.shape[0]
- num_sampled = int(num_points * oversample_ratio)
- point_coords = torch.rand(
- batch_size, num_sampled, 2, device=mask_pred.device)
- point_logits = point_sample(mask_pred, point_coords)
- # It is crucial to calculate uncertainty based on the sampled
- # prediction value for the points. Calculating uncertainties of the
- # coarse predictions first and sampling them for points leads to
- # incorrect results. To illustrate this: assume uncertainty func(
- # logits)=-abs(logits), a sampled point between two coarse
- # predictions with -1 and 1 logits has 0 logits, and therefore 0
- # uncertainty value. However, if we calculate uncertainties for the
- # coarse predictions first, both will have -1 uncertainty,
- # and sampled point will get -1 uncertainty.
- point_uncertainties = self._get_uncertainty(point_logits, labels)
- num_uncertain_points = int(importance_sample_ratio * num_points)
- num_random_points = num_points - num_uncertain_points
- idx = torch.topk(
- point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
- shift = num_sampled * torch.arange(
- batch_size, dtype=torch.long, device=mask_pred.device)
- idx += shift[:, None]
- point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
- batch_size, num_uncertain_points, 2)
- if num_random_points > 0:
- rand_roi_coords = torch.rand(
- batch_size, num_random_points, 2, device=mask_pred.device)
- point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
- return point_coords
-
- def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
- """Get ``num_points`` most uncertain points during test.
-
- Args:
- mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
- mask_height, mask_width) for class-specific or class-agnostic
- prediction.
- pred_label (list): The predicted class for each instance.
- cfg (dict): Testing config of point head.
-
- Returns:
- point_indices (Tensor): A tensor of shape (num_rois, num_points)
- that contains indices from [0, mask_height x mask_width) of the
- most uncertain points.
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
- that contains [0, 1] x [0, 1] normalized coordinates of the
- most uncertain points from the [mask_height, mask_width] grid.
- """ - num_points = cfg.subdivision_num_points - uncertainty_map = self._get_uncertainty(mask_pred, pred_label) - num_rois, _, mask_height, mask_width = uncertainty_map.shape - h_step = 1.0 / mask_height - w_step = 1.0 / mask_width - - uncertainty_map = uncertainty_map.view(num_rois, - mask_height * mask_width) - num_points = min(mask_height * mask_width, num_points) - point_indices = uncertainty_map.topk(num_points, dim=1)[1] - point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2) - point_coords[:, :, 0] = w_step / 2.0 + (point_indices % - mask_width).float() * w_step - point_coords[:, :, 1] = h_step / 2.0 + (point_indices // - mask_width).float() * h_step - return point_indices, point_coords diff --git a/spaces/Catspindev/monadical-labs-minecraft-skin-generator/app.py b/spaces/Catspindev/monadical-labs-minecraft-skin-generator/app.py deleted file mode 100644 index 89c4f4e9bc708b98181e006c5b2658a95daed022..0000000000000000000000000000000000000000 --- a/spaces/Catspindev/monadical-labs-minecraft-skin-generator/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/monadical-labs/minecraft-skin-generator").launch() \ No newline at end of file diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/brian.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/brian.py deleted file mode 100644 index 821fdf2f482a9cfa928e5c9680152ad6766d8326..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/brian.py +++ /dev/null @@ -1,40 +0,0 @@ -""" Brian speech module for autogpt """ -import os - -import requests -from playsound import playsound - -from autogpt.speech.base import VoiceBase - - -class BrianSpeech(VoiceBase): - """Brian speech module for autogpt""" - - def _setup(self) -> None: - """Setup the voices, API key, etc.""" - pass - - def _speech(self, text: str, _: int = 0) -> bool: - """Speak text using Brian with the streamelements API - - Args: - text (str): The text to speak - - Returns: - bool: True if the request was successful, False otherwise - """ - tts_url = ( - f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" - ) - response = requests.get(tts_url) - - if response.status_code == 200: - with open("speech.mp3", "wb") as f: - f.write(response.content) - playsound("speech.mp3") - os.remove("speech.mp3") - return True - else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) - return False diff --git a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/utils.py b/spaces/Chintan-Donda/KKMS-KSSW-HF/src/utils.py deleted file mode 100644 index 3f6554d70691876642f7ba83824cf21f7ac76c4c..0000000000000000000000000000000000000000 --- a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import re -import pandas as pd -from urllib.parse import urlparse - -import logging -logger = logging.getLogger(__name__) -logging.basicConfig( - format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S" -) - - -class UTILS: - def __init__(self): - pass - - - def split_text( - self, - text - ): - text = text.split(',') - text = [t.strip() for t in text] - return text - - - def replace_newlines_and_spaces( - self, - text - ): - # Replace all newline characters with spaces - text = text.replace("\n", " ") - # Replace multiple spaces with a single space - text = re.sub(r'\s+', ' ', text) - return text - - - def clean_df( - self, - df, - dropna=True, - fillna=False - ): - if 
fillna:
- df.fillna('', inplace=True)
- if dropna:
- df.dropna(inplace=True)
- # df = df[~df.isna()]
- df = df.drop_duplicates().reset_index(drop=True)
- return df
-
-
- def validate_url_format(
- self,
- urls,
- url_type='urls'
- ):
- valid_urls = []
- for url in urls:
- result = urlparse(url)
- # Check if the url is valid
- if all([result.scheme, result.netloc]):
- # Online PDF urls should end with .pdf extension
- if url_type == 'online_pdf' and not url.endswith('.pdf'):
- continue
- valid_urls.append(url)
- logging.info(f'Valid URLs are: {valid_urls}')
- return valid_urls
diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/meta.js b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/meta.js
deleted file mode 100644
index 7ccbe710f3af046d991e1540519efad1e1f39e7a..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/meta.js
+++ /dev/null
@@ -1,64 +0,0 @@
-import { Config, Version } from '../components/index.js'
-import { getLatestMsg } from './msgMap.js'
-/**
- * Lifecycle
- * @param {*} socket
- */
-function lifecycle(uin) {
- return JSON.stringify({
- meta_event_type: 'lifecycle',
- post_type: 'meta_event',
- self_id: uin,
- sub_type: 'connect',
- time: Date.parse(new Date()) / 1000
- })
-}
-
-/**
- * Heartbeat
- * @param {*} socket
- */
-function heartbeat(uin) {
- let latestMsg = getLatestMsg()
- let time = 0
- if (latestMsg) {
- time = latestMsg.time
- }
- let status
- if (Version.isTrss) {
- status = {
- online: true,
- good: true,
- stat: {}
- }
- } else {
- status = {
- online: Bot.isOnline(),
- good: Bot.isOnline(),
- stat: {
- packet_received: Bot.stat.recv_pkt_cnt,
- packet_send: Bot.stat.sent_pkt_cnt,
- packet_lost: Bot.stat.lost_pkt_cnt,
- message_received: Bot.stat.recv_msg_cnt,
- message_send: Bot.stat.sent_msg_cnt,
- disconnect_times: 0,
- lost_times: Bot.stat.lost_times,
- last_message_time: time
- }
-
- }
- }
- return JSON.stringify({
- time: Date.parse(new Date()) / 1000,
- self_id: uin,
- post_type: 'meta_event',
- meta_event_type: 'heartbeat',
- status,
- interval: Config.heartbeatInterval * 1000
- })
-}
-
-export {
- lifecycle,
- heartbeat
-}
\ No newline at end of file
diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/kick_ball/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/kick_ball/__init__.py
deleted file mode 100644
index 47fec9f3f8e50c733e6cd22084146d94325fe0b5..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/meme-api/meme_generator/memes/kick_ball/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from PIL.Image import Image as IMG
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.utils import save_gif
-
-img_dir = Path(__file__).parent / "images"
-
-
-def kick_ball(images: List[BuildImage], texts, args):
- img = images[0].convert("RGBA").square().resize((78, 78))
- # fmt: off
- locs = [
- (57, 136), (56, 117), (55, 99), (52, 113), (50, 126),
- (48, 139), (47, 112), (47, 85), (47, 57), (48, 97),
- (50, 136), (51, 176), (52, 169), (55, 181), (58, 153)
- ]
- # fmt: on
- frames: List[IMG] = []
- for i in range(15):
- frame = BuildImage.open(img_dir / f"{i}.png")
- frame.paste(img.rotate(-24 * i), locs[i], below=True)
- frames.append(frame.image)
- return save_gif(frames, 0.1)
-
-
-add_meme("kick_ball", kick_ball, min_images=1, max_images=1, keywords=["踢球"])
diff --git "a/spaces/Cong723/gpt-academic-public/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/spaces/Cong723/gpt-academic-public/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py"
deleted file mode 100644
index 6a7d118b4439605db6e10b9a416a2e725b99a672..0000000000000000000000000000000000000000
--- "a/spaces/Cong723/gpt-academic-public/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py"
+++ /dev/null
@@ -1,102 +0,0 @@
-from toolbox import CatchException, update_ui
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
-import requests
-from bs4 import BeautifulSoup
-from request_llm.bridge_all import model_info
-
-def google(query, proxies):
- query = query # replace this with the keyword you want to search for
- url = f"https://www.google.com/search?q={query}"
- headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
- response = requests.get(url, headers=headers, proxies=proxies)
- soup = BeautifulSoup(response.content, 'html.parser')
- results = []
- for g in soup.find_all('div', class_='g'):
- anchors = g.find_all('a')
- if anchors:
- link = anchors[0]['href']
- if link.startswith('/url?q='):
- link = link[7:]
- if not link.startswith('http'):
- continue
- title = g.find('h3').text
- item = {'title': title, 'link': link}
- results.append(item)
-
- for r in results:
- print(r['link'])
- return results
-
-def scrape_text(url, proxies) -> str:
- """Scrape text from a webpage
-
- Args:
- url (str): The URL to scrape text from
-
- Returns:
- str: The scraped text
- """
- headers = {
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
- 'Content-Type': 'text/plain',
- }
- try:
- response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
- if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
- except:
- return "Unable to connect to this webpage"
- soup = BeautifulSoup(response.text, "html.parser")
- for script in soup(["script", "style"]):
- script.extract()
- text = soup.get_text()
- lines = (line.strip() for line in text.splitlines())
- chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
- text = "\n".join(chunk for chunk in chunks if chunk)
- return text
-
-@CatchException
-def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
- txt            text the user typed into the input box, e.g. a passage to translate or a path to files to process
- llm_kwargs     GPT model parameters such as temperature and top_p, usually passed through as-is
- plugin_kwargs  plugin model parameters, currently unused
- chatbot        handle of the chat display box, used to show output to the user
- history        chat history, i.e. the preceding context
- system_prompt  silent system prompt for GPT
- web_port       port number the software is currently running on
- """
- history = []    # clear the history to avoid input overflow
- chatbot.append((f"Please answer the following question using information from the internet: {txt}",
- "[Local Message] Note: you are calling a [function plugin] template that lets ChatGPT aggregate information from the internet. It is aimed at developers who want to implement more interesting features, and it can serve as a template for new function plugins. If you would like to share new feature modules, PRs are welcome!"))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # since the GPT request takes a while, update the UI promptly first
-
- # ------------- < Step 1: scrape search engine results > -------------
- from toolbox import get_conf
- proxies, = get_conf('proxies')
- urls = google(txt, proxies)
- history = []
-
- # ------------- < Step 2: visit the webpages one by one > -------------
- max_search_result = 5 # maximum number of webpage results to collect
- for index, url in enumerate(urls[:max_search_result]):
- res = scrape_text(url['link'], proxies)
- history.extend([f"Search result {index}:", res])
- chatbot.append([f"Search result {index}:", res[:500]+"......"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # since the GPT request takes a while, update the UI promptly first
-
- # ------------- < Step 3: ChatGPT synthesis > -------------
- i_say = f"Extract information from the search results above, then answer the question: {txt}"
- i_say, history = input_clipping( # clip the input, trimming the longest entries first, to avoid exceeding the token limit
- inputs=i_say,
- history=history,
- max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4
- )
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say, inputs_show_user=i_say,
- llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
- sys_prompt="Extract information from the given search results, summarize the two most relevant results, and then answer the question."
- )
- chatbot[-1] = (i_say, gpt_say)
- history.append(i_say);history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
diff --git a/spaces/Cong723/gpt-academic-public/request_llm/bridge_jittorllms.py b/spaces/Cong723/gpt-academic-public/request_llm/bridge_jittorllms.py
deleted file mode 100644
index 28d0a7aab745cca4a1cdaded3c4803319000b5f0..0000000000000000000000000000000000000000
--- a/spaces/Cong723/gpt-academic-public/request_llm/bridge_jittorllms.py
+++ /dev/null
@@ -1,153 +0,0 @@
-
-from transformers import AutoModel, AutoTokenizer
-import time
-import threading
-import importlib
-from toolbox import update_ui, get_conf
-from multiprocessing import Process, Pipe
-
-load_message = "jittorllms has not been loaded yet; loading takes a while. Note: depending on the settings in `config.py`, jittorllms consumes a large amount of memory (CPU) or VRAM (GPU), which may freeze low-end machines ..."
-
-#################################################################################
-class GetGLMHandle(Process):
- def __init__(self):
- super().__init__(daemon=True)
- self.parent, self.child = Pipe()
- self.jittorllms_model = None
- self.info = ""
- self.success = True
- self.check_dependency()
- self.start()
- self.threadLock = threading.Lock()
-
- def check_dependency(self):
- try:
- import jittor
- from .jittorllms.models import get_model
- self.info = "dependency check passed"
- self.success = True
- except:
- self.info = r"Missing jittorllms dependencies. To use jittorllms, besides the basic pip dependencies, you also need to run `pip install -r request_llm/requirements_jittorllms.txt`"+\
- r" and `git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms` (run both commands from the project root) to install them."
- self.success = False
-
- def ready(self):
- return self.jittorllms_model is not None
-
- def run(self):
- # executed in the child process
- # on the first run, load the model parameters
- def load_model():
- import types
- try:
- if self.jittorllms_model is None:
- device, = get_conf('LOCAL_MODEL_DEVICE')
- from .jittorllms.models import get_model
- # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
- args_dict = {'model': 'chatglm', 'RUN_DEVICE':'cpu'}
- self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
- except:
- self.child.send('[Local Message] Call jittorllms fail: could not load the jittorllms parameters.')
- raise RuntimeError("Could not load the jittorllms parameters!")
-
- load_model()
-
- # enter the task-waiting state
- while True:
- # enter the task-waiting state
- kwargs = self.child.recv()
- # message received; start the request
- try:
- for response, history in self.jittorllms_model.run_web_demo(kwargs['query'], kwargs['history']):
- self.child.send(response)
- except:
- self.child.send('[Local Message] Call jittorllms fail.')
- # request finished; start the next loop
- self.child.send('[Finish]')
-
- def stream_chat(self, **kwargs):
- # executed in the main process
- self.threadLock.acquire()
- self.parent.send(kwargs)
- while True:
- res = self.parent.recv()
- if res != '[Finish]':
- yield res
- else:
- break
- self.threadLock.release()
-
-global glm_handle
-glm_handle = None
-#################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
- """
- Multi-threaded method
- For documentation of this function, see request_llm/bridge_all.py
- """
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
- if not glm_handle.success:
- error = glm_handle.info
- glm_handle = None
- raise RuntimeError(error)
-
- # jittorllms has no sys_prompt interface, so put the prompt into the history
- history_feedin = []
- history_feedin.append(["What can I do?", sys_prompt])
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
- watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
- response = ""
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- if len(observe_window) >= 1: observe_window[0] = response
- if len(observe_window) >= 2:
- if (time.time()-observe_window[1]) > watch_dog_patience:
- raise RuntimeError("Program terminated.")
- return response
-
-
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
- """
- Single-threaded method
- For documentation of this function, see request_llm/bridge_all.py
- """
- chatbot.append((inputs, ""))
-
- global glm_handle
- if glm_handle is None:
- glm_handle = GetGLMHandle()
- chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
- yield from update_ui(chatbot=chatbot, history=[])
- if not glm_handle.success:
- glm_handle = None
- return
-
- if additional_fn is not None:
- import core_functional
- importlib.reload(core_functional) # hot-reload the prompts
- core_functional = core_functional.get_core_functions()
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the preprocessing function (if any)
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
- # process the history
- history_feedin = []
- history_feedin.append(["What can I do?", system_prompt] )
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
- # start receiving the jittorllms reply
- response = "[Local Message]: waiting for the jittorllms response ..."
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- chatbot[-1] = (inputs, response)
- yield from update_ui(chatbot=chatbot, history=history)
-
- # summarize the output
- if response == "[Local Message]: waiting for the jittorllms response ...":
- response = "[Local Message]: jittorllms response exception ..."
- history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/Cran-May/Shi-Ci-app/README.md b/spaces/Cran-May/Shi-Ci-app/README.md deleted file mode 100644 index 8b25adb3142fc27e56ed482170dc83304973331c..0000000000000000000000000000000000000000 --- a/spaces/Cran-May/Shi-Ci-app/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Shi Ci -emoji: 🐠 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: true -license: apache-2.0 -models: - - Cran-May/OpenSLIDE ---- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DHEIVER/AnimeGANv2/README.md b/spaces/DHEIVER/AnimeGANv2/README.md deleted file mode 100644 index 01d9f9a56b49b8e7a75f1f2dabefc18ed5a13e7e..0000000000000000000000000000000000000000 --- a/spaces/DHEIVER/AnimeGANv2/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: AnimeGANv2 -emoji: ⚡ -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -duplicated_from: akhaliq/AnimeGANv2 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
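
The `repocard.py` module removed below centers on one round-trip: `RepoCard` splits a README into a YAML metadata block (parsed into a `CardData` object) and a Markdown `.text` body, and its `content` property re-serializes both. A minimal sketch of that round-trip, assuming `huggingface_hub` is installed (the output path here is only illustrative):

```python
from huggingface_hub.repocard import RepoCard

# A README with a YAML front-matter block followed by a Markdown body,
# the shape matched by repocard.py's REGEX_YAML_BLOCK.
text = """---
language: en
license: mit
---

# My repo
"""

card = RepoCard(text)
print(card.data.to_dict())  # {'language': 'en', 'license': 'mit'}
print(card.text)            # '\n# My repo\n'

card.data.license = "apache-2.0"  # edit the parsed metadata in place
card.save("README.md")            # re-serializes the YAML block + body
```

Because `card.content` is rebuilt from `card.data` and `card.text`, edits to either side survive the save.
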
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/csv-b0b7514a.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/csv-b0b7514a.js deleted file mode 100644 index 511b34b2aed1552447a6605d45d0760eccb992ab..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/csv-b0b7514a.js +++ /dev/null @@ -1,2 +0,0 @@ -import{d as a}from"./dsv-576afacd.js";var s=a(","),v=s.parse,o=s.parseRows;export{v as a,o as c}; -//# sourceMappingURL=csv-b0b7514a.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/repocard.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/repocard.py deleted file mode 100644 index 1c4b976ffaab6c9c1a4acc153e608b960026c147..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/repocard.py +++ /dev/null @@ -1,819 +0,0 @@ -import os -import re -from pathlib import Path -from typing import Any, Dict, Optional, Type, Union - -import requests -import yaml - -from huggingface_hub.file_download import hf_hub_download -from huggingface_hub.hf_api import upload_file -from huggingface_hub.repocard_data import ( - CardData, - DatasetCardData, - EvalResult, - ModelCardData, - SpaceCardData, - eval_results_to_model_index, - model_index_to_eval_results, -) -from huggingface_hub.utils import get_session, is_jinja_available, yaml_dump - -from .constants import REPOCARD_NAME -from .utils import EntryNotFoundError, SoftTemporaryDirectory, validate_hf_hub_args -from .utils._typing import Literal -from .utils.logging import get_logger - - -TEMPLATE_MODELCARD_PATH = Path(__file__).parent / "templates" / "modelcard_template.md" -TEMPLATE_DATASETCARD_PATH = Path(__file__).parent / "templates" / "datasetcard_template.md" - -# exact same regex as in the Hub server. Please keep in sync. -# See https://github.com/huggingface/moon-landing/blob/main/server/lib/ViewMarkdown.ts#L18 -REGEX_YAML_BLOCK = re.compile(r"^(\s*---[\r\n]+)([\S\s]*?)([\r\n]+---(\r\n|\n|$))") - -logger = get_logger(__name__) - - -class RepoCard: - card_data_class = CardData - default_template_path = TEMPLATE_MODELCARD_PATH - repo_type = "model" - - def __init__(self, content: str, ignore_metadata_errors: bool = False): - """Initialize a RepoCard from string content. The content should be a - Markdown file with a YAML block at the beginning and a Markdown body. - - Args: - content (`str`): The content of the Markdown file. - - Example: - ```python - >>> from huggingface_hub.repocard import RepoCard - >>> text = ''' - ... --- - ... language: en - ... license: mit - ... --- - ... - ... # My repo - ... ''' - >>> card = RepoCard(text) - >>> card.data.to_dict() - {'language': 'en', 'license': 'mit'} - >>> card.text - '\\n# My repo\\n' - - ``` - - Raises the following error: - - - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - when the content of the repo card metadata is not a dictionary. - - - """ - - # Set the content of the RepoCard, as well as underlying .data and .text attributes. - # See the `content` property setter for more details. 
- self.ignore_metadata_errors = ignore_metadata_errors
- self.content = content
-
- @property
- def content(self):
- """The content of the RepoCard, including the YAML block and the Markdown body."""
- line_break = _detect_line_ending(self._content) or "\n"
- return f"---{line_break}{self.data.to_yaml(line_break=line_break)}{line_break}---{line_break}{self.text}"
-
- @content.setter
- def content(self, content: str):
- """Set the content of the RepoCard."""
- self._content = content
-
- match = REGEX_YAML_BLOCK.search(content)
- if match:
- # Metadata found in the YAML block
- yaml_block = match.group(2)
- self.text = content[match.end() :]
- data_dict = yaml.safe_load(yaml_block)
-
- # The YAML block's data should be a dictionary
- if not isinstance(data_dict, dict):
- raise ValueError("repo card metadata block should be a dict")
- else:
- # Model card without metadata... create empty metadata
- logger.warning("Repo card metadata block was not found. Setting CardData to empty.")
- data_dict = {}
- self.text = content
-
- self.data = self.card_data_class(**data_dict, ignore_metadata_errors=self.ignore_metadata_errors)
-
- def __str__(self):
- return self.content
-
- def save(self, filepath: Union[Path, str]):
- r"""Save a RepoCard to a file.
-
- Args:
- filepath (`Union[Path, str]`): Filepath to the markdown file to save.
-
- Example:
- ```python
- >>> from huggingface_hub.repocard import RepoCard
- >>> card = RepoCard("---\nlanguage: en\n---\n# This is a test repo card")
- >>> card.save("/tmp/test.md")
-
- ```
- """
- filepath = Path(filepath)
- filepath.parent.mkdir(parents=True, exist_ok=True)
- # Preserve newlines as in the existing file.
- with open(filepath, mode="w", newline="", encoding="utf-8") as f:
- f.write(str(self))
-
- @classmethod
- def load(
- cls,
- repo_id_or_path: Union[str, Path],
- repo_type: Optional[str] = None,
- token: Optional[str] = None,
- ignore_metadata_errors: bool = False,
- ):
- """Initialize a RepoCard from a Hugging Face Hub repo's README.md or a local filepath.
-
- Args:
- repo_id_or_path (`Union[str, Path]`):
- The repo ID associated with a Hugging Face Hub repo or a local filepath.
- repo_type (`str`, *optional*):
- The type of Hugging Face repo to push to. Defaults to None, which will use "model". Other options
- are "dataset" and "space". Not used when loading from a local filepath. If this is called from a child
- class, the default value will be the child class's `repo_type`.
- token (`str`, *optional*):
- Authentication token, obtained with the `huggingface_hub.HfApi.login` method. Will default to the stored token.
- ignore_metadata_errors (`bool`):
- If True, errors while parsing the metadata section will be ignored. Some information might be lost during
- the process. Use it at your own risk.
-
- Returns:
- [`huggingface_hub.repocard.RepoCard`]: The RepoCard (or subclass) initialized from the repo's
- README.md file or filepath.
-
- Example:
- ```python
- >>> from huggingface_hub.repocard import RepoCard
- >>> card = RepoCard.load("nateraw/food")
- >>> assert card.data.tags == ["generated_from_trainer", "image-classification", "pytorch"]
-
- ```
- """
-
- if Path(repo_id_or_path).exists():
- card_path = Path(repo_id_or_path)
- elif isinstance(repo_id_or_path, str):
- card_path = Path(
- hf_hub_download(
- repo_id_or_path,
- REPOCARD_NAME,
- repo_type=repo_type or cls.repo_type,
- token=token,
- )
- )
- else:
- raise ValueError(f"Cannot load RepoCard: path not found on disk ({repo_id_or_path}).")
-
- # Preserve newlines in the existing file.
- with card_path.open(mode="r", newline="", encoding="utf-8") as f: - return cls(f.read(), ignore_metadata_errors=ignore_metadata_errors) - - def validate(self, repo_type: Optional[str] = None): - """Validates card against Hugging Face Hub's card validation logic. - Using this function requires access to the internet, so it is only called - internally by [`huggingface_hub.repocard.RepoCard.push_to_hub`]. - - Args: - repo_type (`str`, *optional*, defaults to "model"): - The type of Hugging Face repo to push to. Options are "model", "dataset", and "space". - If this function is called from a child class, the default will be the child class's `repo_type`. - - - Raises the following errors: - - - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - if the card fails validation checks. - - [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError) - if the request to the Hub API fails for any other reason. - - - """ - - # If repo type is provided, otherwise, use the repo type of the card. - repo_type = repo_type or self.repo_type - - body = { - "repoType": repo_type, - "content": str(self), - } - headers = {"Accept": "text/plain"} - - try: - r = get_session().post("https://huggingface.co/api/validate-yaml", body, headers=headers) - r.raise_for_status() - except requests.exceptions.HTTPError as exc: - if r.status_code == 400: - raise ValueError(r.text) - else: - raise exc - - def push_to_hub( - self, - repo_id: str, - token: Optional[str] = None, - repo_type: Optional[str] = None, - commit_message: Optional[str] = None, - commit_description: Optional[str] = None, - revision: Optional[str] = None, - create_pr: Optional[bool] = None, - parent_commit: Optional[str] = None, - ): - """Push a RepoCard to a Hugging Face Hub repo. - - Args: - repo_id (`str`): - The repo ID of the Hugging Face Hub repo to push to. Example: "nateraw/food". - token (`str`, *optional*): - Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to - the stored token. - repo_type (`str`, *optional*, defaults to "model"): - The type of Hugging Face repo to push to. Options are "model", "dataset", and "space". If this - function is called by a child class, it will default to the child class's `repo_type`. - commit_message (`str`, *optional*): - The summary / title / first line of the generated commit. - commit_description (`str`, *optional*) - The description of the generated commit. - revision (`str`, *optional*): - The git revision to commit from. Defaults to the head of the `"main"` branch. - create_pr (`bool`, *optional*): - Whether or not to create a Pull Request with this commit. Defaults to `False`. - parent_commit (`str`, *optional*): - The OID / SHA of the parent commit, as a hexadecimal string. Shorthands (7 first characters) are also supported. - If specified and `create_pr` is `False`, the commit will fail if `revision` does not point to `parent_commit`. - If specified and `create_pr` is `True`, the pull request will be created from `parent_commit`. - Specifying `parent_commit` ensures the repo has not changed before committing the changes, and can be - especially useful if the repo is updated / committed to concurrently. - Returns: - `str`: URL of the commit which updated the card metadata. - """ - - # If repo type is provided, otherwise, use the repo type of the card. 
- repo_type = repo_type or self.repo_type - - # Validate card before pushing to hub - self.validate(repo_type=repo_type) - - with SoftTemporaryDirectory() as tmpdir: - tmp_path = Path(tmpdir) / REPOCARD_NAME - tmp_path.write_text(str(self)) - url = upload_file( - path_or_fileobj=str(tmp_path), - path_in_repo=REPOCARD_NAME, - repo_id=repo_id, - token=token, - repo_type=repo_type, - commit_message=commit_message, - commit_description=commit_description, - create_pr=create_pr, - revision=revision, - parent_commit=parent_commit, - ) - return url - - @classmethod - def from_template( - cls, - card_data: CardData, - template_path: Optional[str] = None, - **template_kwargs, - ): - """Initialize a RepoCard from a template. By default, it uses the default template. - - Templates are Jinja2 templates that can be customized by passing keyword arguments. - - Args: - card_data (`huggingface_hub.CardData`): - A huggingface_hub.CardData instance containing the metadata you want to include in the YAML - header of the repo card on the Hugging Face Hub. - template_path (`str`, *optional*): - A path to a markdown file with optional Jinja template variables that can be filled - in with `template_kwargs`. Defaults to the default template. - - Returns: - [`huggingface_hub.repocard.RepoCard`]: A RepoCard instance with the specified card data and content from the - template. - """ - if is_jinja_available(): - import jinja2 - else: - raise ImportError( - "Using RepoCard.from_template requires Jinja2 to be installed. Please" - " install it with `pip install Jinja2`." - ) - - kwargs = card_data.to_dict().copy() - kwargs.update(template_kwargs) # Template_kwargs have priority - template = jinja2.Template(Path(template_path or cls.default_template_path).read_text()) - content = template.render(card_data=card_data.to_yaml(), **kwargs) - return cls(content) - - -class ModelCard(RepoCard): - card_data_class = ModelCardData - default_template_path = TEMPLATE_MODELCARD_PATH - repo_type = "model" - - @classmethod - def from_template( # type: ignore # violates Liskov property but easier to use - cls, - card_data: ModelCardData, - template_path: Optional[str] = None, - **template_kwargs, - ): - """Initialize a ModelCard from a template. By default, it uses the default template, which can be found here: - https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md - - Templates are Jinja2 templates that can be customized by passing keyword arguments. - - Args: - card_data (`huggingface_hub.ModelCardData`): - A huggingface_hub.ModelCardData instance containing the metadata you want to include in the YAML - header of the model card on the Hugging Face Hub. - template_path (`str`, *optional*): - A path to a markdown file with optional Jinja template variables that can be filled - in with `template_kwargs`. Defaults to the default template. - - Returns: - [`huggingface_hub.ModelCard`]: A ModelCard instance with the specified card data and content from the - template. - - Example: - ```python - >>> from huggingface_hub import ModelCard, ModelCardData, EvalResult - - >>> # Using the Default Template - >>> card_data = ModelCardData( - ... language='en', - ... license='mit', - ... library_name='timm', - ... tags=['image-classification', 'resnet'], - ... datasets=['beans'], - ... metrics=['accuracy'], - ... ) - >>> card = ModelCard.from_template( - ... card_data, - ... model_description='This model does x + y...' - ... 
) - - >>> # Including Evaluation Results - >>> card_data = ModelCardData( - ... language='en', - ... tags=['image-classification', 'resnet'], - ... eval_results=[ - ... EvalResult( - ... task_type='image-classification', - ... dataset_type='beans', - ... dataset_name='Beans', - ... metric_type='accuracy', - ... metric_value=0.9, - ... ), - ... ], - ... model_name='my-cool-model', - ... ) - >>> card = ModelCard.from_template(card_data) - - >>> # Using a Custom Template - >>> card_data = ModelCardData( - ... language='en', - ... tags=['image-classification', 'resnet'] - ... ) - >>> card = ModelCard.from_template( - ... card_data=card_data, - ... template_path='./src/huggingface_hub/templates/modelcard_template.md', - ... custom_template_var='custom value', # will be replaced in template if it exists - ... ) - - ``` - """ - return super().from_template(card_data, template_path, **template_kwargs) - - -class DatasetCard(RepoCard): - card_data_class = DatasetCardData - default_template_path = TEMPLATE_DATASETCARD_PATH - repo_type = "dataset" - - @classmethod - def from_template( # type: ignore # violates Liskov property but easier to use - cls, - card_data: DatasetCardData, - template_path: Optional[str] = None, - **template_kwargs, - ): - """Initialize a DatasetCard from a template. By default, it uses the default template, which can be found here: - https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md - - Templates are Jinja2 templates that can be customized by passing keyword arguments. - - Args: - card_data (`huggingface_hub.DatasetCardData`): - A huggingface_hub.DatasetCardData instance containing the metadata you want to include in the YAML - header of the dataset card on the Hugging Face Hub. - template_path (`str`, *optional*): - A path to a markdown file with optional Jinja template variables that can be filled - in with `template_kwargs`. Defaults to the default template. - - Returns: - [`huggingface_hub.DatasetCard`]: A DatasetCard instance with the specified card data and content from the - template. - - Example: - ```python - >>> from huggingface_hub import DatasetCard, DatasetCardData - - >>> # Using the Default Template - >>> card_data = DatasetCardData( - ... language='en', - ... license='mit', - ... annotations_creators='crowdsourced', - ... task_categories=['text-classification'], - ... task_ids=['sentiment-classification', 'text-scoring'], - ... multilinguality='monolingual', - ... pretty_name='My Text Classification Dataset', - ... ) - >>> card = DatasetCard.from_template( - ... card_data, - ... pretty_name=card_data.pretty_name, - ... ) - - >>> # Using a Custom Template - >>> card_data = DatasetCardData( - ... language='en', - ... license='mit', - ... ) - >>> card = DatasetCard.from_template( - ... card_data=card_data, - ... template_path='./src/huggingface_hub/templates/datasetcard_template.md', - ... custom_template_var='custom value', # will be replaced in template if it exists - ... ) - - ``` - """ - return super().from_template(card_data, template_path, **template_kwargs) - - -class SpaceCard(RepoCard): - card_data_class = SpaceCardData - default_template_path = TEMPLATE_MODELCARD_PATH - repo_type = "space" - - -def _detect_line_ending(content: str) -> Literal["\r", "\n", "\r\n", None]: # noqa: F722 - """Detect the line ending of a string. Used by RepoCard to avoid making huge diff on newlines. - - Uses same implementation as in Hub server, keep it in sync. 
-
- Returns:
- str: The detected line ending of the string.
- """
- cr = content.count("\r")
- lf = content.count("\n")
- crlf = content.count("\r\n")
- if cr + lf == 0:
- return None
- if crlf == cr and crlf == lf:
- return "\r\n"
- if cr > lf:
- return "\r"
- else:
- return "\n"
-
-
-def metadata_load(local_path: Union[str, Path]) -> Optional[Dict]:
- content = Path(local_path).read_text()
- match = REGEX_YAML_BLOCK.search(content)
- if match:
- yaml_block = match.group(2)
- data = yaml.safe_load(yaml_block)
- if isinstance(data, dict):
- return data
- else:
- raise ValueError("repo card metadata block should be a dict")
- else:
- return None
-
-
-def metadata_save(local_path: Union[str, Path], data: Dict) -> None:
- """
- Save the metadata dict in the upper YAML part. Tries to preserve newlines as
- in the existing file. Docs about open() with newline="" parameter:
- https://docs.python.org/3/library/functions.html?highlight=open#open Does
- not work with "^M" linebreaks, which are replaced by \n
- """
- line_break = "\n"
- content = ""
- # try to detect existing newline character
- if os.path.exists(local_path):
- with open(local_path, "r", newline="", encoding="utf8") as readme:
- content = readme.read()
- if isinstance(readme.newlines, tuple):
- line_break = readme.newlines[0]
- elif isinstance(readme.newlines, str):
- line_break = readme.newlines
-
- # creates a new file if it does not exist yet
- with open(local_path, "w", newline="", encoding="utf8") as readme:
- data_yaml = yaml_dump(data, sort_keys=False, line_break=line_break)
- # sort_keys: keep dict order
- match = REGEX_YAML_BLOCK.search(content)
- if match:
- output = content[: match.start()] + f"---{line_break}{data_yaml}---{line_break}" + content[match.end() :]
- else:
- output = f"---{line_break}{data_yaml}---{line_break}{content}"
-
- readme.write(output)
- readme.close()
-
-
-def metadata_eval_result(
- *,
- model_pretty_name: str,
- task_pretty_name: str,
- task_id: str,
- metrics_pretty_name: str,
- metrics_id: str,
- metrics_value: Any,
- dataset_pretty_name: str,
- dataset_id: str,
- metrics_config: Optional[str] = None,
- metrics_verified: bool = False,
- dataset_config: Optional[str] = None,
- dataset_split: Optional[str] = None,
- dataset_revision: Optional[str] = None,
- metrics_verification_token: Optional[str] = None,
-) -> Dict:
- """
- Creates a metadata dict with the result from a model evaluated on a dataset.
-
- Args:
- model_pretty_name (`str`):
- The name of the model in natural language.
- task_pretty_name (`str`):
- The name of a task in natural language.
- task_id (`str`):
- Example: automatic-speech-recognition. A task id.
- metrics_pretty_name (`str`):
- A name for the metric in natural language. Example: Test WER.
- metrics_id (`str`):
- Example: wer. A metric id from https://hf.co/metrics.
- metrics_value (`Any`):
- The value from the metric. Example: 20.0 or "20.0 ± 1.2".
- dataset_pretty_name (`str`):
- The name of the dataset in natural language.
- dataset_id (`str`):
- Example: common_voice. A dataset id from https://hf.co/datasets.
- metrics_config (`str`, *optional*):
- The name of the metric configuration used in `load_metric()`.
- Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
- metrics_verified (`bool`, *optional*, defaults to `False`):
- Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
- dataset_config (`str`, *optional*):
- Example: fr. The name of the dataset configuration used in `load_dataset()`.
- dataset_split (`str`, *optional*):
- Example: test. The name of the dataset split used in `load_dataset()`.
- dataset_revision (`str`, *optional*):
- Example: 5503434ddd753f426f4b38109466949a1217c2bb. The name of the dataset revision
- used in `load_dataset()`.
- metrics_verification_token (`str`, *optional*):
- A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
-
- Returns:
- `dict`: a metadata dict with the result from a model evaluated on a dataset.
-
- Example:
- ```python
- >>> from huggingface_hub import metadata_eval_result
- >>> results = metadata_eval_result(
- ... model_pretty_name="RoBERTa fine-tuned on ReactionGIF",
- ... task_pretty_name="Text Classification",
- ... task_id="text-classification",
- ... metrics_pretty_name="Accuracy",
- ... metrics_id="accuracy",
- ... metrics_value=0.2662102282047272,
- ... dataset_pretty_name="ReactionJPEG",
- ... dataset_id="julien-c/reactionjpeg",
- ... dataset_config="default",
- ... dataset_split="test",
- ... )
- >>> results == {
- ... 'model-index': [
- ... {
- ... 'name': 'RoBERTa fine-tuned on ReactionGIF',
- ... 'results': [
- ... {
- ... 'task': {
- ... 'type': 'text-classification',
- ... 'name': 'Text Classification'
- ... },
- ... 'dataset': {
- ... 'name': 'ReactionJPEG',
- ... 'type': 'julien-c/reactionjpeg',
- ... 'config': 'default',
- ... 'split': 'test'
- ... },
- ... 'metrics': [
- ... {
- ... 'type': 'accuracy',
- ... 'value': 0.2662102282047272,
- ... 'name': 'Accuracy',
- ... 'verified': False
- ... }
- ... ]
- ... }
- ... ]
- ... }
- ... ]
- ... }
- True
-
- ```
- """
-
- return {
- "model-index": eval_results_to_model_index(
- model_name=model_pretty_name,
- eval_results=[
- EvalResult(
- task_name=task_pretty_name,
- task_type=task_id,
- metric_name=metrics_pretty_name,
- metric_type=metrics_id,
- metric_value=metrics_value,
- dataset_name=dataset_pretty_name,
- dataset_type=dataset_id,
- metric_config=metrics_config,
- verified=metrics_verified,
- verify_token=metrics_verification_token,
- dataset_config=dataset_config,
- dataset_split=dataset_split,
- dataset_revision=dataset_revision,
- )
- ],
- )
- }
-
-
-@validate_hf_hub_args
-def metadata_update(
- repo_id: str,
- metadata: Dict,
- *,
- repo_type: Optional[str] = None,
- overwrite: bool = False,
- token: Optional[str] = None,
- commit_message: Optional[str] = None,
- commit_description: Optional[str] = None,
- revision: Optional[str] = None,
- create_pr: bool = False,
- parent_commit: Optional[str] = None,
-) -> str:
- """
- Updates the metadata in the README.md of a repository on the Hugging Face Hub.
- If the README.md file doesn't exist yet, a new one is created with the metadata and
- the default ModelCard or DatasetCard template. For a `space` repo, an error is thrown
- as a Space cannot exist without a `README.md` file.
-
- Args:
- repo_id (`str`):
- The name of the repository.
- metadata (`dict`):
- A dictionary containing the metadata to be updated.
- repo_type (`str`, *optional*):
- Set to `"dataset"` or `"space"` if updating to a dataset or space,
- `None` or `"model"` if updating to a model. Default is `None`.
- overwrite (`bool`, *optional*, defaults to `False`):
- If set to `True` an existing field can be overwritten, otherwise
- attempting to overwrite an existing field will cause an error.
-        token (`str`, *optional*):
-            The Hugging Face authentication token.
-        commit_message (`str`, *optional*):
-            The summary / title / first line of the generated commit. Defaults to
-            `f"Update metadata with huggingface_hub"`.
-        commit_description (`str`, *optional*):
-            The description of the generated commit.
-        revision (`str`, *optional*):
-            The git revision to commit from. Defaults to the head of the
-            `"main"` branch.
-        create_pr (`bool`, *optional*):
-            Whether or not to create a Pull Request from `revision` with that commit.
-            Defaults to `False`.
-        parent_commit (`str`, *optional*):
-            The OID / SHA of the parent commit, as a hexadecimal string. Shorthands (first 7 characters) are also supported.
-            If specified and `create_pr` is `False`, the commit will fail if `revision` does not point to `parent_commit`.
-            If specified and `create_pr` is `True`, the pull request will be created from `parent_commit`.
-            Specifying `parent_commit` ensures the repo has not changed before committing the changes, and can be
-            especially useful if the repo is updated / committed to concurrently.
-
-    Returns:
-        `str`: URL of the commit which updated the card metadata.
-
-    Example:
-        ```python
-        >>> from huggingface_hub import metadata_update
-        >>> metadata = {'model-index': [{'name': 'RoBERTa fine-tuned on ReactionGIF',
-        ...             'results': [{'dataset': {'name': 'ReactionGIF',
-        ...                                      'type': 'julien-c/reactiongif'},
-        ...                          'metrics': [{'name': 'Recall',
-        ...                                       'type': 'recall',
-        ...                                       'value': 0.7762102282047272}],
-        ...                          'task': {'name': 'Text Classification',
-        ...                                   'type': 'text-classification'}}]}]}
-        >>> url = metadata_update("hf-internal-testing/reactiongif-roberta-card", metadata)
-
-        ```
-    """
-    commit_message = commit_message if commit_message is not None else "Update metadata with huggingface_hub"
-
-    # Card class given repo_type
-    card_class: Type[RepoCard]
-    if repo_type is None or repo_type == "model":
-        card_class = ModelCard
-    elif repo_type == "dataset":
-        card_class = DatasetCard
-    elif repo_type == "space":
-        card_class = RepoCard
-    else:
-        raise ValueError(f"Unknown repo_type: {repo_type}")
-
-    # Either load the repo card from the Hub or create an empty one.
-    # NOTE: Will not create the repo if it doesn't exist.
-    try:
-        card = card_class.load(repo_id, token=token, repo_type=repo_type)
-    except EntryNotFoundError:
-        if repo_type == "space":
-            raise ValueError("Cannot update metadata on a Space that doesn't contain a `README.md` file.")
-
-        # Initialize a ModelCard or DatasetCard from the default template and no data.
-        card = card_class.from_template(CardData())
-
-    for key, value in metadata.items():
-        if key == "model-index":
-            # if the new metadata doesn't include a name, use the existing one or the repo name
-            if "name" not in value[0]:
-                value[0]["name"] = getattr(card, "model_name", repo_id)
-            model_name, new_results = model_index_to_eval_results(value)
-            if card.data.eval_results is None:
-                card.data.eval_results = new_results
-                card.data.model_name = model_name
-            else:
-                existing_results = card.data.eval_results
-
-                # Iterate over new results:
-                #   Iterate over existing results:
-                #     If both results describe the same metric but the value differs:
-                #       If overwrite=True: overwrite the metric value
-                #       Else: raise ValueError
-                #     Else: append the new result to the existing ones.
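-                # For instance, a new (accuracy, squad) result whose value differs
-                # from an existing (accuracy, squad) result is only merged when
-                # overwrite=True, while a result for a metric/dataset combination
-                # not seen before is simply appended as a new entry.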
-                for new_result in new_results:
-                    result_found = False
-                    for existing_result in existing_results:
-                        if new_result.is_equal_except_value(existing_result):
-                            if new_result != existing_result and not overwrite:
-                                raise ValueError(
-                                    "You passed a new value for the existing metric"
-                                    f" 'name: {new_result.metric_name}, type: "
-                                    f"{new_result.metric_type}'. Set `overwrite=True`"
-                                    " to overwrite existing metrics."
-                                )
-                            result_found = True
-                            existing_result.metric_value = new_result.metric_value
-                            if existing_result.verified is True:
-                                existing_result.verify_token = new_result.verify_token
-                    if not result_found:
-                        card.data.eval_results.append(new_result)
-        else:
-            # Any metadata that is not a result metric
-            if card.data.get(key) is not None and not overwrite and card.data.get(key) != value:
-                raise ValueError(
-                    f"You passed a new value for the existing metadata field '{key}'."
-                    " Set `overwrite=True` to overwrite existing metadata."
-                )
-            else:
-                card.data[key] = value
-
-    return card.push_to_hub(
-        repo_id,
-        token=token,
-        repo_type=repo_type,
-        commit_message=commit_message,
-        commit_description=commit_description,
-        create_pr=create_pr,
-        revision=revision,
-        parent_commit=parent_commit,
-    )
diff --git a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/utils.py b/spaces/DeepDrivePL/PaddleSeg-Matting/matting/utils.py
deleted file mode 100644
index b15727ee0f12e9e087da874c480825066386d073..0000000000000000000000000000000000000000
--- a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/utils.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-
-def get_files(root_path):
-    res = []
-    for root, dirs, files in os.walk(root_path, followlinks=True):
-        for f in files:
-            if f.endswith(('.jpg', '.png', '.jpeg', '.JPG')):
-                res.append(os.path.join(root, f))
-    return res
-
-
-def get_image_list(image_path):
-    """Get image list"""
-    valid_suffix = [
-        '.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png'
-    ]
-    image_list = []
-    image_dir = None
-    if os.path.isfile(image_path):
-        if os.path.splitext(image_path)[-1] in valid_suffix:
-            image_list.append(image_path)
-        else:
-            image_dir = os.path.dirname(image_path)
-            with open(image_path, 'r') as f:
-                for line in f:
-                    line = line.strip()
-                    if len(line.split()) > 1:
-                        raise RuntimeError(
-                            'There should be only one image path per line in `image_path` file. Wrong line: {}'
-                            .format(line))
-                    image_list.append(os.path.join(image_dir, line))
-    elif os.path.isdir(image_path):
-        image_dir = image_path
-        for root, dirs, files in os.walk(image_path):
-            for f in files:
-                if '.ipynb_checkpoints' in root:
-                    continue
-                if os.path.splitext(f)[-1] in valid_suffix:
-                    image_list.append(os.path.join(root, f))
-        image_list.sort()
-    else:
-        raise FileNotFoundError(
-            '`image_path` is not found. It should be an image file or a '
-            'directory containing images.'
-        )
-
-    if len(image_list) == 0:
-        raise RuntimeError('There are no image files in `image_path`')
-
-    return image_list, image_dir
-
-
-def mkdir(path):
-    sub_dir = os.path.dirname(path)
-    if not os.path.exists(sub_dir):
-        os.makedirs(sub_dir)
diff --git a/spaces/DonDoesStuff/openjourney-v4-demo/README.md b/spaces/DonDoesStuff/openjourney-v4-demo/README.md
deleted file mode 100644
index 23ab3113e567b33d4e4adde93418850d7e6063bf..0000000000000000000000000000000000000000
--- a/spaces/DonDoesStuff/openjourney-v4-demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Openjourney V4 Demo
-emoji: 🌍
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Dorado607/ChuanhuChatGPT/assets/html/appearance_switcher.html b/spaces/Dorado607/ChuanhuChatGPT/assets/html/appearance_switcher.html
deleted file mode 100644
index 6bb0101bd24478e5332d540cb53211141056df5f..0000000000000000000000000000000000000000
--- a/spaces/Dorado607/ChuanhuChatGPT/assets/html/appearance_switcher.html
+++ /dev/null
@@ -1,6 +0,0 @@
-
    - -
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/editings/ganspace.py b/spaces/DragGan/DragGan-Inversion/PTI/editings/ganspace.py
deleted file mode 100644
index ee1e28c76de89f690e563902def42e3738dc677f..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/editings/ganspace.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import torch
-
-
-def edit(latents, pca, edit_directions):
-    # apply each (pca_idx, start, end, strength) edit to every latent
-    edit_latents = []
-    for latent in latents:
-        for pca_idx, start, end, strength in edit_directions:
-            delta = get_delta(pca, latent, pca_idx, strength)
-            delta_padded = torch.zeros(latent.shape).to('cuda')
-            # only the W+ layers in [start, end) receive the edit
-            delta_padded[start:end] += delta.repeat(end - start, 1)
-            edit_latents.append(latent + delta_padded)
-    return torch.stack(edit_latents)
-
-
-def get_delta(pca, latent, idx, strength):
-    # project the centered latent onto PCA component `idx`, then move that
-    # coordinate to `strength` along the component
-    w_centered = latent - pca['mean'].to('cuda')
-    lat_comp = pca['comp'].to('cuda')
-    lat_std = pca['std'].to('cuda')
-    w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx]
-    delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx]
-    return delta
diff --git a/spaces/Duino/multy_tts/README.md b/spaces/Duino/multy_tts/README.md
deleted file mode 100644
index d76a44fe4e024484096032092954dd5cd0f298d6..0000000000000000000000000000000000000000
--- a/spaces/Duino/multy_tts/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Multy Tts
-emoji: 📉
-colorFrom: gray
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
---- 
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ECCV2022/bytetrack/tools/track_sort.py b/spaces/ECCV2022/bytetrack/tools/track_sort.py
deleted file mode 100644
index 7a50527d30558918f121e75402ad8ea44093c5ec..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tools/track_sort.py
+++ /dev/null
@@ -1,290 +0,0 @@
-from loguru import logger
-
-import torch
-import torch.backends.cudnn as cudnn
-from torch.nn.parallel import DistributedDataParallel as DDP
-
-from yolox.core import launch
-from yolox.exp import get_exp
-from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger
-from yolox.evaluators import MOTEvaluator
-
-import argparse
-import os
-import random
-import warnings
-import glob
-import motmetrics as mm
-from collections import OrderedDict
-from pathlib import Path
-
-
-def make_parser():
-    parser = argparse.ArgumentParser("YOLOX Eval")
-    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
-    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
-
-    # distributed
-    parser.add_argument(
-        "--dist-backend", default="nccl", type=str, help="distributed backend"
-    )
-    parser.add_argument(
-        "--dist-url",
-        default=None,
-        type=str,
-        help="url used to set up distributed training",
-    )
-    parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
-    parser.add_argument(
-        "-d", "--devices", default=None, type=int, help="device for training"
-    )
-    parser.add_argument(
-        "--local_rank", default=0, type=int, help="local rank for dist training"
-    )
-    parser.add_argument(
-        "--num_machines", default=1, type=int, help="number of nodes for training"
-    )
-    parser.add_argument(
-        "--machine_rank", default=0, type=int, help="node rank for multi-node training"
-    )
-    parser.add_argument(
-        "-f",
-        "--exp_file",
-        default=None,
-        type=str,
-        help="please input your experiment description file",
-    )
-    parser.add_argument(
"--fp16", - dest="fp16", - default=False, - action="store_true", - help="Adopting mix precision evaluating.", - ) - parser.add_argument( - "--fuse", - dest="fuse", - default=False, - action="store_true", - help="Fuse conv and bn for testing.", - ) - parser.add_argument( - "--trt", - dest="trt", - default=False, - action="store_true", - help="Using TensorRT model for testing.", - ) - parser.add_argument( - "--test", - dest="test", - default=False, - action="store_true", - help="Evaluating on test-dev set.", - ) - parser.add_argument( - "--speed", - dest="speed", - default=False, - action="store_true", - help="speed test only.", - ) - parser.add_argument( - "opts", - help="Modify config options using the command-line", - default=None, - nargs=argparse.REMAINDER, - ) - # det args - parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") - parser.add_argument("--conf", default=0.1, type=float, help="test conf") - parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold") - parser.add_argument("--tsize", default=None, type=int, help="test img size") - parser.add_argument("--seed", default=None, type=int, help="eval seed") - # tracking args - parser.add_argument("--track_thresh", type=float, default=0.4, help="tracking confidence threshold") - parser.add_argument("--track_buffer", type=int, default=30, help="the frames for keep lost tracks") - parser.add_argument("--match_thresh", type=int, default=0.9, help="matching threshold for tracking") - parser.add_argument('--min-box-area', type=float, default=100, help='filter out tiny boxes') - return parser - - -def compare_dataframes(gts, ts): - accs = [] - names = [] - for k, tsacc in ts.items(): - if k in gts: - logger.info('Comparing {}...'.format(k)) - accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5)) - names.append(k) - else: - logger.warning('No ground truth for {}, skipping.'.format(k)) - - return accs, names - - -@logger.catch -def main(exp, args, num_gpu): - if args.seed is not None: - random.seed(args.seed) - torch.manual_seed(args.seed) - cudnn.deterministic = True - warnings.warn( - "You have chosen to seed testing. 
-            "You have chosen to seed testing. This will turn on the CUDNN "
-            "deterministic setting, which can slow down testing."
-        )
-
-    is_distributed = num_gpu > 1
-
-    # set environment variables for distributed training
-    cudnn.benchmark = True
-
-    rank = args.local_rank
-    # rank = get_local_rank()
-
-    file_name = os.path.join(exp.output_dir, args.experiment_name)
-
-    if rank == 0:
-        os.makedirs(file_name, exist_ok=True)
-
-    results_folder = os.path.join(file_name, "track_results_sort")
-    os.makedirs(results_folder, exist_ok=True)
-
-    setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
-    logger.info("Args: {}".format(args))
-
-    if args.conf is not None:
-        exp.test_conf = args.conf
-    if args.nms is not None:
-        exp.nmsthre = args.nms
-    if args.tsize is not None:
-        exp.test_size = (args.tsize, args.tsize)
-
-    model = exp.get_model()
-    logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
-    #logger.info("Model Structure:\n{}".format(str(model)))
-
-    #evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test)
-
-    val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test)
-    evaluator = MOTEvaluator(
-        args=args,
-        dataloader=val_loader,
-        img_size=exp.test_size,
-        confthre=exp.test_conf,
-        nmsthre=exp.nmsthre,
-        num_classes=exp.num_classes,
-    )
-
-    torch.cuda.set_device(rank)
-    model.cuda(rank)
-    model.eval()
-
-    if not args.speed and not args.trt:
-        if args.ckpt is None:
-            ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
-        else:
-            ckpt_file = args.ckpt
-        logger.info("loading checkpoint")
-        loc = "cuda:{}".format(rank)
-        ckpt = torch.load(ckpt_file, map_location=loc)
-        # load the model state dict
-        model.load_state_dict(ckpt["model"])
-        logger.info("loaded checkpoint done.")
-
-    if is_distributed:
-        model = DDP(model, device_ids=[rank])
-
-    if args.fuse:
-        logger.info("\tFusing model...")
-        model = fuse_model(model)
-
-    if args.trt:
-        assert (
-            not args.fuse and not is_distributed and args.batch_size == 1
-        ), "TensorRT model does not support model fusing and distributed inference!"
-        trt_file = os.path.join(file_name, "model_trt.pth")
-        assert os.path.exists(
-            trt_file
-        ), "TensorRT model is not found!\n Run tools/trt.py first!"
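-        # A serialized TensorRT engine only outputs the raw head predictions,
-        # so in-graph decoding is disabled here and decoding is done on the
-        # Python side via `model.head.decode_outputs` below.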
- model.head.decode_in_inference = False - decoder = model.head.decode_outputs - else: - trt_file = None - decoder = None - - # start evaluate - *_, summary = evaluator.evaluate_sort( - model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder - ) - logger.info("\n" + summary) - - # evaluate MOTA - mm.lap.default_solver = 'lap' - - gt_type = '_val_half' - #gt_type = '' - print('gt_type', gt_type) - gtfiles = glob.glob( - os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type))) - print('gt_files', gtfiles) - tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')] - - logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles))) - logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers)) - logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver)) - logger.info('Loading files.') - - gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles]) - ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles]) - - mh = mm.metrics.create() - accs, names = compare_dataframes(gt, ts) - - logger.info('Running metrics') - metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', - 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', - 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True) - # print(mm.io.render_summary( - # summary, formatters=mh.formatters, - # namemap=mm.io.motchallenge_metric_names)) - div_dict = { - 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], - 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']} - for divisor in div_dict: - for divided in div_dict[divisor]: - summary[divided] = (summary[divided] / summary[divisor]) - fmt = mh.formatters - change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', - 'partially_tracked', 'mostly_lost'] - for k in change_fmt_list: - fmt[k] = fmt['mota'] - print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names)) - - metrics = mm.metrics.motchallenge_metrics + ['num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)) - logger.info('Completed') - - -if __name__ == "__main__": - args = make_parser().parse_args() - exp = get_exp(args.exp_file, args.name) - exp.merge(args.opts) - - if not args.experiment_name: - args.experiment_name = exp.exp_name - - num_gpu = torch.cuda.device_count() if args.devices is None else args.devices - assert num_gpu <= torch.cuda.device_count() - - launch( - main, - num_gpu, - args.num_machines, - args.machine_rank, - backend=args.dist_backend, - dist_url=args.dist_url, - args=(exp, args, num_gpu), - ) diff --git a/spaces/Eddycrack864/Applio-Inference/app.py b/spaces/Eddycrack864/Applio-Inference/app.py deleted file mode 100644 index 078cceec871708814182b87e3a047fd7c4660239..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/app.py +++ /dev/null @@ -1,3153 +0,0 @@ -import 
os, sys -os.system("pip install pyworld") # ==0.3.3 - -now_dir = os.getcwd() -sys.path.append(now_dir) -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' -os.environ["OPENBLAS_NUM_THREADS"] = "1" -os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" - -# Download models -shell_script = './tools/dlmodels.sh' -os.system(f'chmod +x {shell_script}') -os.system('apt install git-lfs') -os.system('git lfs install') -os.system('apt-get -y install aria2') -os.system('aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d . -o hubert_base.pt') -try: - return_code = os.system(shell_script) - if return_code == 0: - print("Shell script executed successfully.") - else: - print(f"Shell script failed with return code {return_code}") -except Exception as e: - print(f"An error occurred: {e}") - - -import logging -import shutil -import threading -import lib.globals.globals as rvc_globals -from LazyImport import lazyload -import mdx -from mdx_processing_script import get_model_list,id_to_ptm,prepare_mdx,run_mdx -math = lazyload('math') -import traceback -import warnings -tensorlowest = lazyload('tensorlowest') -from random import shuffle -from subprocess import Popen -from time import sleep -import json -import pathlib - -import fairseq -logging.getLogger("faiss").setLevel(logging.WARNING) -import faiss -gr = lazyload("gradio") -np = lazyload("numpy") -torch = lazyload('torch') -re = lazyload('regex') -SF = lazyload("soundfile") -SFWrite = SF.write -from dotenv import load_dotenv -from sklearn.cluster import MiniBatchKMeans -import datetime - - -from glob import glob1 -import signal -from signal import SIGTERM -import librosa - -from configs.config import Config -from i18n import I18nAuto -from infer.lib.train.process_ckpt import ( - change_info, - extract_small_model, - merge, - show_info, -) -#from infer.modules.uvr5.modules import uvr -from infer.modules.vc.modules import VC -from infer.modules.vc.utils import * -from infer.modules.vc.pipeline import Pipeline -import lib.globals.globals as rvc_globals -math = lazyload('math') -ffmpeg = lazyload('ffmpeg') -import nltk -nltk.download('punkt', quiet=True) -from nltk.tokenize import sent_tokenize -from bark import SAMPLE_RATE - -import easy_infer -import audioEffects -from infer.lib.csvutil import CSVutil - -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM -from infer_uvr5 import _audio_pre_, _audio_pre_new -from MDXNet import MDXNetDereverb -from infer.lib.audio import load_audio - - -from sklearn.cluster import MiniBatchKMeans - -import time -import csv - -from shlex import quote as SQuote - - - - -RQuote = lambda val: SQuote(str(val)) - -tmp = os.path.join(now_dir, "TEMP") -runtime_dir = os.path.join(now_dir, "runtime/Lib/site-packages") -directories = ['logs', 'audios', 'datasets', 'weights', 'audio-others' , 'audio-outputs'] - -shutil.rmtree(tmp, ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True) -shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True) - -os.makedirs(tmp, exist_ok=True) -for folder in directories: - os.makedirs(os.path.join(now_dir, folder), exist_ok=True) - - -os.makedirs(tmp, exist_ok=True) -os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True) -os.makedirs(os.path.join(now_dir, 
"assets/weights"), exist_ok=True) -os.environ["TEMP"] = tmp -warnings.filterwarnings("ignore") -torch.manual_seed(114514) -logging.getLogger("numba").setLevel(logging.WARNING) - -logger = logging.getLogger(__name__) - - -if not os.path.isdir("csvdb/"): - os.makedirs("csvdb") - frmnt, stp = open("csvdb/formanting.csv", "w"), open("csvdb/stop.csv", "w") - frmnt.close() - stp.close() - -global DoFormant, Quefrency, Timbre - -try: - DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting") - DoFormant = ( - lambda DoFormant: True - if DoFormant.lower() == "true" - else (False if DoFormant.lower() == "false" else DoFormant) - )(DoFormant) -except (ValueError, TypeError, IndexError): - DoFormant, Quefrency, Timbre = False, 1.0, 1.0 - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre) - -load_dotenv() -config = Config() -vc = VC(config) - -if config.dml == True: - - def forward_dml(ctx, x, scale): - ctx.scale = scale - res = x.clone().detach() - return res - - fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml - -i18n = I18nAuto() -i18n.print() -# 判断是否有能用来训练和加速推理的N卡 -ngpu = torch.cuda.device_count() -gpu_infos = [] -mem = [] -if_gpu_ok = False - -isinterrupted = 0 - - -if torch.cuda.is_available() or ngpu != 0: - for i in range(ngpu): - gpu_name = torch.cuda.get_device_name(i) - if any( - value in gpu_name.upper() - for value in [ - "10", - "16", - "20", - "30", - "40", - "A2", - "A3", - "A4", - "P4", - "A50", - "500", - "A60", - "70", - "80", - "90", - "M4", - "T4", - "TITAN", - ] - ): - # A10#A100#V100#A40#P40#M40#K80#A4500 - if_gpu_ok = True # 至少有一张能用的N卡 - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append( - int( - torch.cuda.get_device_properties(i).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - ) -if if_gpu_ok and len(gpu_infos) > 0: - gpu_info = "\n".join(gpu_infos) - default_batch_size = min(mem) // 2 -else: - gpu_info = "Unfortunately, there is no compatible GPU available to support your training." 
- default_batch_size = 1 -gpus = "-".join([i[0] for i in gpu_infos]) - -class ToolButton(gr.Button, gr.components.FormComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(variant="tool", **kwargs) - - def get_block_name(self): - return "button" - - -hubert_model = None -weight_root = os.getenv("weight_root") -weight_uvr5_root = os.getenv("weight_uvr5_root") -index_root = os.getenv("index_root") -datasets_root = "datasets" -fshift_root = "formantshiftcfg" -audio_root = "audios" -audio_others_root = "audio-others" - -sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus', - 'm4a', 'mp4', 'aac', 'alac', 'wma', - 'aiff', 'webm', 'ac3'} - -names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - -indexes_list = [os.path.join(root, name) - for root, _, files in os.walk(index_root, topdown=False) - for name in files - if name.endswith(".index") and "trained" not in name] - -audio_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -audio_others_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_others_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -uvr5_names = [name.replace(".pth", "") - for name in os.listdir(weight_uvr5_root) - if name.endswith(".pth") or "onnx" in name] - - -check_for_name = lambda: sorted(names)[0] if names else '' - -datasets=[] -for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." not in foldername: - datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername)) - -def get_dataset(): - if len(datasets) > 0: - return sorted(datasets)[0] - else: - return '' - -def update_model_choices(select_value): - model_ids = get_model_list() - model_ids_list = list(model_ids) - if select_value == "VR": - return {"choices": uvr5_names, "__type__": "update"} - elif select_value == "MDX": - return {"choices": model_ids_list, "__type__": "update"} - -set_bark_voice = easy_infer.get_bark_voice() -set_edge_voice = easy_infer.get_edge_voice() - -def update_tts_methods_voice(select_value): - #["Edge-tts", "RVG-tts", "Bark-tts"] - if select_value == "Edge-tts": - return {"choices": set_edge_voice, "value": "", "__type__": "update"} - elif select_value == "Bark-tts": - return {"choices": set_bark_voice, "value": "", "__type__": "update"} - - -def update_dataset_list(name): - new_datasets = [] - for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." 
not in foldername:
-            new_datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername))
-    return gr.Dropdown.update(choices=new_datasets)
-
-def get_indexes():
-    indexes_list = [
-        os.path.join(dirpath, filename)
-        for dirpath, _, filenames in os.walk(index_root)
-        for filename in filenames
-        if filename.endswith(".index") and "trained" not in filename
-    ]
-
-    return indexes_list if indexes_list else ''
-
-def get_fshift_presets():
-    fshift_presets_list = [
-        os.path.join(dirpath, filename)
-        for dirpath, _, filenames in os.walk(fshift_root)
-        for filename in filenames
-        if filename.endswith(".txt")
-    ]
-
-    return fshift_presets_list if fshift_presets_list else ''
-
-import soundfile as sf
-
-def generate_output_path(output_folder, base_name, extension):
-    # generate a unique name for the output file
-    index = 1
-    while True:
-        output_path = os.path.join(output_folder, f"{base_name}_{index}.{extension}")
-        if not os.path.exists(output_path):
-            return output_path
-        index += 1
-
-def combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2):
-    audio1, sr1 = librosa.load(audio1_path, sr=None)
-    audio2, sr2 = librosa.load(audio2_path, sr=None)
-
-    # align the sample rates
-    if sr1 != sr2:
-        if sr1 > sr2:
-            audio2 = librosa.resample(audio2, orig_sr=sr2, target_sr=sr1)
-        else:
-            audio1 = librosa.resample(audio1, orig_sr=sr1, target_sr=sr2)
-
-    # trim both audios to the same length
-    target_length = min(len(audio1), len(audio2))
-    audio1 = librosa.util.fix_length(audio1, target_length)
-    audio2 = librosa.util.fix_length(audio2, target_length)
-
-    # adjust the volume of each audio by multiplying by its gain factor
-    if volume_factor_audio1 != 1.0:
-        audio1 *= volume_factor_audio1
-    if volume_factor_audio2 != 1.0:
-        audio2 *= volume_factor_audio2
-
-    # mix the audios
-    combined_audio = audio1 + audio2
-
-    sf.write(output_path, combined_audio, sr1)
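-# Usage sketch (hypothetical file names): mix a converted vocal over an
-# instrumental at half volume; sample rates and lengths are matched above:
-#   combine_and_save_audios("vocal.wav", "inst.wav", "mix.wav", 1.0, 0.5)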
-
-
-# conversion function invoked by the button
-def audio_combined(audio1_path, audio2_path, volume_factor_audio1=1.0, volume_factor_audio2=1.0, reverb_enabled=False, compressor_enabled=False, noise_gate_enabled=False):
-    output_folder = os.path.join(now_dir, "audio-outputs")
-    os.makedirs(output_folder, exist_ok=True)
-
-    # generate unique names for the output files
-    base_name = "combined_audio"
-    extension = "wav"
-    output_path = generate_output_path(output_folder, base_name, extension)
-    print(reverb_enabled)
-    print(compressor_enabled)
-    print(noise_gate_enabled)
-
-    if reverb_enabled or compressor_enabled or noise_gate_enabled:
-        # process the second audio with the enabled effects
-        base_name = "effect_audio"
-        output_path = generate_output_path(output_folder, base_name, extension)
-        processed_audio_path = audioEffects.process_audio(audio2_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled)
-        base_name = "combined_audio"
-        output_path = generate_output_path(output_folder, base_name, extension)
-        # combine the processed audio with the first audio
-        combine_and_save_audios(audio1_path, processed_audio_path, output_path, volume_factor_audio1, volume_factor_audio2)
-
-        return i18n("Conversion complete!"), output_path
-    else:
-        base_name = "combined_audio"
-        output_path = generate_output_path(output_folder, base_name, extension)
-        # no effects enabled: combine the raw audios directly
-        combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2)
-
-        return i18n("Conversion complete!"), output_path
-
-
-
-
-def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0, architecture):
-    infos = []
-    if architecture == "VR":
-        try:
-            inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
-            usable_files = [os.path.join(inp_root, file)
-                            for file in os.listdir(inp_root)
-                            if file.endswith(tuple(sup_audioext))]
-
-
-            pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)(
-                agg=int(agg),
-                model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
-                device=config.device,
-                is_half=config.is_half,
-            )
-
-            try:
-                if paths is not None:
-                    paths = [path.name for path in paths]
-                else:
-                    paths = usable_files
-
-            except:
-                traceback.print_exc()
-                paths = usable_files
-            print(paths)
-            for path in paths:
-                inp_path = os.path.join(inp_root, path)
-                need_reformat, done = 1, 0
-
-                try:
-                    info = ffmpeg.probe(inp_path, cmd="ffprobe")
-                    if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100":
-                        need_reformat = 0
-                        pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
-                        done = 1
-                except:
-                    traceback.print_exc()
-
-                if need_reformat:
-                    tmp_path = f"{tmp}/{os.path.basename(RQuote(inp_path))}.reformatted.wav"
-                    os.system(f"ffmpeg -i {RQuote(inp_path)} -vn -acodec pcm_s16le -ac 2 -ar 44100 {RQuote(tmp_path)} -y")
-                    inp_path = tmp_path
-
-                try:
-                    if not done:
-                        pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
-                        infos.append(f"{os.path.basename(inp_path)}->Success")
-                        yield "\n".join(infos)
-                except:
-                    infos.append(f"{os.path.basename(inp_path)}->{traceback.format_exc()}")
-                    yield "\n".join(infos)
-        except:
-            infos.append(traceback.format_exc())
-            yield "\n".join(infos)
-        finally:
-            try:
-                if model_name ==
"onnx_dereverb_By_FoxJoy": - del pre_fun.pred.model - del pre_fun.pred.model_ - else: - del pre_fun.model - - del pre_fun - except: traceback.print_exc() - - print("clean_empty_cache") - - if torch.cuda.is_available(): torch.cuda.empty_cache() - - yield "\n".join(infos) - elif architecture == "MDX": - try: - infos.append(i18n("Starting audio conversion... (This might take a moment)")) - yield "\n".join(infos) - inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]] - - usable_files = [os.path.join(inp_root, file) - for file in os.listdir(inp_root) - if file.endswith(tuple(sup_audioext))] - try: - if paths != None: - paths = [path.name for path in paths] - else: - paths = usable_files - - except: - traceback.print_exc() - paths = usable_files - print(paths) - invert=True - denoise=True - use_custom_parameter=True - dim_f=3072 - dim_t=256 - n_fft=7680 - use_custom_compensation=True - compensation=1.025 - suffix = "Vocals_custom" #@param ["Vocals", "Drums", "Bass", "Other"]{allow-input: true} - suffix_invert = "Instrumental_custom" #@param ["Instrumental", "Drumless", "Bassless", "Instruments"]{allow-input: true} - print_settings = True # @param{type:"boolean"} - onnx = id_to_ptm(model_name) - compensation = compensation if use_custom_compensation or use_custom_parameter else None - mdx_model = prepare_mdx(onnx,use_custom_parameter, dim_f, dim_t, n_fft, compensation=compensation) - - - for path in paths: - #inp_path = os.path.join(inp_root, path) - suffix_naming = suffix if use_custom_parameter else None - diff_suffix_naming = suffix_invert if use_custom_parameter else None - run_mdx(onnx, mdx_model, path, format0, diff=invert,suffix=suffix_naming,diff_suffix=diff_suffix_naming,denoise=denoise) - - if print_settings: - print() - print('[MDX-Net_Colab settings used]') - print(f'Model used: {onnx}') - print(f'Model MD5: {mdx.MDX.get_hash(onnx)}') - print(f'Model parameters:') - print(f' -dim_f: {mdx_model.dim_f}') - print(f' -dim_t: {mdx_model.dim_t}') - print(f' -n_fft: {mdx_model.n_fft}') - print(f' -compensation: {mdx_model.compensation}') - print() - print('[Input file]') - print('filename(s): ') - for filename in paths: - print(f' -{filename}') - infos.append(f"{os.path.basename(filename)}->Success") - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - del mdx_model - except: traceback.print_exc() - - print("clean_empty_cache") - - if torch.cuda.is_available(): torch.cuda.empty_cache() - - - - - -def change_choices(): - names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" not in name] - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - - - return ( - {"choices": sorted(names), "__type__": "update"}, - {"choices": sorted(indexes_list), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) -def change_choices2(): - names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" 
not in name] - - - return ( - {"choices": sorted(names), "__type__": "update"}, - {"choices": sorted(indexes_list), "__type__": "update"}, - ) -def change_choices3(): - - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - audio_others_paths = [os.path.join(audio_others_root, file) for file in os.listdir(os.path.join(now_dir, "audio-others"))] - - - return ( - {"choices": sorted(audio_others_paths), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) - -def clean(): - return {"value": "", "__type__": "update"} -def export_onnx(): - from infer.modules.onnx.export import export_onnx as eo - - eo() - -sr_dict = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -def if_done(done, p): - while 1: - if p.poll() is None: - sleep(0.5) - else: - break - done[0] = True - - -def if_done_multi(done, ps): - while 1: - # poll==None代表进程未结束 - # 只要有一个进程未结束都不停 - flag = 1 - for p in ps: - if p.poll() is None: - flag = 0 - sleep(0.5) - break - if flag == 1: - break - done[0] = True - -def formant_enabled( - cbox, qfrency, tmbre, frmntapply, formantpreset, formant_refresh_button -): - if cbox: - DoFormant = True - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - # print(f"is checked? - {cbox}\ngot {DoFormant}") - - return ( - {"value": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - - else: - DoFormant = False - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - # print(f"is checked? - {cbox}\ngot {DoFormant}") - return ( - {"value": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - ) - - -def formant_apply(qfrency, tmbre): - Quefrency = qfrency - Timbre = tmbre - DoFormant = True - CSVutil("csvdb/formanting.csv", "w+", "formanting", DoFormant, qfrency, tmbre) - - return ( - {"value": Quefrency, "__type__": "update"}, - {"value": Timbre, "__type__": "update"}, - ) - -def update_fshift_presets(preset, qfrency, tmbre): - - if preset: - with open(preset, 'r') as p: - content = p.readlines() - qfrency, tmbre = content[0].strip(), content[1] - - formant_apply(qfrency, tmbre) - else: - qfrency, tmbre = preset_apply(preset, qfrency, tmbre) - - return ( - {"choices": get_fshift_presets(), "__type__": "update"}, - {"value": qfrency, "__type__": "update"}, - {"value": tmbre, "__type__": "update"}, - ) - -def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): - sr = sr_dict[sr] - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w") - f.close() - per = 3.0 if config.is_half else 3.7 - cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % ( - config.python_cmd, - trainset_dir, - sr, - n_p, - now_dir, - exp_dir, - config.noparallel, - per, - ) - logger.info(cmd) - p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - while 1: - with 
open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - - -def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl, gpus_rmvpe): - gpus = gpus.split("-") - os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True) - f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w") - f.close() - if if_f0: - if f0method != "rmvpe_gpu": - cmd = ( - '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s' - % ( - config.python_cmd, - now_dir, - exp_dir, - n_p, - f0method, - echl, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , stdin=PIPE, stdout=PIPE,stderr=PIPE - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done, - args=( - done, - p, - ), - ).start() - else: - if gpus_rmvpe != "-": - gpus_rmvpe = gpus_rmvpe.split("-") - leng = len(gpus_rmvpe) - ps = [] - for idx, n_g in enumerate(gpus_rmvpe): - cmd = ( - '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s ' - % ( - config.python_cmd, - leng, - idx, - n_g, - now_dir, - exp_dir, - config.is_half, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done_multi, # - args=( - done, - ps, - ), - ).start() - else: - cmd = ( - config.python_cmd - + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" ' - % ( - now_dir, - exp_dir, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - p.wait() - done = [True] - while 1: - with open( - "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r" - ) as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - ####对不同part分别开多进程 - """ - n_part=int(sys.argv[1]) - i_part=int(sys.argv[2]) - i_gpu=sys.argv[3] - exp_dir=sys.argv[4] - os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu) - """ - leng = len(gpus) - ps = [] - for idx, n_g in enumerate(gpus): - cmd = ( - '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s' - % ( - config.python_cmd, - config.device, - leng, - idx, - n_g, - now_dir, - exp_dir, - version19, - ) - ) - logger.info(cmd) - p = Popen( - cmd, shell=True, cwd=now_dir - ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir - ps.append(p) - ###煞笔gr, popen read都非得全跑完了再一次性读取, 不用gr就正常读一句输出一句;只能额外弄出一个文本流定时读 - done = [False] - threading.Thread( - target=if_done_multi, - args=( - done, - ps, - ), - ).start() - while 1: - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - yield (f.read()) - sleep(1) - if done[0]: - break - with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f: - log = f.read() - logger.info(log) - yield log - -def get_pretrained_models(path_str, f0_str, sr2): - if_pretrained_generator_exist = os.access( - "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK - ) - if_pretrained_discriminator_exist = os.access( - "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK - ) - if not 
if_pretrained_generator_exist: - logger.warn( - "assets/pretrained%s/%sG%s.pth not exist, will not use pretrained model", - path_str, - f0_str, - sr2, - ) - if not if_pretrained_discriminator_exist: - logger.warn( - "assets/pretrained%s/%sD%s.pth not exist, will not use pretrained model", - path_str, - f0_str, - sr2, - ) - return ( - "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2) - if if_pretrained_generator_exist - else "", - "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2) - if if_pretrained_discriminator_exist - else "", - ) - -def change_sr2(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - f0_str = "f0" if if_f0_3 else "" - return get_pretrained_models(path_str, f0_str, sr2) - - -def change_version19(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - if sr2 == "32k" and version19 == "v1": - sr2 = "40k" - to_return_sr2 = ( - {"choices": ["40k", "48k"], "__type__": "update", "value": sr2} - if version19 == "v1" - else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2} - ) - f0_str = "f0" if if_f0_3 else "" - return ( - *get_pretrained_models(path_str, f0_str, sr2), - to_return_sr2, - ) - - -def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15 - path_str = "" if version19 == "v1" else "_v2" - return ( - {"visible": if_f0_3, "__type__": "update"}, - *get_pretrained_models(path_str, "f0", sr2), - ) - - -global log_interval - -def set_log_interval(exp_dir, batch_size12): - log_interval = 1 - folder_path = os.path.join(exp_dir, "1_16k_wavs") - - if os.path.isdir(folder_path): - wav_files_num = len(glob1(folder_path,"*.wav")) - - if wav_files_num > 0: - log_interval = math.ceil(wav_files_num / batch_size12) - if log_interval > 1: - log_interval += 1 - - return log_interval - -global PID, PROCESS - -def click_train( - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, -): - CSVutil("csvdb/stop.csv", "w+", "formanting", False) - # 生成filelist - exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - if if_f0_3: - f0_dir = "%s/2a_f0" % (exp_dir) - f0nsf_dir = "%s/2b-f0nsf" % (exp_dir) - names = ( - set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) - & set([name.split(".")[0] for name in os.listdir(feature_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0_dir)]) - & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)]) - ) - else: - names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set( - [name.split(".")[0] for name in os.listdir(feature_dir)] - ) - opt = [] - for name in names: - if if_f0_3: - opt.append( - "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - f0_dir.replace("\\", "\\\\"), - name, - f0nsf_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - else: - opt.append( - "%s/%s.wav|%s/%s.npy|%s" - % ( - gt_wavs_dir.replace("\\", "\\\\"), - name, - feature_dir.replace("\\", "\\\\"), - name, - spk_id5, - ) - ) - fea_dim = 256 if version19 == "v1" else 768 - if if_f0_3: - for _ in range(2): - opt.append( - 
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5) - ) - else: - for _ in range(2): - opt.append( - "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s" - % (now_dir, sr2, now_dir, fea_dim, spk_id5) - ) - shuffle(opt) - with open("%s/filelist.txt" % exp_dir, "w") as f: - f.write("\n".join(opt)) - logger.debug("Write filelist done") - # 生成config#无需生成config - # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0" - logger.info("Use gpus: %s", str(gpus16)) - if pretrained_G14 == "": - logger.info("No pretrained Generator") - if pretrained_D15 == "": - logger.info("No pretrained Discriminator") - if version19 == "v1" or sr2 == "40k": - config_path = "v1/%s.json" % sr2 - else: - config_path = "v2/%s.json" % sr2 - config_save_path = os.path.join(exp_dir, "config.json") - if not pathlib.Path(config_save_path).exists(): - with open(config_save_path, "w", encoding="utf-8") as f: - json.dump( - config.json_config[config_path], - f, - ensure_ascii=False, - indent=4, - sort_keys=True, - ) - f.write("\n") - if gpus16: - cmd = ( - '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' - % ( - config.python_cmd, - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - gpus16, - total_epoch11, - save_epoch10, - "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", - "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - else: - cmd = ( - '"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s' - % ( - config.python_cmd, - exp_dir1, - sr2, - 1 if if_f0_3 else 0, - batch_size12, - total_epoch11, - save_epoch10, - "-pg %s" % pretrained_G14 if pretrained_G14 != "" else "", - "-pd %s" % pretrained_D15 if pretrained_D15 != "" else "", - 1 if if_save_latest13 == True else 0, - 1 if if_cache_gpu17 == True else 0, - 1 if if_save_every_weights18 == True else 0, - version19, - ) - ) - logger.info(cmd) - global p - p = Popen(cmd, shell=True, cwd=now_dir) - global PID - PID = p.pid - - p.wait() - - return i18n("Training is done, check train.log"), {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"} - - -def train_index(exp_dir1, version19): - # exp_dir = "%s/logs/%s" % (now_dir, exp_dir1) - exp_dir = "logs/%s" % (exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - feature_dir = ( - "%s/3_feature256" % (exp_dir) - if version19 == "v1" - else "%s/3_feature768" % (exp_dir) - ) - if not os.path.exists(feature_dir): - return "请先进行特征提取!" - listdir_res = list(os.listdir(feature_dir)) - if len(listdir_res) == 0: - return "请先进行特征提取!" - infos = [] - npys = [] - for name in sorted(listdir_res): - phone = np.load("%s/%s" % (feature_dir, name)) - npys.append(phone) - big_npy = np.concatenate(npys, 0) - big_npy_idx = np.arange(big_npy.shape[0]) - np.random.shuffle(big_npy_idx) - big_npy = big_npy[big_npy_idx] - if big_npy.shape[0] > 2e5: - infos.append("Trying doing kmeans %s shape to 10k centers." 
% big_npy.shape[0]) - yield "\n".join(infos) - try: - big_npy = ( - MiniBatchKMeans( - n_clusters=10000, - verbose=True, - batch_size=256 * config.n_cpu, - compute_labels=False, - init="random", - ) - .fit(big_npy) - .cluster_centers_ - ) - except: - info = traceback.format_exc() - logger.info(info) - infos.append(info) - yield "\n".join(infos) - - np.save("%s/total_fea.npy" % exp_dir, big_npy) - n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) - infos.append("%s,%s" % (big_npy.shape, n_ivf)) - yield "\n".join(infos) - index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf) - # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf) - infos.append("training") - yield "\n".join(infos) - index_ivf = faiss.extract_index_ivf(index) # - index_ivf.nprobe = 1 - index.train(big_npy) - faiss.write_index( - index, - "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - - infos.append("adding") - yield "\n".join(infos) - batch_size_add = 8192 - for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) - faiss.write_index( - index, - "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19), - ) - infos.append( - "Successful Index Construction,added_IVF%s_Flat_nprobe_%s_%s_%s.index" - % (n_ivf, index_ivf.nprobe, exp_dir1, version19) - ) - # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19)) - # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19)) - yield "\n".join(infos) - -def change_info_(ckpt_path): - if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")): - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - try: - with open( - ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r" - ) as f: - info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1]) - sr, f0 = info["sample_rate"], info["if_f0"] - version = "v2" if ("version" in info and info["version"] == "v2") else "v1" - return sr, str(f0), version - except: - traceback.print_exc() - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - -F0GPUVisible = config.dml == False - - -def change_f0_method(f0method8): - if f0method8 == "rmvpe_gpu": - visible = F0GPUVisible - else: - visible = False - return {"visible": visible, "__type__": "update"} - - - -def export_onnx(model_path, exported_path): - device = torch.device("cpu") - checkpoint = torch.load(model_path, map_location=device) - vec_channels = 256 if checkpoint.get("version", "v1") == "v1" else 768 - - test_inputs = { - "phone": torch.rand(1, 200, vec_channels), - "phone_lengths": torch.LongTensor([200]), - "pitch": torch.randint(5, 255, (1, 200)), - "pitchf": torch.rand(1, 200), - "ds": torch.zeros(1).long(), - "rnd": torch.rand(1, 192, 200) - } - - checkpoint["config"][-3] = checkpoint["weight"]["emb_g.weight"].shape[0] - net_g = SynthesizerTrnMsNSFsidM(*checkpoint["config"], is_half=False, version=checkpoint.get("version", "v1")) - - net_g.load_state_dict(checkpoint["weight"], strict=False) - net_g = net_g.to(device) - - dynamic_axes = {"phone": [1], "pitch": [1], "pitchf": [1], "rnd": [2]} - - torch.onnx.export( - net_g, - tuple(value.to(device) for value in test_inputs.values()), - exported_path, - dynamic_axes=dynamic_axes, - do_constant_folding=False, - opset_version=13, - verbose=False, - 
input_names=list(test_inputs.keys()), - output_names=["audio"], - ) - return "Finished" - - - -import re as regex -import scipy.io.wavfile as wavfile - -cli_current_page = "HOME" - - -def cli_split_command(com): - exp = r'(?:(?<=\s)|^)"(.*?)"(?=\s|$)|(\S+)' - split_array = regex.findall(exp, com) - split_array = [group[0] if group[0] else group[1] for group in split_array] - return split_array - - -def execute_generator_function(genObject): - for _ in genObject: - pass - - -def cli_infer(com): - # get VC first - com = cli_split_command(com) - model_name = com[0] - source_audio_path = com[1] - output_file_name = com[2] - feature_index_path = com[3] - f0_file = None # Not Implemented Yet - - # Get parameters for inference - speaker_id = int(com[4]) - transposition = float(com[5]) - f0_method = com[6] - crepe_hop_length = int(com[7]) - harvest_median_filter = int(com[8]) - resample = int(com[9]) - mix = float(com[10]) - feature_ratio = float(com[11]) - protection_amnt = float(com[12]) - protect1 = 0.5 - - if com[14] == "False" or com[14] == "false": - DoFormant = False - Quefrency = 0.0 - Timbre = 0.0 - CSVutil( - "csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre - ) - - else: - DoFormant = True - Quefrency = float(com[15]) - Timbre = float(com[16]) - CSVutil( - "csvdb/formanting.csv", "w+", "formanting", DoFormant, Quefrency, Timbre - ) - - print("Mangio-RVC-Fork Infer-CLI: Starting the inference...") - vc_data = vc.get_vc(model_name, protection_amnt, protect1) - print(vc_data) - print("Mangio-RVC-Fork Infer-CLI: Performing inference...") - conversion_data = vc.vc_single( - speaker_id, - source_audio_path, - source_audio_path, - transposition, - f0_file, - f0_method, - feature_index_path, - feature_index_path, - feature_ratio, - harvest_median_filter, - resample, - mix, - protection_amnt, - crepe_hop_length, - ) - if "Success." in conversion_data[0]: - print( - "Mangio-RVC-Fork Infer-CLI: Inference succeeded. Writing to %s/%s..." - % ("audio-outputs", output_file_name) - ) - wavfile.write( - "%s/%s" % ("audio-outputs", output_file_name), - conversion_data[1][0], - conversion_data[1][1], - ) - print( - "Mangio-RVC-Fork Infer-CLI: Finished! Saved output to %s/%s" - % ("audio-outputs", output_file_name) - ) - else: - print("Mangio-RVC-Fork Infer-CLI: Inference failed. 
Here's the traceback: ") - print(conversion_data[0]) - - -def cli_pre_process(com): - com = cli_split_command(com) - model_name = com[0] - trainset_directory = com[1] - sample_rate = com[2] - num_processes = int(com[3]) - - print("Mangio-RVC-Fork Pre-process: Starting...") - generator = preprocess_dataset( - trainset_directory, model_name, sample_rate, num_processes - ) - execute_generator_function(generator) - print("Mangio-RVC-Fork Pre-process: Finished") - - -def cli_extract_feature(com): - com = cli_split_command(com) - model_name = com[0] - gpus = com[1] - num_processes = int(com[2]) - has_pitch_guidance = True if (int(com[3]) == 1) else False - f0_method = com[4] - crepe_hop_length = int(com[5]) - version = com[6] # v1 or v2 - - print("Mangio-RVC-CLI: Extract Feature Has Pitch: " + str(has_pitch_guidance)) - print("Mangio-RVC-CLI: Extract Feature Version: " + str(version)) - print("Mangio-RVC-Fork Feature Extraction: Starting...") - generator = extract_f0_feature( - gpus, - num_processes, - f0_method, - has_pitch_guidance, - model_name, - version, - crepe_hop_length, - ) - execute_generator_function(generator) - print("Mangio-RVC-Fork Feature Extraction: Finished") - - -def cli_train(com): - com = cli_split_command(com) - model_name = com[0] - sample_rate = com[1] - has_pitch_guidance = True if (int(com[2]) == 1) else False - speaker_id = int(com[3]) - save_epoch_iteration = int(com[4]) - total_epoch = int(com[5]) # 10000 - batch_size = int(com[6]) - gpu_card_slot_numbers = com[7] - if_save_latest = True if (int(com[8]) == 1) else False - if_cache_gpu = True if (int(com[9]) == 1) else False - if_save_every_weight = True if (int(com[10]) == 1) else False - version = com[11] - - pretrained_base = "pretrained/" if version == "v1" else "pretrained_v2/" - - g_pretrained_path = "%sf0G%s.pth" % (pretrained_base, sample_rate) - d_pretrained_path = "%sf0D%s.pth" % (pretrained_base, sample_rate) - - print("Mangio-RVC-Fork Train-CLI: Training...") - click_train( - model_name, - sample_rate, - has_pitch_guidance, - speaker_id, - save_epoch_iteration, - total_epoch, - batch_size, - if_save_latest, - g_pretrained_path, - d_pretrained_path, - gpu_card_slot_numbers, - if_cache_gpu, - if_save_every_weight, - version, - ) - - -def cli_train_feature(com): - com = cli_split_command(com) - model_name = com[0] - version = com[1] - print("Mangio-RVC-Fork Train Feature Index-CLI: Training... 
Please wait") - generator = train_index(model_name, version) - execute_generator_function(generator) - print("Mangio-RVC-Fork Train Feature Index-CLI: Done!") - - -def cli_extract_model(com): - com = cli_split_command(com) - model_path = com[0] - save_name = com[1] - sample_rate = com[2] - has_pitch_guidance = com[3] - info = com[4] - version = com[5] - extract_small_model_process = extract_small_model( - model_path, save_name, sample_rate, has_pitch_guidance, info, version - ) - if extract_small_model_process == "Success.": - print("Mangio-RVC-Fork Extract Small Model: Success!") - else: - print(str(extract_small_model_process)) - print("Mangio-RVC-Fork Extract Small Model: Failed!") - - -def preset_apply(preset, qfer, tmbr): - if str(preset) != "": - with open(str(preset), "r") as p: - content = p.readlines() - qfer, tmbr = content[0].split("\n")[0], content[1] - formant_apply(qfer, tmbr) - else: - pass - return ( - {"value": qfer, "__type__": "update"}, - {"value": tmbr, "__type__": "update"}, - ) - - -def print_page_details(): - if cli_current_page == "HOME": - print( - "\n go home : Takes you back to home with a navigation list." - "\n go infer : Takes you to inference command execution." - "\n go pre-process : Takes you to training step.1) pre-process command execution." - "\n go extract-feature : Takes you to training step.2) extract-feature command execution." - "\n go train : Takes you to training step.3) being or continue training command execution." - "\n go train-feature : Takes you to the train feature index command execution." - "\n go extract-model : Takes you to the extract small model command execution." - ) - elif cli_current_page == "INFER": - print( - "\n arg 1) model name with .pth in ./weights: mi-test.pth" - "\n arg 2) source audio path: myFolder\\MySource.wav" - "\n arg 3) output file name to be placed in './audio-outputs': MyTest.wav" - "\n arg 4) feature index file path: logs/mi-test/added_IVF3042_Flat_nprobe_1.index" - "\n arg 5) speaker id: 0" - "\n arg 6) transposition: 0" - "\n arg 7) f0 method: harvest (pm, harvest, crepe, crepe-tiny, hybrid[x,x,x,x], mangio-crepe, mangio-crepe-tiny, rmvpe)" - "\n arg 8) crepe hop length: 160" - "\n arg 9) harvest median filter radius: 3 (0-7)" - "\n arg 10) post resample rate: 0" - "\n arg 11) mix volume envelope: 1" - "\n arg 12) feature index ratio: 0.78 (0-1)" - "\n arg 13) Voiceless Consonant Protection (Less Artifact): 0.33 (Smaller number = more protection. 
0.50 means Dont Use.)" - "\n arg 14) Whether to formant shift the inference audio before conversion: False (if set to false, you can ignore setting the quefrency and timbre values for formanting)" - "\n arg 15)* Quefrency for formanting: 8.0 (no need to set if arg14 is False/false)" - "\n arg 16)* Timbre for formanting: 1.2 (no need to set if arg14 is False/false) \n" - "\nExample: mi-test.pth saudio/Sidney.wav myTest.wav logs/mi-test/added_index.index 0 -2 harvest 160 3 0 1 0.95 0.33 0.45 True 8.0 1.2" - ) - elif cli_current_page == "PRE-PROCESS": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Trainset directory: mydataset (or) E:\\my-data-set" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Number of CPU threads to use: 8 \n" - "\nExample: mi-test mydataset 40k 24" - ) - elif cli_current_page == "EXTRACT-FEATURE": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 3) Number of CPU threads to use: 8" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 5) f0 Method: harvest (pm, harvest, dio, crepe)" - "\n arg 6) Crepe hop length: 128" - "\n arg 7) Version for pre-trained models: v2 (use either v1 or v2)\n" - "\nExample: mi-test 0 24 1 harvest 128 v2" - ) - elif cli_current_page == "TRAIN": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 3) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 4) speaker id: 0" - "\n arg 5) Save epoch iteration: 50" - "\n arg 6) Total epochs: 10000" - "\n arg 7) Batch size: 8" - "\n arg 8) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 9) Save only the latest checkpoint: 0 (0 for no, 1 for yes)" - "\n arg 10) Whether to cache training set to vram: 0 (0 for no, 1 for yes)" - "\n arg 11) Save extracted small model every generation?: 0 (0 for no, 1 for yes)" - "\n arg 12) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test 40k 1 0 50 10000 8 0 0 0 0 v2" - ) - elif cli_current_page == "TRAIN-FEATURE": - print( - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test v2" - ) - elif cli_current_page == "EXTRACT-MODEL": - print( - "\n arg 1) Model Path: logs/mi-test/G_168000.pth" - "\n arg 2) Model save name: MyModel" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - '\n arg 5) Model information: "My Model"' - "\n arg 6) Model architecture version: v2 (use either v1 or v2)\n" - '\nExample: logs/mi-test/G_168000.pth MyModel 40k 1 "Created by Cole Mangio" v2' - ) - -def change_page(page): - global cli_current_page - cli_current_page = page - return 0 - -def execute_command(com): - if com == "go home": - return change_page("HOME") - elif com == "go infer": - return change_page("INFER") - elif com == "go pre-process": - return change_page("PRE-PROCESS") - elif com == "go extract-feature": - return change_page("EXTRACT-FEATURE") - elif com == "go train": - return change_page("TRAIN") - elif com == "go train-feature": - return change_page("TRAIN-FEATURE") - elif com == "go extract-model": - return change_page("EXTRACT-MODEL") - else: - if com[:3] == "go ": - print("page '%s' does not exist!" 
% com[3:]) - return 0 - - if cli_current_page == "INFER": - cli_infer(com) - elif cli_current_page == "PRE-PROCESS": - cli_pre_process(com) - elif cli_current_page == "EXTRACT-FEATURE": - cli_extract_feature(com) - elif cli_current_page == "TRAIN": - cli_train(com) - elif cli_current_page == "TRAIN-FEATURE": - cli_train_feature(com) - elif cli_current_page == "EXTRACT-MODEL": - cli_extract_model(com) - -def cli_navigation_loop(): - while True: - print("\nYou are currently in '%s':" % cli_current_page) - print_page_details() - command = input("%s: " % cli_current_page) - try: - execute_command(command) - except: - print(traceback.format_exc()) - - -if config.is_cli: - print("\n\nMangio-RVC-Fork v2 CLI App!\n") - print( - "Welcome to the CLI version of RVC. Please read the documentation on https://github.com/Mangio621/Mangio-RVC-Fork (README.MD) to understand how to use this app.\n" - ) - cli_navigation_loop() - - - - - -def switch_pitch_controls(f0method0): - is_visible = f0method0 != 'rmvpe' - - if rvc_globals.NotesOrHertz: - return ( - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"} - ) - else: - return ( - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"} - ) - -def match_index(sid0): - picked = False - # folder = sid0.split('.')[0] - - # folder = re.split(r'. |_', sid0)[0] - folder = sid0.split(".")[0].split("_")[0] - # folder_test = sid0.split('.')[0].split('_')[0].split('-')[0] - parent_dir = "./logs/" + folder - # print(parent_dir) - if os.path.exists(parent_dir): - # print('path exists') - for filename in os.listdir(parent_dir.replace("\\", "/")): - if filename.endswith(".index"): - for i in range(len(indexes_list)): - if indexes_list[i] == ( - os.path.join(("./logs/" + folder), filename).replace("\\", "/") - ): - # print('regular index found') - break - else: - if indexes_list[i] == ( - os.path.join( - ("./logs/" + folder.lower()), filename - ).replace("\\", "/") - ): - # print('lowered index found') - parent_dir = "./logs/" + folder.lower() - break - # elif (indexes_list[i]).casefold() == ((os.path.join(("./logs/" + folder), filename).replace('\\','/')).casefold()): - # print('8') - # parent_dir = "./logs/" + folder.casefold() - # break - # elif (indexes_list[i]) == ((os.path.join(("./logs/" + folder_test), filename).replace('\\','/'))): - # parent_dir = "./logs/" + folder_test - # print(parent_dir) - # break - # elif (indexes_list[i]) == (os.path.join(("./logs/" + folder_test.lower()), filename).replace('\\','/')): - # parent_dir = "./logs/" + folder_test - # print(parent_dir) - # break - # else: - # #print('couldnt find index') - # continue - - # print('all done') - index_path = os.path.join( - parent_dir.replace("\\", "/"), filename.replace("\\", "/") - ).replace("\\", "/") - # print(index_path) - return (index_path, index_path) - - else: - # print('nothing found') - return ("", "") - -def stoptraining(mim): - if int(mim) == 1: - CSVutil("csvdb/stop.csv", "w+", "stop", "True") - # p.terminate() - # p.kill() - try: - os.kill(PID, signal.SIGTERM) - except Exception as e: - print(f"Couldn't click due to {e}") - pass - else: - pass - - return ( - {"visible": False, "__type__": "update"}, - {"visible": True, "__type__": "update"}, - ) - -weights_dir = 'weights/' - -def note_to_hz(note_name): - SEMITONES = {'C': -9, 
'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number - return frequency - -def save_to_wav(record_button): - if record_button is None: - pass - else: - path_to_file=record_button - new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav' - new_path='./audios/'+new_name - shutil.move(path_to_file,new_path) - return new_name -def save_to_wav2_edited(dropbox): - if dropbox is None: - pass - else: - file_path = dropbox.name - target_path = os.path.join('audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return -def save_to_wav2(dropbox): - file_path = dropbox.name - target_path = os.path.join('audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return target_path - -from gtts import gTTS -import edge_tts -import asyncio - - - - -def custom_voice( - _values, # filter indices - audio_files, # all audio files - model_voice_path='', - transpose=0, - f0method='pm', - index_rate_=float(0.66), - crepe_hop_length_=float(64), - f0_autotune=False, - file_index='', - file_index2='', - ): - - vc.get_vc(model_voice_path) - - - for _value_item in _values: - filename = "audio2/"+audio_files[_value_item] if _value_item != "converted_tts" else audio_files[0] - #filename = "audio2/"+audio_files[_value_item] - try: - print(audio_files[_value_item], model_voice_path) - except: - pass - info_, (sample_, audio_output_) = vc.vc_single_dont_save( - sid=0, - input_audio_path0=filename, #f"audio2/{filename}", - input_audio_path1=filename, #f"audio2/{filename}", - f0_up_key=transpose, # transpose for m to f and reverse 0 12 - f0_file=None, - f0_method= f0method, - file_index= file_index, # dir pwd? 
- file_index2= file_index2, - # file_big_npy1, - index_rate= index_rate_, - filter_radius= int(3), - resample_sr= int(0), - rms_mix_rate= float(0.25), - protect= float(0.33), - crepe_hop_length= crepe_hop_length_, - f0_autotune=f0_autotune, - f0_min=50, - note_min=50, - f0_max=1100, - note_max=1100 - ) - - sf.write( - file= filename, #f"audio2/{filename}", - samplerate=sample_, - data=audio_output_ - ) -def cast_to_device(tensor, device): - try: - return tensor.to(device) - except Exception as e: - print(e) - return tensor - - -def __bark__(text, voice_preset): - os.makedirs(os.path.join(now_dir,"tts"), exist_ok=True) - from transformers import AutoProcessor, BarkModel - device = "cuda:0" if torch.cuda.is_available() else "cpu" - dtype = torch.float32 if "cpu" in device else torch.float16 - bark_processor = AutoProcessor.from_pretrained( - "suno/bark", - cache_dir=os.path.join(now_dir,"tts","suno/bark"), - torch_dtype=dtype) - bark_model = BarkModel.from_pretrained( - "suno/bark", - cache_dir=os.path.join(now_dir,"tts","suno/bark"), - torch_dtype=dtype).to(device) - # bark_model.enable_cpu_offload() - inputs = bark_processor( - text=[text], - return_tensors="pt", - voice_preset=voice_preset - ) - tensor_dict = {k: cast_to_device(v,device) if hasattr(v,"to") else v for k, v in inputs.items()} - speech_values = bark_model.generate(**tensor_dict, do_sample=True) - sampling_rate = bark_model.generation_config.sample_rate - speech = speech_values.cpu().numpy().squeeze() - return speech, sampling_rate - - - -def make_test( - tts_text, - tts_voice, - model_path, - index_path, - transpose, - f0_method, - index_rate, - crepe_hop_length, - f0_autotune, - tts_method - ): - - if tts_voice == None: - return - - filename = os.path.join(now_dir, "audio-outputs", "converted_tts.wav") - if "SET_LIMIT" == os.getenv("DEMO"): - if len(tts_text) > 60: - tts_text = tts_text[:60] - print("DEMO; limit to 60 characters") - - language = tts_voice[:2] - if tts_method == "Edge-tts": - try: - #nest_asyncio.apply() # gradio;not - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save(filename)) - except: - try: - tts = gTTS(tts_text, lang=language) - tts.save(filename) - tts.save - print(f'No audio was received. Please change the tts voice for {tts_voice}. 
USING gTTS.') - except: - tts = gTTS('a', lang=language) - tts.save(filename) - print('Error: Audio will be replaced.') - - os.system("cp audio-outputs/converted_tts.wav audio-outputs/real_tts.wav") - - custom_voice( - ["converted_tts"], # filter indices - ["audio-outputs/converted_tts.wav"], # all audio files - model_voice_path=model_path, - transpose=transpose, - f0method=f0_method, - index_rate_=index_rate, - crepe_hop_length_=crepe_hop_length, - f0_autotune=f0_autotune, - file_index='', - file_index2=index_path, - ) - return os.path.join(now_dir, "audio-outputs", "converted_tts.wav"), os.path.join(now_dir, "audio-outputs", "real_tts.wav") - elif tts_method == "Bark-tts": - try: - - script = tts_text.replace("\n", " ").strip() - sentences = sent_tokenize(script) - print(sentences) - silence = np.zeros(int(0.25 * SAMPLE_RATE)) - pieces = [] - nombre_archivo = os.path.join(now_dir, "audio-outputs", "bark_out.wav") - for sentence in sentences: - audio_array , _ = __bark__(sentence, tts_voice.split("-")[0]) - pieces += [audio_array, silence.copy()] - - sf.write( - file= nombre_archivo, - samplerate=SAMPLE_RATE, - data=np.concatenate(pieces) - ) - vc.get_vc(model_path) - info_, (sample_, audio_output_) = vc.vc_single_dont_save( - sid=0, - input_audio_path0=os.path.join(now_dir, "audio-outputs", "bark_out.wav"), #f"audio2/{filename}", - input_audio_path1=os.path.join(now_dir, "audio-outputs", "bark_out.wav"), #f"audio2/{filename}", - f0_up_key=transpose, # transpose for m to f and reverse 0 12 - f0_file=None, - f0_method=f0_method, - file_index= '', # dir pwd? - file_index2= index_path, - # file_big_npy1, - index_rate= index_rate, - filter_radius= int(3), - resample_sr= int(0), - rms_mix_rate= float(0.25), - protect= float(0.33), - crepe_hop_length= crepe_hop_length, - f0_autotune=f0_autotune, - f0_min=50, - note_min=50, - f0_max=1100, - note_max=1100 - ) - wavfile.write(os.path.join(now_dir, "audio-outputs", "converted_bark.wav"), rate=sample_, data=audio_output_) - return os.path.join(now_dir, "audio-outputs", "converted_bark.wav"), nombre_archivo - - except Exception as e: - print(f"{e}") - return None, None - - - - - - -def GradioSetup(UTheme=gr.themes.Soft()): - - default_weight = names[0] if names else '' - - with gr.Blocks(theme='JohnSmith9982/small_and_pretty', title="Applio") as app: - gr.HTML("

    🍏 Applio (Mangio-RVC-Fork HF)

    ") - gr.HTML("

    This Space runs on CPU only, so it can only be used for inference. If you have issues with the queue, I recommend duplicating the Space.

    ") - gr.Markdown( - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/r3gm/RVC_HF?duplicate=true)\n\n" - ) - with gr.Tabs(): - with gr.TabItem(i18n("Model Inference")): - with gr.Row(): - sid0 = gr.Dropdown(label=i18n("Inferencing voice:"), choices=sorted(names), value=default_weight) - refresh_button = gr.Button(i18n("Refresh"), variant="primary") - clean_button = gr.Button(i18n("Unload voice to save GPU memory"), variant="primary") - clean_button.click(fn=lambda: ({"value": "", "__type__": "update"}), inputs=[], outputs=[sid0]) - - - with gr.TabItem(i18n("Single")): - with gr.Row(): - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("Select Speaker/Singer ID:"), - value=0, - visible=False, - interactive=True, - ) - - - with gr.Group(): - with gr.Row(): - with gr.Column(): # First column for audio-related inputs - dropbox = gr.File(label=i18n("Drag your audio here:")) - record_button=gr.Audio(source="microphone", label=i18n("Or record an audio:"), type="filepath") - input_audio0 = gr.Textbox( - label=i18n("Manual path to the audio file to be processed"), - value=os.path.join(now_dir, "audios", "someguy.mp3"), - visible=False - ) - input_audio1 = gr.Dropdown( - label=i18n("Auto detect audio path and select from the dropdown:"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - - input_audio1.select(fn=lambda:'',inputs=[],outputs=[input_audio0]) - input_audio0.input(fn=lambda:'',inputs=[],outputs=[input_audio1]) - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0]) - dropbox.upload(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0]) - record_button.change(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - - best_match_index_path1 = match_index(sid0.value) # Get initial index from default sid0 (first voice model in list) - - with gr.Column(): # Second column for pitch shift and other options - file_index2 = gr.Dropdown( - label=i18n("Auto-detect index path and select from the dropdown:"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - allow_custom_value=True, - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Search feature ratio:"), - value=0.75, - interactive=True, - ) - refresh_button.click( - fn=change_choices, inputs=[], outputs=[sid0, file_index2, input_audio1] - ) - with gr.Column(): - vc_transform0 = gr.Number( - label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0 - ) - - # Create a checkbox for advanced settings - advanced_settings_checkbox = gr.Checkbox( - value=False, - label=i18n("Advanced Settings"), - interactive=True, - ) - - # Advanced settings container - with gr.Column(visible=False) as advanced_settings: # Initially hidden - with gr.Row(label = i18n("Advanced Settings"), open = False): - with gr.Column(): - f0method0 = gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny", "rmvpe", "rmvpe+"], - value="rmvpe+", - interactive=True, - ) - f0_autotune = gr.Checkbox( - label="Enable autotune", - interactive=True - ) - crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers 
to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."), - value=120, - interactive=True, - visible=False, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."), - value=3, - step=1, - interactive=True, - ) - - minpitch_slider = gr.Slider( - label = i18n("Min pitch:"), - info = i18n("Specify minimal pitch for inference [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 50, - maximum = 16000, - interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - minpitch_txtbox = gr.Textbox( - label = i18n("Min pitch:"), - info = i18n("Specify minimal pitch for inference [NOTE][OCTAVE]"), - placeholder = "C5", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - maxpitch_slider = gr.Slider( - label = i18n("Max pitch:"), - info = i18n("Specify max pitch for inference [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 1100, - maximum = 16000, - interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - maxpitch_txtbox = gr.Textbox( - label = i18n("Max pitch:"), - info = i18n("Specify max pitch for inference [NOTE][OCTAVE]"), - placeholder = "C6", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - with gr.Column(): - file_index1 = gr.Textbox( - label=i18n("Feature search database file path:"), - value="", - interactive=True, - ) - - with gr.Accordion(label = i18n("Custom f0 [Root pitch] File"), open = False): - f0_file = gr.File(label=i18n("F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:")) - - f0method0.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method0], - outputs=[crepe_hop_length] - ) - - f0method0.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox] - ) - - with gr.Column(): - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"), - value=0.25, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. 
Decrease the value to increase protection, but it may reduce indexing accuracy:" - ), - value=0.33, - step=0.01, - interactive=True, - ) - formanting = gr.Checkbox( - value=bool(DoFormant), - label=i18n("Formant shift inference audio"), - info=i18n("Used for male to female and vice-versa conversions"), - interactive=True, - visible=True, - ) - - formant_preset = gr.Dropdown( - value='', - choices=get_fshift_presets(), - label=i18n("Browse presets for formanting"), - info=i18n("Presets are located in formantshiftcfg/ folder"), - visible=bool(DoFormant), - ) - - formant_refresh_button = gr.Button( - value='\U0001f504', - visible=bool(DoFormant), - variant='primary', - ) - - qfrency = gr.Slider( - value=Quefrency, - info=i18n("Default value is 1.0"), - label=i18n("Quefrency for formant shifting"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - - tmbre = gr.Slider( - value=Timbre, - info=i18n("Default value is 1.0"), - label=i18n("Timbre for formant shifting"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - frmntbut = gr.Button( - "Apply", variant="primary", visible=bool(DoFormant) - ) - - formant_preset.change( - fn=preset_apply, - inputs=[formant_preset, qfrency, tmbre], - outputs=[qfrency, tmbre], - ) - formanting.change( - fn=formant_enabled, - inputs=[ - formanting, - qfrency, - tmbre, - frmntbut, - formant_preset, - formant_refresh_button, - ], - outputs=[ - formanting, - qfrency, - tmbre, - frmntbut, - formant_preset, - formant_refresh_button, - ], - ) - frmntbut.click( - fn=formant_apply, - inputs=[qfrency, tmbre], - outputs=[qfrency, tmbre], - ) - formant_refresh_button.click( - fn=update_fshift_presets, - inputs=[formant_preset, qfrency, tmbre], - outputs=[formant_preset, qfrency, tmbre], - ) - - # Function to toggle advanced settings - def toggle_advanced_settings(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_checkbox.change( - fn=toggle_advanced_settings, - inputs=[advanced_settings_checkbox], - outputs=[advanced_settings] - ) - - - but0 = gr.Button(i18n("Convert"), variant="primary").style(full_width=True) - - with gr.Row(): # Defines output info + output audio download after conversion - vc_output1 = gr.Textbox(label=i18n("Output information:")) - vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)")) - - with gr.Group(): # I think this defines the big convert button - with gr.Row(): - but0.click( - vc.vc_single, - [ - spk_item, - input_audio0, - input_audio1, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - crepe_hop_length, - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox, - f0_autotune - ], - [vc_output1, vc_output2], - ) - - - with gr.TabItem(i18n("Batch")): # Dont Change - with gr.Group(): # Markdown explanation of batch inference - gr.Markdown( - value=i18n("Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. 
The converted audio will be output in the specified folder (default: 'opt').") - ) - with gr.Row(): - with gr.Column(): - vc_transform1 = gr.Number( - label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0 - ) - opt_input = gr.Textbox(label=i18n("Specify output folder:"), value="opt") - with gr.Column(): - file_index4 = gr.Dropdown( - label=i18n("Auto-detect index path and select from the dropdown:"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - ) - sid0.select(fn=match_index, inputs=[sid0], outputs=[file_index2, file_index4]) - - refresh_button.click( - fn=lambda: change_choices()[1], - inputs=[], - outputs=file_index4, - ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Search feature ratio:"), - value=0.75, - interactive=True, - ) - with gr.Row(): - dir_input = gr.Textbox( - label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"), - value=os.path.join(now_dir, "audios"), - ) - inputs = gr.File( - file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.") - ) - - with gr.Row(): - with gr.Column(): - # Create a checkbox for advanced batch settings - advanced_settings_batch_checkbox = gr.Checkbox( - value=False, - label=i18n("Advanced Settings"), - interactive=True, - ) - - # Advanced batch settings container - with gr.Row(visible=False) as advanced_settings_batch: # Initially hidden - with gr.Row(label = i18n("Advanced Settings"), open = False): - with gr.Column(): - file_index3 = gr.Textbox( - label=i18n("Feature search database file path:"), - value="", - interactive=True, - ) - - f0method1 = gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "crepe", "rmvpe"], - value="rmvpe", - interactive=True, - ) - f0_autotune = gr.Checkbox( - label="Enable autotune", - interactive=True - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."), - value=3, - step=1, - interactive=True, - ) - - with gr.Row(): - format1 = gr.Radio( - label=i18n("Export file format"), - choices=["wav", "flac", "mp3", "m4a"], - value="wav", - interactive=True, - ) - - - with gr.Column(): - resample_sr1 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"), - value=1, - interactive=True, - ) - protect1 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. 
Decrease the value to increase protection, but it may reduce indexing accuracy:" - ), - value=0.33, - step=0.01, - interactive=True, - ) - vc_output3 = gr.Textbox(label=i18n("Output information:")) - but1 = gr.Button(i18n("Convert"), variant="primary") - but1.click( - vc.vc_multi, - [ - spk_item, - dir_input, - opt_input, - inputs, - vc_transform1, - f0method1, - file_index3, - file_index4, - index_rate2, - filter_radius1, - resample_sr1, - rms_mix_rate1, - protect1, - format1, - crepe_hop_length, - minpitch_slider if (not rvc_globals.NotesOrHertz) else minpitch_txtbox, - maxpitch_slider if (not rvc_globals.NotesOrHertz) else maxpitch_txtbox, - f0_autotune - ], - [vc_output3], - ) - - sid0.change( - fn=vc.get_vc, - inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1], - ) - if not sid0.value == '': - spk_item, protect0, protect1 = vc.get_vc(sid0.value, protect0, protect1) - - #spk_item, protect0, protect1 = vc.get_vc(sid0.value, protect0, protect1) - - # Function to toggle advanced settings - def toggle_advanced_settings_batch(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_batch_checkbox.change( - fn=toggle_advanced_settings_batch, - inputs=[advanced_settings_batch_checkbox], - outputs=[advanced_settings_batch] - ) - - - with gr.TabItem(i18n("Train")): - gr.Markdown("Training and All in One Inference Without UI/Gradio, Prevent Banning") - gr.Markdown("[Repository](https://github.com/ardha27/AI-Song-Cover-RVC)") - - with gr.Accordion(label=i18n("Step 1: Processing data")): - with gr.Row(): - exp_dir1 = gr.Textbox(label=i18n("Enter the model name:"), value=i18n("Model_Name")) - sr2 = gr.Radio( - label=i18n("Target sample rate:"), - choices=["40k", "48k", "32k"], - value="40k", - interactive=True, - ) - if_f0_3 = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version19 = gr.Radio( - label=i18n("Version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - visible=True, - ) - np7 = gr.Slider( - minimum=0, - maximum=config.n_cpu, - step=1, - label=i18n("Number of CPU processes:"), - value=int(np.ceil(config.n_cpu / 1.5)), - interactive=True, - ) - with gr.Group(): - with gr.Accordion(label=i18n("Step 2: Skipping pitch extraction")): - - with gr.Row(): - # trainset_dir4 = gr.Textbox( - # label=i18n("Enter the path of the training folder:"), value=os.path.join(now_dir, datasets_root) - # ) - with gr.Column(): - trainset_dir4 = gr.Dropdown(choices=sorted(datasets), label=i18n("Select your dataset:"), value=get_dataset()) - btn_update_dataset_list = gr.Button(i18n("Update list"), variant="primary") - spk_id5 = gr.Slider( - minimum=0, - maximum=4, - step=1, - label=i18n("Specify the model ID:"), - value=0, - interactive=True, - ) - btn_update_dataset_list.click( - easy_infer.update_dataset_list, [spk_id5], trainset_dir4 - ) - but1 = gr.Button(i18n("Process data"), variant="primary") - info1 = gr.Textbox(label=i18n("Output information:"), value="") - but1.click( - preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1] - ) - with gr.Group(): - with gr.Accordion(label=i18n("Step 3: Extracting features")): - with gr.Row(): - with gr.Column(): - gpus6 = gr.Textbox( - label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"), - value=gpus, - interactive=True, - ) - gpu_info9 = gr.Textbox( - label=i18n("GPU Information:"), value=gpu_info, visible=F0GPUVisible - ) - with gr.Column(): - f0method8 = 
gr.Radio( - label=i18n( - "Select the pitch extraction algorithm:" - ), - choices=["pm", "harvest", "dio", "crepe", "mangio-crepe", "rmvpe", "rmvpe_gpu"], - # [ MANGIO ]: Fork feature: Crepe on f0 extraction for training. - value="rmvpe", - interactive=True, - ) - gpus_rmvpe = gr.Textbox( - label=i18n( - "rmvpe卡号配置:以-分隔输入使用的不同进程卡号,例如0-0-1使用在卡0上跑2个进程并在卡1上跑1个进程" - ), - value="%s-%s" % (gpus, gpus), - interactive=True, - visible=F0GPUVisible, - ) - - extraction_crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."), - value=64, - interactive=True, - visible=False, - ) - - f0method8.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method8], - outputs=[extraction_crepe_hop_length] - ) - f0method8.change( - fn=change_f0_method, - inputs=[f0method8], - outputs=[gpus_rmvpe], - ) - but2 = gr.Button(i18n("Feature extraction"), variant="primary") - info2 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8, interactive=False) - but2.click( - extract_f0_feature, - [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length, gpus_rmvpe,], - [info2], - ) - with gr.Group(): - with gr.Row(): - with gr.Accordion(label=i18n("Step 4: Model training started")): - with gr.Row(): - save_epoch10 = gr.Slider( - minimum=1, - maximum=100, - step=1, - label=i18n("Save frequency:"), - value=10, - interactive=True, - visible=True, - ) - total_epoch11 = gr.Slider( - minimum=1, - maximum=10000, - step=2, - label=i18n("Training epochs:"), - value=750, - interactive=True, - ) - batch_size12 = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("Batch size per GPU:"), - value=default_batch_size, - #value=20, - interactive=True, - ) - - with gr.Row(): - if_save_latest13 = gr.Checkbox( - label=i18n("Whether to save only the latest .ckpt file to save hard drive space"), - value=True, - interactive=True, - ) - if_cache_gpu17 = gr.Checkbox( - label=i18n("Cache all training sets to GPU memory. 
Caching small datasets (less than 10 minutes) can speed up training"), - value=False, - interactive=True, - ) - if_save_every_weights18 = gr.Checkbox( - label=i18n("Save a small final model to the 'weights' folder at each save point"), - value=True, - interactive=True, - ) - - with gr.Row(): - pretrained_G14 = gr.Textbox( - lines=4, - label=i18n("Load pre-trained base model G path:"), - value="assets/pretrained_v2/f0G40k.pth", - interactive=True, - ) - pretrained_D15 = gr.Textbox( - lines=4, - label=i18n("Load pre-trained base model D path:"), - value="assets/pretrained_v2/f0D40k.pth", - interactive=True, - ) - gpus16 = gr.Textbox( - label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"), - value=gpus, - interactive=True, - ) - sr2.change( - change_sr2, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15], - ) - version19.change( - change_version19, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15, sr2], - ) - if_f0_3.change( - fn=change_f0, - inputs=[if_f0_3, sr2, version19], - outputs=[f0method8, pretrained_G14, pretrained_D15], - ) - if_f0_3.change(fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), inputs=[f0method8], outputs=[extraction_crepe_hop_length]) - - butstop = gr.Button(i18n("Stop training"), - variant='primary', - visible=False, - ) - but3 = gr.Button(i18n("Train model"), variant="primary", visible=True) - but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop]) - butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[but3, butstop]) - - - with gr.Column(): - info3 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=4) - save_action = gr.Dropdown(label=i18n("Save type"), choices=[i18n("Save all"),i18n("Save D and G"),i18n("Save voice")], value=i18n("Choose the method"), interactive=True) - - but7 = gr.Button(i18n("Save model"), variant="primary") - but4 = gr.Button(i18n("Train feature index"), variant="primary") - - - - if_save_every_weights18.change( - fn=lambda if_save_every_weights: ( - { - "visible": if_save_every_weights, - "__type__": "update" - } - ), - inputs=[if_save_every_weights18], - outputs=[save_epoch10] - ) - - but3.click( - click_train, - [ - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - ], - [info3, butstop, but3], - ) - - but4.click(train_index, [exp_dir1, version19], info3) - but7.click(easy_infer.save_model, [exp_dir1, save_action], info3) - with gr.Group(): - with gr.Row(): - with gr.Accordion(label=i18n("Step 5: Export lowest points on a graph of the model")): - - lowestval_weight_dir = gr.Textbox(visible=False) - ds = gr.Textbox(visible=False) - weights_dir1 = gr.Textbox(visible=False, value=weights_dir) - - - with gr.Row(): - amntlastmdls = gr.Slider( - minimum=1, - maximum=25, - label=i18n('How many lowest points to save:'), - value=3, - step=1, - interactive=True, - ) - lpexport = gr.Button( - value=i18n('Export lowest points of a model'), - variant='primary', - ) - lw_mdls = gr.File( - file_count="multiple", - label=i18n("Output models:"), - interactive=False, - ) ##### - - with gr.Row(): - infolpex = gr.Textbox(label=i18n("Output information:"), value="", max_lines=10) - mdlbl = gr.Dataframe(label=i18n('Stats of selected models:'), datatype='number', type='pandas') - - 
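The `{"visible": ..., "__type__": "update"}` dictionaries returned throughout this file (in `switch_pitch_controls`, `stoptraining`, the advanced-settings toggles, and the `if_save_every_weights18.change` handler above) are Gradio 3.x's low-level update protocol: an event handler that returns such a dict patches the listed properties of its output component instead of replacing the component's value. A minimal, self-contained sketch of the pattern; the component names here are illustrative, not taken from the app:

```python
import gradio as gr

def toggle(show):
    # Patch only the 'visible' property of the output component;
    # everything else about the component is left untouched.
    return {"visible": show, "__type__": "update"}

with gr.Blocks() as demo:
    box = gr.Checkbox(label="Show advanced options")
    panel = gr.Textbox(label="Advanced", visible=False)
    box.change(fn=toggle, inputs=[box], outputs=[panel])

# demo.launch()  # uncomment to try it locally
```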
lpexport.click( - lambda model_name: os.path.join("logs", model_name, "lowestvals"), - inputs=[exp_dir1], - outputs=[lowestval_weight_dir] - ) - - lpexport.click(fn=tensorlowest.main, inputs=[exp_dir1, save_epoch10, amntlastmdls], outputs=[ds]) - - ds.change( - fn=tensorlowest.selectweights, - inputs=[exp_dir1, ds, weights_dir1, lowestval_weight_dir], - outputs=[infolpex, lw_mdls, mdlbl], - ) - with gr.TabItem(i18n("UVR5")): # UVR section - with gr.Group(): - with gr.Row(): - with gr.Column(): - model_select = gr.Radio( - label=i18n("Model Architecture:"), - choices=["VR", "MDX"], - value="VR", - interactive=True, - ) - dir_wav_input = gr.Textbox( - label=i18n("Enter the path of the audio folder to be processed:"), - value=os.path.join(now_dir, "audios") - ) - wav_inputs = gr.File( - file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.") - ) - - with gr.Column(): - model_choose = gr.Dropdown(label=i18n("Model:"), choices=uvr5_names) - agg = gr.Slider( - minimum=0, - maximum=20, - step=1, - label="Vocal Extraction Aggressive", - value=10, - interactive=True, - visible=False, - ) - opt_vocal_root = gr.Textbox( - label=i18n("Specify the output folder for vocals:"), value="opt" - ) - opt_ins_root = gr.Textbox( - label=i18n("Specify the output folder for accompaniment:"), value="opt" - ) - format0 = gr.Radio( - label=i18n("Export file format:"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", - interactive=True, - ) - model_select.change( - fn=update_model_choices, - inputs=model_select, - outputs=model_choose, - ) - but2 = gr.Button(i18n("Convert"), variant="primary") - vc_output4 = gr.Textbox(label=i18n("Output information:")) - #wav_inputs.upload(fn=save_to_wav2_edited, inputs=[wav_inputs], outputs=[]) - but2.click( - uvr, - [ - model_choose, - dir_wav_input, - opt_vocal_root, - wav_inputs, - opt_ins_root, - agg, - format0, - model_select - ], - [vc_output4], - ) - with gr.TabItem(i18n("TTS")): - with gr.Group(): - with gr.Column(): - text_test = gr.Textbox(label=i18n("Text:"), placeholder=i18n("Enter the text you want to convert to voice..."), lines=6) - - with gr.Group(): - with gr.Row(): - with gr.Column(): - tts_methods_voice = ["Edge-tts", "Bark-tts"] - ttsmethod_test = gr.Dropdown(tts_methods_voice, value='Edge-tts', label = i18n('TTS Method:'), visible=True) - tts_test = gr.Dropdown(set_edge_voice, label = i18n('TTS Model:'), visible=True) - ttsmethod_test.change( - fn=update_tts_methods_voice, - inputs=ttsmethod_test, - outputs=tts_test, - ) - - with gr.Column(): - model_voice_path07 = gr.Dropdown(label=i18n('RVC Model:'), choices=sorted(names), value=default_weight) - best_match_index_path1 = match_index(model_voice_path07.value) - - file_index2_07 = gr.Dropdown( - label=i18n('Select the .index file:'), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - allow_custom_value=True, - ) - #transpose_test = gr.Number(label = i18n('Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):'), value=0, visible=True, interactive= True) - - - - - with gr.Row(): - refresh_button_ = gr.Button(i18n("Refresh"), variant="primary") - refresh_button_.click(fn=change_choices2, inputs=[], outputs=[model_voice_path07, file_index2_07]) - with gr.Row(): - original_ttsvoice = gr.Audio(label=i18n('Audio TTS:')) - ttsvoice = gr.Audio(label=i18n('Audio RVC:')) - - with gr.Row(): - button_test = gr.Button(i18n("Convert"), variant="primary") 
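For the Bark path, `make_test` (wired to this Convert button just below) splits the script into sentences, synthesizes each sentence separately, and joins the pieces with short silences before passing the result through RVC. A standalone sketch of that chunk-and-join step, assuming `nltk` with its `punkt` data is installed, using a placeholder `synthesize` in place of the app's `__bark__`, and taking 24 kHz as Bark's nominal output rate:

```python
import numpy as np
from nltk.tokenize import sent_tokenize  # needs nltk's 'punkt' tokenizer data

SAMPLE_RATE = 24000  # assumed: Bark's nominal output rate

def synthesize(sentence: str) -> np.ndarray:
    # Placeholder for __bark__(sentence, voice_preset); returns mono float32 audio.
    return np.zeros(SAMPLE_RATE // 2, dtype=np.float32)

def tts_long_form(script: str) -> np.ndarray:
    sentences = sent_tokenize(script.replace("\n", " ").strip())
    silence = np.zeros(int(0.25 * SAMPLE_RATE), dtype=np.float32)  # 0.25 s gap
    pieces = []
    for sentence in sentences:
        pieces += [synthesize(sentence), silence.copy()]
    return np.concatenate(pieces) if pieces else silence
```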
- - - button_test.click(make_test, inputs=[ - text_test, - tts_test, - model_voice_path07, - file_index2_07, - #transpose_test, - vc_transform0, - f0method8, - index_rate1, - crepe_hop_length, - f0_autotune, - ttsmethod_test - ], outputs=[ttsvoice, original_ttsvoice]) - - with gr.TabItem(i18n("Resources")): - easy_infer.download_model() - easy_infer.download_backup() - easy_infer.download_dataset(trainset_dir4) - easy_infer.download_audio() - easy_infer.youtube_separator() - with gr.TabItem(i18n("Extra")): - gr.Markdown( - value=i18n("This section contains some extra utilities that often may be in experimental phases") - ) - with gr.TabItem(i18n("Merge Audios")): - with gr.Group(): - gr.Markdown( - value="## " + i18n("Merge your generated audios with the instrumental") - ) - gr.Markdown(value=".",visible=True) - gr.Markdown(value=".",visible=True) - with gr.Row(): - with gr.Column(): - dropbox = gr.File(label=i18n("Drag your audio here:")) - gr.Markdown(value=i18n("### Instrumental settings:")) - input_audio1 = gr.Dropdown( - label=i18n("Choose your instrumental:"), - choices=sorted(audio_others_paths), - value='', - interactive=True, - ) - input_audio1_scale = gr.Slider( - minimum=0, - maximum=10, - label=i18n("Volume of the instrumental audio:"), - value=1.00, - interactive=True, - ) - gr.Markdown(value=i18n("### Audio settings:")) - input_audio3 = gr.Dropdown( - label=i18n("Select the generated audio"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - with gr.Row(): - input_audio3_scale = gr.Slider( - minimum=0, - maximum=10, - label=i18n("Volume of the generated audio:"), - value=1.00, - interactive=True, - ) - - gr.Markdown(value=i18n("### Add the effects:")) - reverb_ = gr.Checkbox( - label=i18n("Reverb"), - value=False, - interactive=True, - ) - compressor_ = gr.Checkbox( - label=i18n("Compressor"), - value=False, - interactive=True, - ) - noise_gate_ = gr.Checkbox( - label=i18n("Noise Gate"), - value=False, - interactive=True, - ) - - butnone = gr.Button(i18n("Merge"), variant="primary").style(full_width=True) - - vc_output1 = gr.Textbox(label=i18n("Output information:")) - vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)"), type='filepath') - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio1]) - dropbox.upload(fn=easy_infer.change_choices2, inputs=[], outputs=[input_audio1]) - - refresh_button.click( - fn=lambda: change_choices3(), - inputs=[], - outputs=[input_audio1, input_audio3], - ) - - butnone.click( - fn=audio_combined, - inputs=[input_audio1, input_audio3,input_audio1_scale,input_audio3_scale,reverb_,compressor_,noise_gate_], - outputs=[vc_output1, vc_output2] - ) - - - with gr.TabItem(i18n("Processing")): - with gr.Group(): - - with gr.Accordion(label=i18n("Model fusion, can be used to test timbre fusion")): - with gr.Row(): - with gr.Column(): - name_to_save0 = gr.Textbox( - label=i18n("Name:"), - value="", - max_lines=1, - interactive=True, - placeholder=i18n("Name for saving") - ) - alpha_a = gr.Slider( - minimum=0, - maximum=1, - label=i18n("Weight for Model A:"), - value=0.5, - interactive=True, - ) - if_f0_ = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version_2 = gr.Radio( - label=i18n("Model architecture version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - ) - sr_ = gr.Radio( - label=i18n("Target sample rate:"), - choices=["40k", "48k"], - value="40k", - interactive=True, - ) - 
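`audio_combined`, wired to the Merge button in the Merge Audios tab above, is the app's own helper and its implementation isn't shown in this file; the core of any such merge, though, is summing the two tracks with per-track gain and guarding against clipping. A minimal sketch under those assumptions (mono or equally-shaped tracks, matching sample rates), using `soundfile`:

```python
import numpy as np
import soundfile as sf

def merge_tracks(generated_path, instrumental_path,
                 generated_gain=1.0, instrumental_gain=1.0,
                 out_path="merged.wav"):
    gen, sr_g = sf.read(generated_path, dtype="float32")
    inst, sr_i = sf.read(instrumental_path, dtype="float32")
    assert sr_g == sr_i, "resample one track first if the rates differ"
    n = min(len(gen), len(inst))                 # trim to the shorter track
    mix = gen[:n] * generated_gain + inst[:n] * instrumental_gain
    peak = float(np.max(np.abs(mix)))
    if peak > 1.0:                               # simple peak normalization
        mix /= peak
    sf.write(out_path, mix, sr_g)
    return out_path
```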
- - with gr.Column(): - ckpt_a = gr.Textbox(label=i18n("Path to Model A:"), value="", interactive=True, placeholder=i18n("Path to model")) - - ckpt_b = gr.Textbox(label=i18n("Path to Model B:"), value="", interactive=True, placeholder=i18n("Path to model")) - - info__ = gr.Textbox( - label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - info4 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - - - but6 = gr.Button(i18n("Fusion"), variant="primary") - - but6.click( - merge, - [ - ckpt_a, - ckpt_b, - alpha_a, - sr_, - if_f0_, - info__, - name_to_save0, - version_2, - ], - info4, - ) # def merge(path1,path2,alpha1,sr,f0,info): - with gr.Group(): - with gr.Accordion(label=i18n("Modify model information")): - with gr.Row(): ###### - with gr.Column(): - ckpt_path0 = gr.Textbox( - label=i18n("Path to Model:"), value="", interactive=True, placeholder=i18n("Path to model") - ) - info_ = gr.Textbox( - label=i18n("Model information to be modified:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - - with gr.Column(): - name_to_save1 = gr.Textbox( - label=i18n("Save file name:"), - placeholder=i18n("Name for saving"), - value="", - max_lines=8, - interactive=True, - - ) - - info5 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - but7 = gr.Button(i18n("Modify"), variant="primary") - but7.click(change_info, [ckpt_path0, info_, name_to_save1], info5) - with gr.Group(): - with gr.Accordion(label=i18n("View model information")): - with gr.Row(): - with gr.Column(): - ckpt_path1 = gr.Textbox( - label=i18n("Path to Model:"), value="", interactive=True, placeholder=i18n("Path to model") - ) - - info6 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - but8 = gr.Button(i18n("View"), variant="primary") - but8.click(show_info, [ckpt_path1], info6) - with gr.Group(): - with gr.Accordion(label=i18n("Model extraction")): - with gr.Row(): - with gr.Column(): - save_name = gr.Textbox( - label=i18n("Name:"), value="", interactive=True, placeholder=i18n("Name for saving") - ) - if_f0__ = gr.Checkbox( - label=i18n("Whether the model has pitch guidance."), - value=True, - interactive=True, - ) - version_1 = gr.Radio( - label=i18n("Model architecture version:"), - choices=["v1", "v2"], - value="v2", - interactive=True, - ) - sr__ = gr.Radio( - label=i18n("Target sample rate:"), - choices=["32k", "40k", "48k"], - value="40k", - interactive=True, - ) - - with gr.Column(): - ckpt_path2 = gr.Textbox( - - label=i18n("Path to Model:"), - placeholder=i18n("Path to model"), - interactive=True, - ) - info___ = gr.Textbox( - label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder=i18n("Model information to be placed") - ) - info7 = gr.Textbox(label=i18n("Output information:"), value="", max_lines=8) - - with gr.Row(): - - but9 = gr.Button(i18n("Extract"), variant="primary") - ckpt_path2.change( - change_info_, [ckpt_path2], [sr__, if_f0__, version_1] - ) - but9.click( - extract_small_model, - [ckpt_path2, save_name, sr__, if_f0__, info___, version_1], - info7, - ) - - - - - with gr.TabItem(i18n("Settings")): - with gr.Row(): - gr.Markdown(value= - i18n("Pitch settings") - ) - noteshertz = gr.Checkbox( - label = i18n("Whether to use note names instead of their hertz value. E.G. 
[C5, D6] instead of [523.25, 1174.66]Hz"), - value = rvc_globals.NotesOrHertz, - interactive = True, - ) - - noteshertz.change(fn=lambda nhertz: rvc_globals.__setattr__('NotesOrHertz', nhertz), inputs=[noteshertz], outputs=[]) - - noteshertz.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[ - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox,] - ) - return app - -def GradioRun(app): - share_gradio_link = config.iscolab or config.paperspace - concurrency_count = 511 - max_size = 1022 - - if ( - config.iscolab or config.paperspace - ): - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - favicon_path="./images/icon.png", - ) - else: - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - favicon_path=".\images\icon.png", - ) - -if __name__ == "__main__": - if os.name == 'nt': - print(i18n("Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n")) - app = GradioSetup(UTheme=config.grtheme) - GradioRun(app) \ No newline at end of file diff --git a/spaces/Epoching/3D_Photo_Inpainting/README.md b/spaces/Epoching/3D_Photo_Inpainting/README.md deleted file mode 100644 index 7031bd5312984b3bc9bcb8181a4980284c6df703..0000000000000000000000000000000000000000 --- a/spaces/Epoching/3D_Photo_Inpainting/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 3D_Photo_Inpainting -emoji: 👁 -colorFrom: purple -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/EsoCode/text-generation-webui/extensions/openai/script.py b/spaces/EsoCode/text-generation-webui/extensions/openai/script.py deleted file mode 100644 index 323d68236bec77b1d6c6a4e6f5e7ed7631516d81..0000000000000000000000000000000000000000 --- a/spaces/EsoCode/text-generation-webui/extensions/openai/script.py +++ /dev/null @@ -1,889 +0,0 @@ -import base64 -import json -import os -import time -import requests -import yaml -import numpy as np -from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from threading import Thread -from modules.utils import get_available_models -from modules.models import load_model, unload_model -from modules.models_settings import (get_model_settings_from_yamls, - update_model_parameters) - -from modules import shared -from modules.text_generation import encode, generate_reply - -params = { - 'port': int(os.environ.get('OPENEDAI_PORT')) if 'OPENEDAI_PORT' in os.environ else 5001, -} - -debug = True if 'OPENEDAI_DEBUG' in os.environ else False - -# Slightly different defaults for OpenAI's API -# Data type is important, Ex. 
use 0.0 for a float 0 -default_req_params = { - 'max_new_tokens': 200, - 'temperature': 1.0, - 'top_p': 1.0, - 'top_k': 1, - 'repetition_penalty': 1.18, - 'repetition_penalty_range': 0, - 'encoder_repetition_penalty': 1.0, - 'suffix': None, - 'stream': False, - 'echo': False, - 'seed': -1, - # 'n' : default(body, 'n', 1), # 'n' doesn't have a direct map - 'truncation_length': 2048, - 'add_bos_token': True, - 'do_sample': True, - 'typical_p': 1.0, - 'epsilon_cutoff': 0.0, # In units of 1e-4 - 'eta_cutoff': 0.0, # In units of 1e-4 - 'tfs': 1.0, - 'top_a': 0.0, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0.0, - 'length_penalty': 1.0, - 'early_stopping': False, - 'mirostat_mode': 0, - 'mirostat_tau': 5.0, - 'mirostat_eta': 0.1, - 'ban_eos_token': False, - 'skip_special_tokens': True, - 'custom_stopping_strings': '', -} - -# Optional, install the module and download the model to enable -# v1/embeddings -try: - from sentence_transformers import SentenceTransformer -except ImportError: - pass - -st_model = os.environ["OPENEDAI_EMBEDDING_MODEL"] if "OPENEDAI_EMBEDDING_MODEL" in os.environ else "all-mpnet-base-v2" -embedding_model = None - -# little helper to get defaults if arg is present but None and should be the same type as default. -def default(dic, key, default): - val = dic.get(key, default) - if type(val) != type(default): - # maybe it's just something like 1 instead of 1.0 - try: - v = type(default)(val) - if type(val)(v) == val: # if it's the same value passed in, it's ok. - return v - except: - pass - - val = default - return val - - -def clamp(value, minvalue, maxvalue): - return max(minvalue, min(value, maxvalue)) - - -def float_list_to_base64(float_list): - # Convert the list to a float32 array that the OpenAPI client expects - float_array = np.array(float_list, dtype="float32") - - # Get raw bytes - bytes_array = float_array.tobytes() - - # Encode bytes into base64 - encoded_bytes = base64.b64encode(bytes_array) - - # Turn raw base64 encoded bytes into ASCII - ascii_string = encoded_bytes.decode('ascii') - return ascii_string - - -class Handler(BaseHTTPRequestHandler): - def send_access_control_headers(self): - self.send_header("Access-Control-Allow-Origin", "*") - self.send_header("Access-Control-Allow-Credentials", "true") - self.send_header( - "Access-Control-Allow-Methods", - "GET,HEAD,OPTIONS,POST,PUT" - ) - self.send_header( - "Access-Control-Allow-Headers", - "Origin, Accept, X-Requested-With, Content-Type, " - "Access-Control-Request-Method, Access-Control-Request-Headers, " - "Authorization" - ) - - def openai_error(self, message, code = 500, error_type = 'APIError', param = '', internal_message = ''): - self.send_response(code) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - error_resp = { - 'error': { - 'message': message, - 'code': code, - 'type': error_type, - 'param': param, - } - } - if internal_message: - error_resp['internal_message'] = internal_message - - response = json.dumps(error_resp) - self.wfile.write(response.encode('utf-8')) - - def do_OPTIONS(self): - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - self.wfile.write("OK".encode('utf-8')) - - def do_GET(self): - if self.path.startswith('/v1/engines') or self.path.startswith('/v1/models'): - current_model_list = [ shared.model_name ] # The real chat/completions model, maybe "None" - embeddings_model_list = [ 
st_model ] if embedding_model else [] # The real sentence transformer embeddings model - pseudo_model_list = [ # these are expected by so much, so include some here as a dummy - 'gpt-3.5-turbo', # /v1/chat/completions - 'text-curie-001', # /v1/completions, 2k context - 'text-davinci-002' # /v1/embeddings text-embedding-ada-002:1536, text-davinci-002:768 - ] - - is_legacy = 'engines' in self.path - is_list = self.path in ['/v1/engines', '/v1/models'] - - resp = '' - - if is_legacy and not is_list: # load model - model_name = self.path[self.path.find('/v1/engines/') + len('/v1/engines/'):] - - resp = { - "id": model_name, - "object": "engine", - "owner": "self", - "ready": True, - } - if model_name not in pseudo_model_list + embeddings_model_list + current_model_list: # Real model only - # No args. Maybe it works anyways! - # TODO: hack some heuristics into args for better results - - shared.model_name = model_name - unload_model() - - model_settings = get_model_settings_from_yamls(shared.model_name) - shared.settings.update(model_settings) - update_model_parameters(model_settings, initial=True) - - if shared.settings['mode'] != 'instruct': - shared.settings['instruction_template'] = None - - shared.model, shared.tokenizer = load_model(shared.model_name) - - if not shared.model: # load failed. - shared.model_name = "None" - resp['id'] = "None" - resp['ready'] = False - - elif is_list: - # TODO: Lora's? - available_model_list = get_available_models() - all_model_list = current_model_list + embeddings_model_list + pseudo_model_list + available_model_list - - models = {} - - if is_legacy: - models = [{ "id": id, "object": "engine", "owner": "user", "ready": True } for id in all_model_list ] - if not shared.model: - models[0]['ready'] = False - else: - models = [{ "id": id, "object": "model", "owned_by": "user", "permission": [] } for id in all_model_list ] - - resp = { - "object": "list", - "data": models, - } - - else: - the_model_name = self.path[len('/v1/models/'):] - resp = { - "id": the_model_name, - "object": "model", - "owned_by": "user", - "permission": [] - } - - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - response = json.dumps(resp) - self.wfile.write(response.encode('utf-8')) - - elif '/billing/usage' in self.path: - # Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31 - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - response = json.dumps({ - "total_usage": 0, - }) - self.wfile.write(response.encode('utf-8')) - - else: - self.send_error(404) - - def do_POST(self): - if debug: - print(self.headers) # did you know... python-openai sends your linux kernel & python version? 
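`float_list_to_base64` above packs an embedding as a raw float32 buffer (native byte order, little-endian on typical hosts) encoded in base64, which is the representation the accompanying comment says the OpenAI client expects. The client-side inverse is a `np.frombuffer` over the decoded bytes; a round-trip sketch:

```python
import base64
import numpy as np

def float_list_to_base64(float_list):
    # Same approach as above: float32 bytes -> base64 -> ASCII string.
    return base64.b64encode(
        np.array(float_list, dtype="float32").tobytes()
    ).decode("ascii")

def base64_to_float_list(ascii_string):
    # Client-side decode: base64 -> raw bytes -> float32 array.
    return np.frombuffer(base64.b64decode(ascii_string), dtype="float32").tolist()

vec = [0.25, -1.5, 3.0]  # values chosen to be exactly representable in float32
assert base64_to_float_list(float_list_to_base64(vec)) == vec
```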
- content_length = int(self.headers['Content-Length']) - body = json.loads(self.rfile.read(content_length).decode('utf-8')) - - if debug: - print(body) - - if '/completions' in self.path or '/generate' in self.path: - - if not shared.model: - self.openai_error("No model loaded.") - return - - is_legacy = '/generate' in self.path - is_chat_request = 'chat' in self.path - resp_list = 'data' if is_legacy else 'choices' - - # XXX model is ignored for now - # model = body.get('model', shared.model_name) # ignored, use existing for now - model = shared.model_name - created_time = int(time.time()) - - cmpl_id = "chatcmpl-%d" % (created_time) if is_chat_request else "conv-%d" % (created_time) - - # Request Parameters - # Try to use openai defaults or map them to something with the same intent - req_params = default_req_params.copy() - stopping_strings = [] - - if 'stop' in body: - if isinstance(body['stop'], str): - stopping_strings.extend([body['stop']]) - elif isinstance(body['stop'], list): - stopping_strings.extend(body['stop']) - - truncation_length = default(shared.settings, 'truncation_length', 2048) - truncation_length = clamp(default(body, 'truncation_length', truncation_length), 1, truncation_length) - - default_max_tokens = truncation_length if is_chat_request else 16 # completions default, chat default is 'inf' so we need to cap it. - - max_tokens_str = 'length' if is_legacy else 'max_tokens' - max_tokens = default(body, max_tokens_str, default(shared.settings, 'max_new_tokens', default_max_tokens)) - # if the user assumes OpenAI, the max_tokens is way too large - try to ignore it unless it's small enough - - req_params['max_new_tokens'] = max_tokens - req_params['truncation_length'] = truncation_length - req_params['temperature'] = clamp(default(body, 'temperature', default_req_params['temperature']), 0.001, 1.999) # fixup absolute 0.0 - req_params['top_p'] = clamp(default(body, 'top_p', default_req_params['top_p']), 0.001, 1.0) - req_params['top_k'] = default(body, 'best_of', default_req_params['top_k']) - req_params['suffix'] = default(body, 'suffix', default_req_params['suffix']) - req_params['stream'] = default(body, 'stream', default_req_params['stream']) - req_params['echo'] = default(body, 'echo', default_req_params['echo']) - req_params['seed'] = shared.settings.get('seed', default_req_params['seed']) - req_params['add_bos_token'] = shared.settings.get('add_bos_token', default_req_params['add_bos_token']) - - is_streaming = req_params['stream'] - - self.send_response(200) - self.send_access_control_headers() - if is_streaming: - self.send_header('Content-Type', 'text/event-stream') - self.send_header('Cache-Control', 'no-cache') - # self.send_header('Connection', 'keep-alive') - else: - self.send_header('Content-Type', 'application/json') - self.end_headers() - - token_count = 0 - completion_token_count = 0 - prompt = '' - stream_object_type = '' - object_type = '' - - if is_chat_request: - # Chat Completions - stream_object_type = 'chat.completions.chunk' - object_type = 'chat.completions' - - messages = body['messages'] - - role_formats = { - 'user': 'user: {message}\n', - 'assistant': 'assistant: {message}\n', - 'system': '{message}', - 'context': 'You are a helpful assistant. 
Answer as concisely as possible.', - 'prompt': 'assistant:', - } - - # Instruct models can be much better - if shared.settings['instruction_template']: - try: - instruct = yaml.safe_load(open(f"characters/instruction-following/{shared.settings['instruction_template']}.yaml", 'r')) - - template = instruct['turn_template'] - system_message_template = "{message}" - system_message_default = instruct['context'] - bot_start = template.find('<|bot|>') # So far, 100% of instruction templates have this token - user_message_template = template[:bot_start].replace('<|user-message|>', '{message}').replace('<|user|>', instruct['user']) - bot_message_template = template[bot_start:].replace('<|bot-message|>', '{message}').replace('<|bot|>', instruct['bot']) - bot_prompt = bot_message_template[:bot_message_template.find('{message}')].rstrip(' ') - - role_formats = { - 'user': user_message_template, - 'assistant': bot_message_template, - 'system': system_message_template, - 'context': system_message_default, - 'prompt': bot_prompt, - } - - if 'Alpaca' in shared.settings['instruction_template']: - stopping_strings.extend(['\n###']) - elif instruct['user']: # WizardLM and some others have no user prompt. - stopping_strings.extend(['\n' + instruct['user'], instruct['user']]) - - if debug: - print(f"Loaded instruction role format: {shared.settings['instruction_template']}") - - except Exception as e: - stopping_strings.extend(['\nuser:']) - - print(f"Exception: When loading characters/instruction-following/{shared.settings['instruction_template']}.yaml: {repr(e)}") - print("Warning: Loaded default instruction-following template for model.") - - else: - stopping_strings.extend(['\nuser:']) - print("Warning: Loaded default instruction-following template for model.") - - system_msgs = [] - chat_msgs = [] - - # You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. Knowledge cutoff: {knowledge_cutoff} Current date: {current_date} - context_msg = role_formats['system'].format(message=role_formats['context']) if role_formats['context'] else '' - if context_msg: - system_msgs.extend([context_msg]) - - # Maybe they sent both? This is not documented in the API, but some clients seem to do this. - if 'prompt' in body: - prompt_msg = role_formats['system'].format(message=body['prompt']) - system_msgs.extend([prompt_msg]) - - for m in messages: - role = m['role'] - content = m['content'] - msg = role_formats[role].format(message=content) - if role == 'system': - system_msgs.extend([msg]) - else: - chat_msgs.extend([msg]) - - # can't really truncate the system messages - system_msg = '\n'.join(system_msgs) - if system_msg and system_msg[-1] != '\n': - system_msg = system_msg + '\n' - - system_token_count = len(encode(system_msg)[0]) - remaining_tokens = truncation_length - system_token_count - chat_msg = '' - - while chat_msgs: - new_msg = chat_msgs.pop() - new_size = len(encode(new_msg)[0]) - if new_size <= remaining_tokens: - chat_msg = new_msg + chat_msg - remaining_tokens -= new_size - else: - print(f"Warning: too many messages for context size, dropping {len(chat_msgs) + 1} oldest message(s).") - break - - prompt = system_msg + chat_msg + role_formats['prompt'] - - token_count = len(encode(prompt)[0]) - - else: - # Text Completions - stream_object_type = 'text_completion.chunk' - object_type = 'text_completion' - - # ... encoded as a string, array of strings, array of tokens, or array of token arrays. 
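# Only a plain string prompt is handled below; list-typed prompts (batches
# or token arrays) are rejected with an API error.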
- if is_legacy: - prompt = body['context'] # Older engines.generate API - else: - prompt = body['prompt'] # XXX this can be different types - - if isinstance(prompt, list): - self.openai_error("API Batched generation not yet supported.") - return - - token_count = len(encode(prompt)[0]) - if token_count >= truncation_length: - new_len = int(len(prompt) * shared.settings['truncation_length'] / token_count) - prompt = prompt[-new_len:] - new_token_count = len(encode(prompt)[0]) - print(f"Warning: truncating prompt to {new_len} characters, was {token_count} tokens. Now: {new_token_count} tokens.") - token_count = new_token_count - - if truncation_length - token_count < req_params['max_new_tokens']: - print(f"Warning: Ignoring max_new_tokens ({req_params['max_new_tokens']}), too large for the remaining context. Remaining tokens: {truncation_length - token_count}") - req_params['max_new_tokens'] = truncation_length - token_count - print(f"Warning: Set max_new_tokens = {req_params['max_new_tokens']}") - - if is_streaming: - # begin streaming - chunk = { - "id": cmpl_id, - "object": stream_object_type, - "created": created_time, - "model": shared.model_name, - resp_list: [{ - "index": 0, - "finish_reason": None, - }], - } - - if stream_object_type == 'text_completion.chunk': - chunk[resp_list][0]["text"] = "" - else: - # So yeah... do both methods? delta and messages. - chunk[resp_list][0]["message"] = {'role': 'assistant', 'content': ''} - chunk[resp_list][0]["delta"] = {'role': 'assistant', 'content': ''} - - response = 'data: ' + json.dumps(chunk) + '\r\n\r\n' - self.wfile.write(response.encode('utf-8')) - - # generate reply ####################################### - if debug: - print({'prompt': prompt, 'req_params': req_params}) - generator = generate_reply(prompt, req_params, stopping_strings=stopping_strings, is_chat=False) - - answer = '' - seen_content = '' - longest_stop_len = max([len(x) for x in stopping_strings] + [0]) - - for a in generator: - answer = a - - stop_string_found = False - len_seen = len(seen_content) - search_start = max(len_seen - longest_stop_len, 0) - - for string in stopping_strings: - idx = answer.find(string, search_start) - if idx != -1: - answer = answer[:idx] # clip it. - stop_string_found = True - - if stop_string_found: - break - - # If something like "\nYo" is generated just before "\nYou:" - # is completed, buffer and generate more, don't send it - buffer_and_continue = False - - for string in stopping_strings: - for j in range(len(string) - 1, 0, -1): - if answer[-j:] == string[:j]: - buffer_and_continue = True - break - else: - continue - break - - if buffer_and_continue: - continue - - if is_streaming: - # Streaming - new_content = answer[len_seen:] - - if not new_content or chr(0xfffd) in new_content: # partial unicode character, don't send it yet. - continue - - seen_content = answer - chunk = { - "id": cmpl_id, - "object": stream_object_type, - "created": created_time, - "model": shared.model_name, - resp_list: [{ - "index": 0, - "finish_reason": None, - }], - } - - # strip extra leading space off new generated content - if len_seen == 0 and new_content[0] == ' ': - new_content = new_content[1:] - - if stream_object_type == 'text_completion.chunk': - chunk[resp_list][0]['text'] = new_content - else: - # So yeah... do both methods? delta and messages. 
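# The documented streaming format only defines 'delta'; 'message' is filled
# in as well for clients that expect the non-streaming response shape.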
- chunk[resp_list][0]['message'] = {'content': new_content} - chunk[resp_list][0]['delta'] = {'content': new_content} - response = 'data: ' + json.dumps(chunk) + '\r\n\r\n' - self.wfile.write(response.encode('utf-8')) - completion_token_count += len(encode(new_content)[0]) - - if is_streaming: - chunk = { - "id": cmpl_id, - "object": stream_object_type, - "created": created_time, - "model": model, # TODO: add Lora info? - resp_list: [{ - "index": 0, - "finish_reason": "stop", - }], - "usage": { - "prompt_tokens": token_count, - "completion_tokens": completion_token_count, - "total_tokens": token_count + completion_token_count - } - } - if stream_object_type == 'text_completion.chunk': - chunk[resp_list][0]['text'] = '' - else: - # So yeah... do both methods? delta and messages. - chunk[resp_list][0]['message'] = {'content': ''} - chunk[resp_list][0]['delta'] = {'content': ''} - - response = 'data: ' + json.dumps(chunk) + '\r\n\r\ndata: [DONE]\r\n\r\n' - self.wfile.write(response.encode('utf-8')) - # Finished if streaming. - if debug: - if answer and answer[0] == ' ': - answer = answer[1:] - print({'answer': answer}, chunk) - return - - # strip extra leading space off new generated content - if answer and answer[0] == ' ': - answer = answer[1:] - - if debug: - print({'response': answer}) - - completion_token_count = len(encode(answer)[0]) - stop_reason = "stop" - if token_count + completion_token_count >= truncation_length: - stop_reason = "length" - - resp = { - "id": cmpl_id, - "object": object_type, - "created": created_time, - "model": model, # TODO: add Lora info? - resp_list: [{ - "index": 0, - "finish_reason": stop_reason, - }], - "usage": { - "prompt_tokens": token_count, - "completion_tokens": completion_token_count, - "total_tokens": token_count + completion_token_count - } - } - - if is_chat_request: - resp[resp_list][0]["message"] = {"role": "assistant", "content": answer} - else: - resp[resp_list][0]["text"] = answer - - response = json.dumps(resp) - self.wfile.write(response.encode('utf-8')) - - elif '/edits' in self.path: - if not shared.model: - self.openai_error("No model loaded.") - return - - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - created_time = int(time.time()) - - # Using Alpaca format, this may work with other models too. - instruction = body['instruction'] - input = body.get('input', '') - - # Request parameters - req_params = default_req_params.copy() - stopping_strings = [] - - # Alpaca is verbose so a good default prompt - default_template = ( - "Below is an instruction that describes a task, paired with an input that provides further context. 
" - "Write a response that appropriately completes the request.\n\n" - "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n" - ) - - instruction_template = default_template - - # Use the special instruction/input/response template for anything trained like Alpaca - if shared.settings['instruction_template']: - if 'Alpaca' in shared.settings['instruction_template']: - stopping_strings.extend(['\n###']) - else: - try: - instruct = yaml.safe_load(open(f"characters/instruction-following/{shared.settings['instruction_template']}.yaml", 'r')) - - template = instruct['turn_template'] - template = template\ - .replace('<|user|>', instruct.get('user', ''))\ - .replace('<|bot|>', instruct.get('bot', ''))\ - .replace('<|user-message|>', '{instruction}\n{input}') - - instruction_template = instruct.get('context', '') + template[:template.find('<|bot-message|>')].rstrip(' ') - if instruct['user']: - stopping_strings.extend(['\n' + instruct['user'], instruct['user'] ]) - - except Exception as e: - instruction_template = default_template - print(f"Exception: When loading characters/instruction-following/{shared.settings['instruction_template']}.yaml: {repr(e)}") - print("Warning: Loaded default instruction-following template (Alpaca) for model.") - else: - stopping_strings.extend(['\n###']) - print("Warning: Loaded default instruction-following template (Alpaca) for model.") - - - edit_task = instruction_template.format(instruction=instruction, input=input) - - truncation_length = default(shared.settings, 'truncation_length', 2048) - token_count = len(encode(edit_task)[0]) - max_tokens = truncation_length - token_count - - req_params['max_new_tokens'] = max_tokens - req_params['truncation_length'] = truncation_length - req_params['temperature'] = clamp(default(body, 'temperature', default_req_params['temperature']), 0.001, 1.999) # fixup absolute 0.0 - req_params['top_p'] = clamp(default(body, 'top_p', default_req_params['top_p']), 0.001, 1.0) - req_params['seed'] = shared.settings.get('seed', default_req_params['seed']) - req_params['add_bos_token'] = shared.settings.get('add_bos_token', default_req_params['add_bos_token']) - - if debug: - print({'edit_template': edit_task, 'req_params': req_params, 'token_count': token_count}) - - generator = generate_reply(edit_task, req_params, stopping_strings=stopping_strings, is_chat=False) - - longest_stop_len = max([len(x) for x in stopping_strings] + [0]) - answer = '' - seen_content = '' - for a in generator: - answer = a - - stop_string_found = False - len_seen = len(seen_content) - search_start = max(len_seen - longest_stop_len, 0) - - for string in stopping_strings: - idx = answer.find(string, search_start) - if idx != -1: - answer = answer[:idx] # clip it. - stop_string_found = True - - if stop_string_found: - break - - - # some reply's have an extra leading space to fit the instruction template, just clip it off from the reply. 
- if edit_task[-1] != '\n' and answer and answer[0] == ' ': - answer = answer[1:] - - completion_token_count = len(encode(answer)[0]) - - resp = { - "object": "edit", - "created": created_time, - "choices": [{ - "text": answer, - "index": 0, - }], - "usage": { - "prompt_tokens": token_count, - "completion_tokens": completion_token_count, - "total_tokens": token_count + completion_token_count - } - } - - if debug: - print({'answer': answer, 'completion_token_count': completion_token_count}) - - response = json.dumps(resp) - self.wfile.write(response.encode('utf-8')) - - elif '/images/generations' in self.path and 'SD_WEBUI_URL' in os.environ: - # Stable Diffusion callout wrapper for txt2img - # Low effort implementation for compatibility. With only "prompt" being passed and assuming DALL-E - # the results will be limited and likely poor. SD has hundreds of models and dozens of settings. - # If you want high quality tailored results you should just use the Stable Diffusion API directly. - # it's too general an API to try and shape the result with specific tags like "masterpiece", etc, - # Will probably work best with the stock SD models. - # SD configuration is beyond the scope of this API. - # At this point I will not add the edits and variations endpoints (ie. img2img) because they - # require changing the form data handling to accept multipart form data, also to properly support - # url return types will require file management and a web serving files... Perhaps later! - - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - width, height = [ int(x) for x in default(body, 'size', '1024x1024').split('x') ] # ignore the restrictions on size - response_format = default(body, 'response_format', 'url') # or b64_json - - payload = { - 'prompt': body['prompt'], # ignore prompt limit of 1000 characters - 'width': width, - 'height': height, - 'batch_size': default(body, 'n', 1) # ignore the batch limits of max 10 - } - - resp = { - 'created': int(time.time()), - 'data': [] - } - - # TODO: support SD_WEBUI_AUTH username:password pair. - sd_url = f"{os.environ['SD_WEBUI_URL']}/sdapi/v1/txt2img" - - response = requests.post(url=sd_url, json=payload) - r = response.json() - # r['parameters']... - for b64_json in r['images']: - if response_format == 'b64_json': - resp['data'].extend([{'b64_json': b64_json}]) - else: - resp['data'].extend([{'url': f'data:image/png;base64,{b64_json}'}]) # yeah it's lazy. requests.get() will not work with this - - response = json.dumps(resp) - self.wfile.write(response.encode('utf-8')) - - elif '/embeddings' in self.path and embedding_model is not None: - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - input = body['input'] if 'input' in body else body['text'] - if type(input) is str: - input = [input] - - embeddings = embedding_model.encode(input).tolist() - - def enc_emb(emb): - # If base64 is specified, encode. Otherwise, do nothing. 
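# The inverse on the client side is:
#   np.frombuffer(base64.b64decode(b64_string), dtype=np.float32)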
- if body.get("encoding_format", "") == "base64": - return float_list_to_base64(emb) - else: - return emb - data = [{"object": "embedding", "embedding": enc_emb(emb), "index": n} for n, emb in enumerate(embeddings)] - - response = json.dumps({ - "object": "list", - "data": data, - "model": st_model, # return the real model - "usage": { - "prompt_tokens": 0, - "total_tokens": 0, - } - }) - - if debug: - print(f"Embeddings return size: {len(embeddings[0])}, number: {len(embeddings)}") - self.wfile.write(response.encode('utf-8')) - - elif '/moderations' in self.path: - # for now do nothing, just don't error. - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - response = json.dumps({ - "id": "modr-5MWoLO", - "model": "text-moderation-001", - "results": [{ - "categories": { - "hate": False, - "hate/threatening": False, - "self-harm": False, - "sexual": False, - "sexual/minors": False, - "violence": False, - "violence/graphic": False - }, - "category_scores": { - "hate": 0.0, - "hate/threatening": 0.0, - "self-harm": 0.0, - "sexual": 0.0, - "sexual/minors": 0.0, - "violence": 0.0, - "violence/graphic": 0.0 - }, - "flagged": False - }] - }) - self.wfile.write(response.encode('utf-8')) - - elif self.path == '/api/v1/token-count': - # NOT STANDARD. lifted from the api extension, but it's still very useful to calculate tokenized length client side. - self.send_response(200) - self.send_access_control_headers() - self.send_header('Content-Type', 'application/json') - self.end_headers() - - tokens = encode(body['prompt'])[0] - response = json.dumps({ - 'results': [{ - 'tokens': len(tokens) - }] - }) - self.wfile.write(response.encode('utf-8')) - - else: - print(self.path, self.headers) - self.send_error(404) - - -def run_server(): - global embedding_model - try: - embedding_model = SentenceTransformer(st_model) - print(f"\nLoaded embedding model: {st_model}, max sequence length: {embedding_model.max_seq_length}") - except: - print(f"\nFailed to load embedding model: {st_model}") - pass - - server_addr = ('0.0.0.0' if shared.args.listen else '127.0.0.1', params['port']) - server = ThreadingHTTPServer(server_addr, Handler) - if shared.args.share: - try: - from flask_cloudflared import _run_cloudflared - public_url = _run_cloudflared(params['port'], params['port'] + 1) - print(f'Starting OpenAI compatible api at\nOPENAI_API_BASE={public_url}/v1') - except ImportError: - print('You should install flask_cloudflared manually') - else: - print(f'Starting OpenAI compatible api:\nOPENAI_API_BASE=http://{server_addr[0]}:{server_addr[1]}/v1') - - server.serve_forever() - - -def setup(): - Thread(target=run_server, daemon=True).start() diff --git a/spaces/Fox1997/vits-uma-genshin-honkai/Docker/vits.sh b/spaces/Fox1997/vits-uma-genshin-honkai/Docker/vits.sh deleted file mode 100644 index 2b87f26eda96d3800b73b4a21b210c78888a2299..0000000000000000000000000000000000000000 --- a/spaces/Fox1997/vits-uma-genshin-honkai/Docker/vits.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -run() { - echo -e "\033[32m已完成初始化,启动服务...\033[0m" - python3 /app/vits-uma-genshin-honkai/app.py -} -install() { - echo -e "\033[33m正在初始化:安装依赖....\033[0m" - pip install -r /app/vits-uma-genshin-honkai/requirements.txt -i https://mirrors.ustc.edu.cn/pypi/web/simple - echo -e "\033[33m正在下载模型....\033[0m" - rm -f /app/vits-uma-genshin-honkai/model/G_953000.pth - wget -O /app/vits-uma-genshin-honkai/model/G_953000.pth 
https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai/resolve/main/model/G_953000.pth - echo -e "\033[32m初始化完成!\033[0m" - run -} - -if [ ! -f "/app/vits-uma-genshin-honkai/model/G_953000.pth" ] || [ "$(stat -c%s "/app/vits-uma-genshin-honkai/model/G_953000.pth")" -lt 10000 ]; then - install -else - run -fi diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/utils.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/utils.py deleted file mode 100644 index 8fc5b059dad76877f4442da36a8d6327302fe341..0000000000000000000000000000000000000000 --- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/utils.py +++ /dev/null @@ -1,97 +0,0 @@ -import math -from typing import Callable, Optional - -import attr -import torch -import torch.nn as nn -import torch.nn.functional as F - -FilterFn = Callable[[torch.Tensor], torch.Tensor] - - -class ZeroKeyBiasGrad(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - return x - - @staticmethod - def backward(ctx, output_grad): - output_grad = output_grad.clone() - output_grad.chunk(3)[1].zero_() - return output_grad - - -def zero_key_bias_grad(x: torch.Tensor) -> torch.Tensor: - return ZeroKeyBiasGrad.apply(x) - - -@attr.s(eq=False, repr=False) -class LayerNorm(nn.Module): - n_state: int = attr.ib() - eps: float = attr.ib(default=1e-6) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - self.g = nn.Parameter(torch.ones((self.n_state,), dtype=torch.float32, device=self.device)) - self.b = nn.Parameter(torch.zeros((self.n_state,), dtype=torch.float32, device=self.device)) - self.g.weight_decay_level = "disable" # type: ignore - self.b.weight_decay_level = "disable" # type: ignore - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return F.layer_norm( - x.type(torch.float32), torch.Size((self.n_state,)), self.g, self.b, self.eps - ) - - -@attr.s(eq=False, repr=False) -class Affine(nn.Module): - n_in: int = attr.ib() - n_out: int = attr.ib() - use_bias: bool = attr.ib(default=True) - use_admnet_init: bool = attr.ib(default=False) - std: Optional[float] = attr.ib(default=None) - extra_init_scale: Optional[float] = attr.ib(default=None) - bias_filter_fn: FilterFn = attr.ib(default=lambda x: x) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - if not self.use_admnet_init: - self.std = self.std if self.std is not None else math.sqrt(2 / (self.n_in + self.n_out)) - self.std = ( - self.std if self.extra_init_scale is None else self.std * self.extra_init_scale - ) - - w = torch.empty((self.n_out, self.n_in), dtype=torch.float32, device=self.device) - self.w = nn.Parameter(w) - - if self.use_bias: - self.b = nn.Parameter( - torch.zeros((self.n_out,), dtype=torch.float32, device=self.device) - ) - self.b.weight_decay_level = "disable" # type: ignore - else: - if self.extra_init_scale is not None: - raise ValueError("extra_init_scale incompatible with admnet init") - - w = torch.empty((self.n_out, self.n_in), dtype=torch.float32, device=self.device) - - if self.use_bias: - b = torch.empty((self.n_out,), dtype=torch.float32, device=self.device) - - self.w = nn.Parameter(w) - - if self.use_bias: - self.b = nn.Parameter(b) - self.b.weight_decay_level = "disable" # type: ignore - - def forward(self, x: torch.Tensor) -> torch.Tensor: - w = self.w if self.w.dtype == x.dtype else self.w.to(x.dtype) - b = ( 
- self.bias_filter_fn(self.b if self.b.dtype == x.dtype else self.b.to(x.dtype)) - if self.use_bias - else None - ) - return F.linear(x, w, b) diff --git a/spaces/FridaZuley/RVC_HFKawaii/demucs/raw.py b/spaces/FridaZuley/RVC_HFKawaii/demucs/raw.py deleted file mode 100644 index d4941ad2d7ed858f490db441f5b46b12bd61ad78..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/demucs/raw.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -from collections import defaultdict, namedtuple -from pathlib import Path - -import musdb -import numpy as np -import torch as th -import tqdm -from torch.utils.data import DataLoader - -from .audio import AudioFile - -ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"]) - - -class Rawset: - """ - Dataset of raw, normalized, float32 audio files - """ - def __init__(self, path, samples=None, stride=None, channels=2, streams=None): - self.path = Path(path) - self.channels = channels - self.samples = samples - if stride is None: - stride = samples if samples is not None else 0 - self.stride = stride - entries = defaultdict(list) - for root, folders, files in os.walk(self.path, followlinks=True): - folders.sort() - files.sort() - for file in files: - if file.endswith(".raw"): - path = Path(root) / file - name, stream = path.stem.rsplit('.', 1) - entries[(path.parent.relative_to(self.path), name)].append(int(stream)) - - self._entries = list(entries.keys()) - - sizes = [] - self._lengths = [] - ref_streams = sorted(entries[self._entries[0]]) - assert ref_streams == list(range(len(ref_streams))) - if streams is None: - self.streams = ref_streams - else: - self.streams = streams - for entry in sorted(entries.keys()): - streams = entries[entry] - assert sorted(streams) == ref_streams - file = self._path(*entry) - length = file.stat().st_size // (4 * channels) - if samples is None: - sizes.append(1) - else: - if length < samples: - self._entries.remove(entry) - continue - sizes.append((length - samples) // stride + 1) - self._lengths.append(length) - if not sizes: - raise ValueError(f"Empty dataset {self.path}") - self._cumulative_sizes = np.cumsum(sizes) - self._sizes = sizes - - def __len__(self): - return self._cumulative_sizes[-1] - - @property - def total_length(self): - return sum(self._lengths) - - def chunk_info(self, index): - file_index = np.searchsorted(self._cumulative_sizes, index, side='right') - if file_index == 0: - local_index = index - else: - local_index = index - self._cumulative_sizes[file_index - 1] - return ChunkInfo(offset=local_index * self.stride, - file_index=file_index, - local_index=local_index) - - def _path(self, folder, name, stream=0): - return self.path / folder / (name + f'.{stream}.raw') - - def __getitem__(self, index): - chunk = self.chunk_info(index) - entry = self._entries[chunk.file_index] - - length = self.samples or self._lengths[chunk.file_index] - streams = [] - to_read = length * self.channels * 4 - for stream_index, stream in enumerate(self.streams): - offset = chunk.offset * 4 * self.channels - file = open(self._path(*entry, stream=stream), 'rb') - file.seek(offset) - content = file.read(to_read) - assert len(content) == to_read - content = np.frombuffer(content, dtype=np.float32) - content = content.copy() # make writable - 
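# Reshape the flat interleaved samples to (length, channels), then transpose
# to the (channels, length) layout used by the rest of the pipeline.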
streams.append(th.from_numpy(content).view(length, self.channels).t()) - return th.stack(streams, dim=0) - - def name(self, index): - chunk = self.chunk_info(index) - folder, name = self._entries[chunk.file_index] - return folder / name - - -class MusDBSet: - def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2): - self.mus = mus - self.streams = streams - self.samplerate = samplerate - self.channels = channels - - def __len__(self): - return len(self.mus.tracks) - - def __getitem__(self, index): - track = self.mus.tracks[index] - return (track.name, AudioFile(track.path).read(channels=self.channels, - seek_time=0, - streams=self.streams, - samplerate=self.samplerate)) - - -def build_raw(mus, destination, normalize, workers, samplerate, channels): - destination.mkdir(parents=True, exist_ok=True) - loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate), - batch_size=1, - num_workers=workers, - collate_fn=lambda x: x[0]) - for name, streams in tqdm.tqdm(loader): - if normalize: - ref = streams[0].mean(dim=0) # use mono mixture as reference - streams = (streams - ref.mean()) / ref.std() - for index, stream in enumerate(streams): - open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes()) - - -def main(): - parser = argparse.ArgumentParser('rawset') - parser.add_argument('--workers', type=int, default=10) - parser.add_argument('--samplerate', type=int, default=44100) - parser.add_argument('--channels', type=int, default=2) - parser.add_argument('musdb', type=Path) - parser.add_argument('destination', type=Path) - - args = parser.parse_args() - - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"), - args.destination / "train", - normalize=True, - channels=args.channels, - samplerate=args.samplerate, - workers=args.workers) - build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"), - args.destination / "valid", - normalize=True, - samplerate=args.samplerate, - channels=args.channels, - workers=args.workers) - - -if __name__ == "__main__": - main() diff --git a/spaces/Frorozcol/dreambooth-training/convertosd.py b/spaces/Frorozcol/dreambooth-training/convertosd.py deleted file mode 100644 index e4bec6cbe894dd74b24f633cc66346d687d3f802..0000000000000000000000000000000000000000 --- a/spaces/Frorozcol/dreambooth-training/convertosd.py +++ /dev/null @@ -1,226 +0,0 @@ -# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint. -# *Only* converts the UNet, VAE, and Text Encoder. -# Does not convert optimizer state or any other thing. 
-# Written by jachiam - -import argparse -import os.path as osp - -import torch -import gc - -# =================# -# UNet Conversion # -# =================# - -unet_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("time_embed.0.weight", "time_embedding.linear_1.weight"), - ("time_embed.0.bias", "time_embedding.linear_1.bias"), - ("time_embed.2.weight", "time_embedding.linear_2.weight"), - ("time_embed.2.bias", "time_embedding.linear_2.bias"), - ("input_blocks.0.0.weight", "conv_in.weight"), - ("input_blocks.0.0.bias", "conv_in.bias"), - ("out.0.weight", "conv_norm_out.weight"), - ("out.0.bias", "conv_norm_out.bias"), - ("out.2.weight", "conv_out.weight"), - ("out.2.bias", "conv_out.bias"), -] - -unet_conversion_map_resnet = [ - # (stable-diffusion, HF Diffusers) - ("in_layers.0", "norm1"), - ("in_layers.2", "conv1"), - ("out_layers.0", "norm2"), - ("out_layers.3", "conv2"), - ("emb_layers.1", "time_emb_proj"), - ("skip_connection", "conv_shortcut"), -] - -unet_conversion_map_layer = [] -# hardcoded number of downblocks and resnets/attentions... -# would need smarter logic for other networks. -for i in range(4): - # loop over downblocks/upblocks - - for j in range(2): - # loop over resnets/attentions for downblocks - hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." - sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0." - unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) - - if i < 3: - # no attention layers in down_blocks.3 - hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." - sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1." - unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) - - for j in range(3): - # loop over resnets/attentions for upblocks - hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." - sd_up_res_prefix = f"output_blocks.{3*i + j}.0." - unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) - - if i > 0: - # no attention layers in up_blocks.0 - hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." - sd_up_atn_prefix = f"output_blocks.{3*i + j}.1." - unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) - - if i < 3: - # no downsample in down_blocks.3 - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." - sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op." - unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) - - # no upsample in up_blocks.3 - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." - unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) - -hf_mid_atn_prefix = "mid_block.attentions.0." -sd_mid_atn_prefix = "middle_block.1." -unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) - -for j in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{j}." - sd_mid_res_prefix = f"middle_block.{2*j}." - unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -def convert_unet_state_dict(unet_state_dict): - # buyer beware: this is a *brittle* function, - # and correct output requires that all of these pieces interact in - # the exact order in which I have arranged them. 
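# The mapping is built in three ordered passes: (1) top-level names via
# unet_conversion_map, (2) resnet-internal names via unet_conversion_map_resnet
# (applied only to keys containing "resnets"), and (3) block prefixes via
# unet_conversion_map_layer. Reordering these passes would corrupt the names.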
- mapping = {k: k for k in unet_state_dict.keys()} - for sd_name, hf_name in unet_conversion_map: - mapping[hf_name] = sd_name - for k, v in mapping.items(): - if "resnets" in k: - for sd_part, hf_part in unet_conversion_map_resnet: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - for sd_part, hf_part in unet_conversion_map_layer: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} - return new_state_dict - - -# ================# -# VAE Conversion # -# ================# - -vae_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("nin_shortcut", "conv_shortcut"), - ("norm_out", "conv_norm_out"), - ("mid.attn_1.", "mid_block.attentions.0."), -] - -for i in range(4): - # down_blocks have two resnets - for j in range(2): - hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." - sd_down_prefix = f"encoder.down.{i}.block.{j}." - vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) - - if i < 3: - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." - sd_downsample_prefix = f"down.{i}.downsample." - vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) - - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"up.{3-i}.upsample." - vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) - - # up_blocks have three resnets - # also, up blocks in hf are numbered in reverse from sd - for j in range(3): - hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." - sd_up_prefix = f"decoder.up.{3-i}.block.{j}." - vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) - -# this part accounts for mid blocks in both the encoder and the decoder -for i in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{i}." - sd_mid_res_prefix = f"mid.block_{i+1}." 
- vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -vae_conversion_map_attn = [ - # (stable-diffusion, HF Diffusers) - ("norm.", "group_norm."), - ("q.", "query."), - ("k.", "key."), - ("v.", "value."), - ("proj_out.", "proj_attn."), -] - - -def reshape_weight_for_sd(w): - # convert HF linear weights to SD conv2d weights - return w.reshape(*w.shape, 1, 1) - - -def convert_vae_state_dict(vae_state_dict): - mapping = {k: k for k in vae_state_dict.keys()} - for k, v in mapping.items(): - for sd_part, hf_part in vae_conversion_map: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - if "attentions" in k: - for sd_part, hf_part in vae_conversion_map_attn: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} - weights_to_convert = ["q", "k", "v", "proj_out"] - print("Converting to CKPT ...") - for k, v in new_state_dict.items(): - for weight_name in weights_to_convert: - if f"mid.attn_1.{weight_name}.weight" in k: - new_state_dict[k] = reshape_weight_for_sd(v) - return new_state_dict - - -# =========================# -# Text Encoder Conversion # -# =========================# -# pretty much a no-op - - -def convert_text_enc_state_dict(text_enc_dict): - return text_enc_dict - - -def convert(model_path, checkpoint_path): - unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") - vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") - text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") - - # Convert the UNet model - unet_state_dict = torch.load(unet_path, map_location='cpu') - unet_state_dict = convert_unet_state_dict(unet_state_dict) - unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} - - # Convert the VAE model - vae_state_dict = torch.load(vae_path, map_location='cpu') - vae_state_dict = convert_vae_state_dict(vae_state_dict) - vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} - - # Convert the text encoder model - text_enc_dict = torch.load(text_enc_path, map_location='cpu') - text_enc_dict = convert_text_enc_state_dict(text_enc_dict) - text_enc_dict = {"cond_stage_model.transformer." 
+ k: v for k, v in text_enc_dict.items()} - - # Put together new checkpoint - state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} - - state_dict = {k:v.half() for k,v in state_dict.items()} - state_dict = {"state_dict": state_dict} - torch.save(state_dict, checkpoint_path) - del state_dict, text_enc_dict, vae_state_dict, unet_state_dict - torch.cuda.empty_cache() - gc.collect() diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_cliport_indomain_small.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_cliport_indomain_small.sh deleted file mode 100644 index 250de044eb27a54505b3ec8c72fa56c2a917d0e6..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_cliport_indomain_small.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -#SBATCH -c 10 -#SBATCH -n 1 -#SBATCH -o logs/%j.out -#SBATCH --exclusive - -STEPS=${1-'15000'} -now=$(date "+%Y-%m-%d_%H-%M-%S") - -sh scripts/traintest_scripts/train_test_multi_task_indistribution_small.sh data \ -"[stack-block-pyramid,put-block-in-bowl,place-red-in-green]" \ - cliport3_task_indomain_demo50_2023-07-27_23-08-25 $STEPS - diff --git a/spaces/GiladtheFixer/test_sentiment/app.py b/spaces/GiladtheFixer/test_sentiment/app.py deleted file mode 100644 index a156305c62d64cc40ea97c5455026cc72c0f1b01..0000000000000000000000000000000000000000 --- a/spaces/GiladtheFixer/test_sentiment/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr - -from transformers import pipeline - - - -pipe=pipeline('sentiment-analysis') - -def sentiment_calssifir(text): - return pipe(text) - - - - -demo = gr.Interface(fn=sentiment_calssifir, inputs="text", outputs="json") -demo.launch() - # outputs="label", - # title=title, - # description=description, - # live=True).launch(debug=True) \ No newline at end of file diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/app.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/app.py deleted file mode 100644 index 84df0269552db39f2b8f3efeeacf0eac26c4a06c..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import gradio as gr -import os -import shlex -import gdown -import uuid -import torch - -cpu_param = "--cpu" if not torch.cuda.is_available() else "" - -if (not os.path.exists("synpretrained.pt")): - gdown.download("https://drive.google.com/u/0/uc?id=1EqFMIbvxffxtjiVrtykroF6_mUh-5Z3s&export=download&confirm=t", - "synpretrained.pt", quiet=False) - gdown.download("https://drive.google.com/uc?export=download&id=1q8mEGwCkFy23KZsinbuvdKAQLqNKbYf1", - "encpretrained.pt", quiet=False) - gdown.download("https://drive.google.com/uc?export=download&id=1cf2NO6FtI0jDuy8AV3Xgn6leO6dHjIgu", - "vocpretrained.pt", quiet=False) - - -def inference(audio_path, text, mic_path=None): - if mic_path: - audio_path = mic_path - output_path = f"/tmp/output_{uuid.uuid4()}.wav" - os.system( - f"python demo_cli.py --no_sound {cpu_param} --audio_path {audio_path} --text {shlex.quote(text.strip())} --output_path {output_path}") - return output_path - - -title = "Real-Time-Voice-Cloning" -description = "Gradio demo for Real-Time-Voice-Cloning: Clone a voice in 5 seconds to generate arbitrary speech in real-time. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below." -article = "

Real-Time Voice Cloning | Github Repo"
print - print_bold_yellow = print - print_bold_blue = print - print_bold_purple = print - print_bold_indigo = print \ No newline at end of file diff --git a/spaces/Gna1L/jonatasgrosman-wav2vec2-large-xlsr-53-english/README.md b/spaces/Gna1L/jonatasgrosman-wav2vec2-large-xlsr-53-english/README.md deleted file mode 100644 index 2073f79ccd6f252622d8331579c3b62912421fcb..0000000000000000000000000000000000000000 --- a/spaces/Gna1L/jonatasgrosman-wav2vec2-large-xlsr-53-english/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Jonatasgrosman Wav2vec2 Large Xlsr 53 English -emoji: 🔥 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/fsaf.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/fsaf.py deleted file mode 100644 index 9f10fa1ae10f31e6cb5de65505b14a4fc97dd022..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/fsaf.py +++ /dev/null @@ -1,17 +0,0 @@ -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FSAF(SingleStageDetector): - """Implementation of `FSAF `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py deleted file mode 100644 index a6a7688c7a5f6ff1209eb7c44abdd105e91a2b76..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_80k_pascal_context_59.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py deleted file mode 100644 index 47ab4b1594faf1e9f1ce962fb980d80295b1f079..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/data/info_audio_dataset.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Base classes for the datasets that also provide non-audio metadata, -e.g. description, text transcription etc. 
-""" -from dataclasses import dataclass -import logging -import math -import re -import typing as tp - -import torch - -from .audio_dataset import AudioDataset, AudioMeta -from ..environment import AudioCraftEnvironment -from ..modules.conditioners import SegmentWithAttributes, ConditioningAttributes - - -logger = logging.getLogger(__name__) - - -def _clusterify_meta(meta: AudioMeta) -> AudioMeta: - """Monkey-patch meta to match cluster specificities.""" - meta.path = AudioCraftEnvironment.apply_dataset_mappers(meta.path) - if meta.info_path is not None: - meta.info_path.zip_path = AudioCraftEnvironment.apply_dataset_mappers(meta.info_path.zip_path) - return meta - - -def clusterify_all_meta(meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]: - """Monkey-patch all meta to match cluster specificities.""" - return [_clusterify_meta(m) for m in meta] - - -@dataclass -class AudioInfo(SegmentWithAttributes): - """Dummy SegmentInfo with empty attributes. - - The InfoAudioDataset is expected to return metadata that inherits - from SegmentWithAttributes class and can return conditioning attributes. - - This basically guarantees all datasets will be compatible with current - solver that contain conditioners requiring this. - """ - audio_tokens: tp.Optional[torch.Tensor] = None # populated when using cached batch for training a LM. - - def to_condition_attributes(self) -> ConditioningAttributes: - return ConditioningAttributes() - - -class InfoAudioDataset(AudioDataset): - """AudioDataset that always returns metadata as SegmentWithAttributes along with the audio waveform. - - See `audiocraft.data.audio_dataset.AudioDataset` for initialization arguments. - """ - def __init__(self, meta: tp.List[AudioMeta], **kwargs): - super().__init__(clusterify_all_meta(meta), **kwargs) - - def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentWithAttributes]]: - if not self.return_info: - wav = super().__getitem__(index) - assert isinstance(wav, torch.Tensor) - return wav - wav, meta = super().__getitem__(index) - return wav, AudioInfo(**meta.to_dict()) - - -def get_keyword_or_keyword_list(value: tp.Optional[str]) -> tp.Union[tp.Optional[str], tp.Optional[tp.List[str]]]: - """Preprocess a single keyword or possible a list of keywords.""" - if isinstance(value, list): - return get_keyword_list(value) - else: - return get_keyword(value) - - -def get_string(value: tp.Optional[str]) -> tp.Optional[str]: - """Preprocess a single keyword.""" - if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None': - return None - else: - return value.strip() - - -def get_keyword(value: tp.Optional[str]) -> tp.Optional[str]: - """Preprocess a single keyword.""" - if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None': - return None - else: - return value.strip().lower() - - -def get_keyword_list(values: tp.Union[str, tp.List[str]]) -> tp.Optional[tp.List[str]]: - """Preprocess a list of keywords.""" - if isinstance(values, str): - values = [v.strip() for v in re.split(r'[,\s]', values)] - elif isinstance(values, float) and math.isnan(values): - values = [] - if not isinstance(values, list): - logger.debug(f"Unexpected keyword list {values}") - values = [str(values)] - - kws = [get_keyword(v) for v in values] - kw_list = [k for k in kws if k is not None] - if len(kw_list) == 0: - return None - else: - return kw_list diff --git a/spaces/HaMerL/ChaosinChat/modules/overwrites.py b/spaces/HaMerL/ChaosinChat/modules/overwrites.py deleted file mode 
100644 index 035a4a52722d66ee28af1c05231ad1cea3339ef5..0000000000000000000000000000000000000000 --- a/spaces/HaMerL/ChaosinChat/modules/overwrites.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html -from gradio_client import utils as client_utils - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. 
Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | Tuple | List | None, message_type: str - ) -> str | Dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - filepath = chat_message[0] - mime_type = client_utils.get_mimetype(filepath) - filepath = self.make_temp_copy_if_needed(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - if message_type == "bot": - if not detect_converted_mark(chat_message): - chat_message = convert_mdtext(chat_message) - elif message_type == "user": - if not detect_converted_mark(chat_message): - chat_message = convert_asis(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py deleted file mode 100644 index dd8ea2c60aff306ab3a756223a298a28d41a4991..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Dict, List, Tuple - -import numpy as np -import torch -from fairseq.data import Dictionary, FairseqDataset, data_utils -from fairseq.data.concat_dataset import ConcatDataset -from fairseq.data.legacy.block_pair_dataset import BlockPairDataset -from fairseq.data.token_block_dataset import TokenBlockDataset - - -class MaskedLMDataset(FairseqDataset): - """ - A wrapper Dataset for masked language modelling. The dataset - wraps around TokenBlockDataset or BlockedPairDataset and creates a batch - where the input blocks are masked according to the specified masking - probability. Additionally the batch can also contain sentence level targets - if this is specified. - - Args: - dataset: Dataset which generates blocks of data. Only BlockPairDataset - and TokenBlockDataset are supported. - sizes: Sentence lengths - vocab: Dictionary with the vocabulary and special tokens. - pad_idx: Id of padding token in dictionary - mask_idx: Id of mask token in dictionary - classif_token_idx: Id of classification token in dictionary. 
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py deleted file mode 100644 index dd8ea2c60aff306ab3a756223a298a28d41a4991..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/masked_lm_dataset.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Dict, List, Tuple - -import numpy as np -import torch -from fairseq.data import Dictionary, FairseqDataset, data_utils -from fairseq.data.concat_dataset import ConcatDataset -from fairseq.data.legacy.block_pair_dataset import BlockPairDataset -from fairseq.data.token_block_dataset import TokenBlockDataset - - -class MaskedLMDataset(FairseqDataset): - """ - A wrapper Dataset for masked language modelling. The dataset - wraps around TokenBlockDataset or BlockPairDataset and creates a batch - where the input blocks are masked according to the specified masking - probability. Additionally the batch can also contain sentence level targets - if this is specified. - - Args: - dataset: Dataset which generates blocks of data. Only BlockPairDataset - and TokenBlockDataset are supported. - sizes: Sentence lengths - vocab: Dictionary with the vocabulary and special tokens. - pad_idx: Id of padding token in dictionary - mask_idx: Id of mask token in dictionary - classif_token_idx: Id of classification token in dictionary. This is the - token associated with the sentence embedding (Eg: CLS for BERT) - sep_token_idx: Id of separator token in dictionary - (Eg: SEP in BERT) - seed: Seed for random number generator for reproducibility. - shuffle: Shuffle the elements before batching. - has_pairs: Specifies whether the underlying dataset - generates a pair of blocks along with a sentence_target or not. - Setting it to True assumes that the underlying dataset generates a - label for the pair of sentences which is surfaced as - sentence_target. The default value assumes a single block with no - sentence target. - segment_id: An optional segment id for filling in the segment labels - when we are in the single block setting (Eg: XLM). Default is 0. - masking_ratio: specifies what percentage of the blocks should be masked. - masking_prob: specifies the probability of a given token being - replaced with the "MASK" token. - random_token_prob: specifies the probability of a given token being - replaced by a random token from the vocabulary. - """ - - def __init__( - self, - dataset: FairseqDataset, - sizes: np.ndarray, - vocab: Dictionary, - pad_idx: int, - mask_idx: int, - classif_token_idx: int, - sep_token_idx: int, - seed: int = 1, - shuffle: bool = True, - has_pairs: bool = True, - segment_id: int = 0, - masking_ratio: float = 0.15, - masking_prob: float = 0.8, - random_token_prob: float = 0.1, - ): - # Make sure the input datasets are the ones supported - assert ( - isinstance(dataset, TokenBlockDataset) - or isinstance(dataset, BlockPairDataset) - or isinstance(dataset, ConcatDataset) - ), ( - "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " - "ConcatDataset" - ) - - self.dataset = dataset - self.sizes = np.array(sizes) - self.vocab = vocab - self.pad_idx = pad_idx - self.mask_idx = mask_idx - self.classif_token_idx = classif_token_idx - self.sep_token_idx = sep_token_idx - self.shuffle = shuffle - self.seed = seed - self.has_pairs = has_pairs - self.segment_id = segment_id - self.masking_ratio = masking_ratio - self.masking_prob = masking_prob - self.random_token_prob = random_token_prob - - # If we have only one block then sizes needs to be updated to include - # the classification token - if not has_pairs: - self.sizes = self.sizes + 1 - - def __getitem__(self, index: int): - # if has_pairs, then expect 2 blocks and a sentence target - if self.has_pairs: - (block_one, block_two, sentence_target) = self.dataset[index] - else: - block_one = self.dataset[index] - - return { - "id": index, - "block_one": block_one, - "block_two": block_two if self.has_pairs else None, - "sentence_target": sentence_target if self.has_pairs else None, - } - - def __len__(self): - return len(self.dataset) - - def _mask_block( - self, - sentence: np.ndarray, - mask_idx: int, - pad_idx: int, - dictionary_token_range: Tuple, - ): - """ - Mask tokens for Masked Language Model training - Samples masking_ratio tokens that will be predicted by LM. - - Note: This function may not be efficient enough since we have multiple - conversions between np and torch; we can replace them with torch - operators later. - - Args: - sentence: 1d tensor to be masked - mask_idx: index to use for masking the sentence - pad_idx: index to use for masking the target for tokens we aren't - predicting - dictionary_token_range: range of indices in dictionary which can - be used for random word replacement - (e.g. 
without special characters) - Return: - masked_sent: masked sentence - target: target with words which we are not predicting replaced - by pad_idx - """ - masked_sent = np.copy(sentence) - sent_length = len(sentence) - mask_num = math.ceil(sent_length * self.masking_ratio) - mask = np.random.choice(sent_length, mask_num, replace=False) - target = np.copy(sentence) - - for i in range(sent_length): - if i in mask: - rand = np.random.random() - - # replace with mask if probability is less than masking_prob - # (Eg: 0.8) - if rand < self.masking_prob: - masked_sent[i] = mask_idx - - # replace with random token if probability is less than - # masking_prob + random_token_prob (Eg: 0.9) - elif rand < (self.masking_prob + self.random_token_prob): - # sample random token from dictionary - masked_sent[i] = np.random.randint( - dictionary_token_range[0], dictionary_token_range[1] - ) - else: - target[i] = pad_idx - - return masked_sent, target - - def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int): - """ - Does the heavy lifting for creating a batch from the input list of - examples. The logic is as follows: - 1. Mask the input blocks. In case has_pair is True then we have 2 - blocks to mask. - 2. Prepend the first masked block tensor with the special token - used as sentence embedding. Eg: CLS in BERT. This happens - irrespective of the value of has_pair. - 3. If has_pair is True, then append the first masked block with the - special separator token (eg: SEP for BERT) and compute segment - label accordingly. In this case, also append the second masked - block with this special separator token and compute its segment - label. - 4. For the targets tensor, prepend and append with padding index - accordingly. - 5. Concatenate all tensors. - """ - if len(samples) == 0: - return {} - # To ensure determinism, we reset the state of the PRNG after every - # batch based on the seed and the first id of the batch. This ensures - # that across epochs we get the same mask for the same example. This - # is needed for reproducibility and is how BERT does masking - # TODO: Can we add determinism without this constraint? - with data_utils.numpy_seed(self.seed + samples[0]["id"]): - for s in samples: - - # token range is needed for replacing with random token during - # masking - token_range = (self.vocab.nspecial, len(self.vocab)) - - # mask according to specified probabilities. - masked_blk_one, masked_tgt_one = self._mask_block( - s["block_one"], - self.mask_idx, - self.pad_idx, - token_range, - ) - - tokens = np.concatenate([[self.classif_token_idx], masked_blk_one]) - targets = np.concatenate([[self.pad_idx], masked_tgt_one]) - segments = np.ones(len(tokens)) * self.segment_id - - # if has_pairs is True then we need to add the SEP token to both - # the blocks after masking and re-compute segments based on the new - # lengths. 
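# For instance (illustrative layout only), with masked blocks A = a1..an
# and B = b1..bm, the pair branch below produces:
#   tokens:     [CLS] a1 .. an [SEP] b1 .. bm [SEP]
#   segments:     0    0 ..  0   0    1  ..  1   1
#   lm_target:  [PAD] t1 .. tn [PAD] u1 .. um [PAD]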
- if self.has_pairs: - tokens_one = np.concatenate([tokens, [self.sep_token_idx]]) - targets_one = np.concatenate([targets, [self.pad_idx]]) - - masked_blk_two, masked_tgt_two = self._mask_block( - s["block_two"], self.mask_idx, self.pad_idx, token_range - ) - tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]]) - targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]]) - - # block + 1 sep + 1 special (CLS) - segments_one = np.zeros(len(tokens_one)) - # block + 1 sep - segments_two = np.ones(len(tokens_two)) - - tokens = np.concatenate([tokens_one, tokens_two]) - targets = np.concatenate([targets_one, targets_two]) - segments = np.concatenate([segments_one, segments_two]) - - s["source"] = torch.LongTensor(tokens) - s["segment_labels"] = torch.LongTensor(segments) - s["lm_target"] = torch.LongTensor(targets) - - def merge(key): - return data_utils.collate_tokens( - [s[key] for s in samples], pad_idx, eos_idx, left_pad=False - ) - - return { - "id": torch.LongTensor([s["id"] for s in samples]), - "ntokens": sum(len(s["source"]) for s in samples), - "net_input": { - "src_tokens": merge("source"), - "segment_labels": merge("segment_labels"), - }, - "lm_target": merge("lm_target"), - "sentence_target": torch.LongTensor([s["sentence_target"] for s in samples]) - if self.has_pairs - else None, - "nsentences": len(samples), - } - - def collater(self, samples: List[Dict]): - """Merge a list of samples to form a mini-batch. - - Args: - samples (List[dict]): samples to collate - - Returns: - dict: a mini-batch of data - """ - return self._collate(samples, self.vocab.pad(), self.vocab.eos()) - - def num_tokens(self, index: int): - """ - Return the number of tokens in a sample. This value is used to - enforce max-tokens during batching. - """ - return self.sizes[index] - - def size(self, index: int): - """ - Return an example's size as a float or tuple. This value is used when - filtering a dataset with max-positions. - """ - return self.sizes[index] - - def ordered_indices(self): - """ - Return an ordered list of indices. Batches will be constructed based - on this order. - """ - if self.shuffle: - return np.random.permutation(len(self)) - else: - order = [np.arange(len(self))] - order.append(self.sizes) - return np.lexsort(order) - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - self.dataset.prefetch(indices)
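An aside on `_mask_block` in the file above: it is the standard BERT recipe. It samples `masking_ratio` (15%) of the positions; of those, `masking_prob` (80%) become the mask token, the next `random_token_prob` (10%) become a random vocabulary token, and the rest stay unchanged, while every unselected position is padded out of the LM target. A minimal NumPy sketch of the same recipe (token ids and vocabulary range are made up; the target is built up from padding rather than down from a copy, which is equivalent):

```python
import math
import numpy as np

# Toy indices for the sketch: 0 = <pad>, 1 = <mask>, real tokens are ids 4..99.
PAD, MASK, VOCAB = 0, 1, (4, 100)

def mask_block(sentence, masking_ratio=0.15, masking_prob=0.8, random_token_prob=0.1):
    masked = sentence.copy()
    target = np.full_like(sentence, PAD)   # only masked positions are predicted
    num_mask = math.ceil(len(sentence) * masking_ratio)
    picked = np.random.choice(len(sentence), num_mask, replace=False)
    for i in picked:
        target[i] = sentence[i]            # the LM must recover the original token
        r = np.random.random()
        if r < masking_prob:               # 80%: replace with <mask>
            masked[i] = MASK
        elif r < masking_prob + random_token_prob:  # next 10%: random token
            masked[i] = np.random.randint(*VOCAB)
        # remaining 10%: leave the input token unchanged
    return masked, target

np.random.seed(0)
sentence = np.random.randint(*VOCAB, size=20)
masked, target = mask_block(sentence)
print(masked)
print(target)
```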
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/offset_tokens_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/offset_tokens_dataset.py deleted file mode 100644 index 6fabbdcdaa1a8f70d8d8c07db4cd53754503c194..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/offset_tokens_dataset.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . import BaseWrapperDataset - - -class OffsetTokensDataset(BaseWrapperDataset): - def __init__(self, dataset, offset): - super().__init__(dataset) - self.offset = offset - - def __getitem__(self, idx): - return self.dataset[idx] + self.offset diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_module_proxy_wrapper.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_module_proxy_wrapper.py deleted file mode 100644 index 2803a044cdcc12e0a348f40d06ce89c571d307ed..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_module_proxy_wrapper.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unittest - -import torch -from torch import nn - -from fairseq.distributed import ModuleProxyWrapper - -from .utils import objects_are_equal - - -class MockDDPWrapper(nn.Module): - """A simple wrapper with an interface similar to DistributedDataParallel.""" - - def __init__(self, module): - super().__init__() - self.module = module - - def forward(self, x): - return self.module(x) - - -class Model(nn.Module): - def __init__(self): - super().__init__() - self.linear = nn.Linear(5, 10) - self.xyz = "hello" - - def forward(self, x): - return self.linear(x) - - def get_xyz(self): - return self.xyz - - -class TestModuleProxyWrapper(unittest.TestCase): - - def _get_module(self): - module = Model() - wrapped_module = MockDDPWrapper(module) - wrapped_module = ModuleProxyWrapper(wrapped_module) - return wrapped_module, module - - def test_getattr_forwarding(self): - wrapped_module, module = self._get_module() - assert module.xyz == "hello" - assert module.get_xyz() == "hello" - assert wrapped_module.xyz == "hello" - - wrapped_module.xyz = "world" - assert wrapped_module.xyz == "world" - assert module.get_xyz() == "hello" - - def test_state_dict(self): - wrapped_module, module = self._get_module() - assert objects_are_equal(wrapped_module.state_dict(), module.state_dict()) - - def test_load_state_dict(self): - wrapped_module, module = self._get_module() - wrapped_module.load_state_dict(module.state_dict()) - input = torch.rand(4, 5) - torch.testing.assert_allclose(wrapped_module(input), module(input)) - - def test_forward(self): - wrapped_module, module = self._get_module() - input = torch.rand(4, 5) - torch.testing.assert_allclose(wrapped_module(input), module(input)) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Hina4867/bingo/src/components/voice.tsx b/spaces/Hina4867/bingo/src/components/voice.tsx deleted file mode 100644 index 074d0e145229947282a472bd84f6578cf0b3c71c..0000000000000000000000000000000000000000 --- a/spaces/Hina4867/bingo/src/components/voice.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import React, { useEffect } from 'react' -import { useSetAtom } from 'jotai' -import { useBing } from '@/lib/hooks/use-bing' -import Image from 'next/image' -import VoiceIcon from '@/assets/images/voice.svg' -import VoiceButton from './ui/voice' -import { SR } from '@/lib/bots/bing/sr' -import { voiceListenAtom } from '@/state' - -const sr = new SR(['发送', '清空', '退出']) - -const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick<ReturnType<typeof useBing>, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => { - const setListen = useSetAtom(voiceListenAtom) - useEffect(() => { - if (sr.listening) return - sr.transcript = 
!isSpeaking - }, [isSpeaking]) - - useEffect(() => { - sr.onchange = (msg: string, command?: string) => { - switch (command) { - case '退出': - sr.stop() - break; - case '发送': - sendMessage(input) - case '清空': - setInput('') - break; - default: - setInput(input + msg) - } - } - }, [input]) - - const switchSR = (enable: boolean = false) => { - setListen(enable) - if (enable) { - sr.start() - } else { - sr.stop() - } - } - - return sr.listening ? ( - switchSR(false)} /> - ) : ( - switchSR(true)} /> - ) -}; - -export default Voice; diff --git a/spaces/HuggingFaceH4/open_llm_leaderboard/models_backlinks.py b/spaces/HuggingFaceH4/open_llm_leaderboard/models_backlinks.py deleted file mode 100644 index e1601174d8eae6052c65575d3b4c268f09a80208..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceH4/open_llm_leaderboard/models_backlinks.py +++ /dev/null @@ -1,1309 +0,0 @@ -models = [ - "uni-tianyan/Uni-TianYan", - "fangloveskari/ORCA_LLaMA_70B_QLoRA", - "garage-bAInd/Platypus2-70B-instruct", - "upstage/Llama-2-70b-instruct-v2", - "fangloveskari/Platypus_QLoRA_LLaMA_70b", - "yeontaek/llama-2-70B-ensemble-v5", - "TheBloke/Genz-70b-GPTQ", - "TheBloke/Platypus2-70B-Instruct-GPTQ", - "psmathur/model_007", - "yeontaek/llama-2-70B-ensemble-v4", - "psmathur/orca_mini_v3_70b", - "ehartford/Samantha-1.11-70b", - "MayaPH/GodziLLa2-70B", - "psmathur/model_007_v2", - "chargoddard/MelangeA-70b", - "ehartford/Samantha-1.1-70b", - "psmathur/model_009", - "upstage/Llama-2-70b-instruct", - "yeontaek/llama-2-70B-ensemble-v7", - "yeontaek/llama-2-70B-ensemble-v6", - "chargoddard/MelangeB-70b", - "yeontaek/llama-2-70B-ensemble-v3", - "chargoddard/MelangeC-70b", - "garage-bAInd/Camel-Platypus2-70B", - "yeontaek/llama-2-70B-ensemble-v2", - "garage-bAInd/Camel-Platypus2-70B", - "migtissera/Synthia-70B-v1.2", - "v2ray/LLaMA-2-Wizard-70B-QLoRA", - "quantumaikr/llama-2-70b-fb16-orca-chat-10k", - "v2ray/LLaMA-2-Wizard-70B-QLoRA", - "stabilityai/StableBeluga2", - "quantumaikr/llama-2-70b-fb16-guanaco-1k", - "garage-bAInd/Camel-Platypus2-70B", - "migtissera/Synthia-70B-v1.1", - "migtissera/Synthia-70B", - "psmathur/model_101", - "augtoma/qCammel70", - "augtoma/qCammel-70", - "augtoma/qCammel-70v1", - "augtoma/qCammel-70x", - "augtoma/qCammel-70-x", - "jondurbin/airoboros-l2-70b-gpt4-1.4.1", - "dfurman/llama-2-70b-dolphin-peft", - "jondurbin/airoboros-l2-70b-2.1", - "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16", - "quantumaikr/QuantumLM-llama2-70B-Korean-LoRA", - "quantumaikr/quantumairk-llama-2-70B-instruct", - "psmathur/model_420", - "psmathur/model_51", - "garage-bAInd/Camel-Platypus2-70B", - "TheBloke/Airoboros-L2-70B-2.1-GPTQ", - "OpenAssistant/llama2-70b-oasst-sft-v10", - "garage-bAInd/Platypus2-70B", - "liuxiang886/llama2-70B-qlora-gpt4", - "upstage/llama-65b-instruct", - "quantumaikr/llama-2-70b-fb16-korean", - "NousResearch/Nous-Hermes-Llama2-70b", - "v2ray/LLaMA-2-Jannie-70B-QLoRA", - "jondurbin/airoboros-l2-70b-gpt4-m2.0", - "jondurbin/airoboros-l2-70b-gpt4-m2.0", - "OpenAssistant/llama2-70b-oasst-sft-v10", - "yeontaek/llama-2-70B-ensemble-v8", - "jondurbin/airoboros-l2-70b-gpt4-2.0", - "jarradh/llama2_70b_chat_uncensored", - "WizardLM/WizardMath-70B-V1.0", - "jordiclive/Llama-2-70b-oasst-1-200", - "WizardLM/WizardMath-70B-V1.0", - "jondurbin/airoboros-l2-70b-gpt4-2.0", - "OpenLemur/lemur-70b-chat-v1", - "tiiuae/falcon-180B", - "tiiuae/falcon-180B", - "stabilityai/StableBeluga1-Delta", - "psmathur/model_42_70b", - "psmathur/test_42_70b", - "TheBloke/fiction.live-Kimiko-V2-70B-fp16", - "tiiuae/falcon-180B", - 
"WizardLM/WizardMath-70B-V1.0", - "tiiuae/falcon-180B-chat", - "jondurbin/airoboros-l2-70b-gpt4-2.0", - "ehartford/samantha-1.1-llama-33b", - "ajibawa-2023/scarlett-33b", - "ddobokki/Llama-2-70b-orca-200k", - "TheBloke/gpt4-alpaca-lora_mlp-65B-HF", - "tiiuae/falcon-180B-chat", - "tiiuae/falcon-180B-chat", - "tiiuae/falcon-180B", - "TheBloke/Lemur-70B-Chat-v1-GPTQ", - "NousResearch/Nous-Puffin-70B", - "WizardLM/WizardLM-70B-V1.0", - "WizardLM/WizardMath-70B-V1.0", - "meta-llama/Llama-2-70b-hf", - "TheBloke/Llama-2-70B-fp16", - "Weyaxi/llama-2-alpacagpt4-1000step", - "WizardLM/WizardLM-70B-V1.0", - "simsim314/WizardLM-70B-V1.0-HF", - "simsim314/WizardLM-70B-V1.0-HF", - "WizardLM/WizardLM-70B-V1.0", - "openbmb/UltraLM-65b", - "psmathur/model_420_preview", - "WizardLM/WizardLM-70B-V1.0", - "simsim314/WizardLM-70B-V1.0-HF", - "OpenBuddy/openbuddy-llama2-70b-v10.1-bf16", - "upstage/llama-30b-instruct-2048", - "jondurbin/airoboros-65b-gpt4-1.2", - "TheBloke/guanaco-65B-HF", - "jondurbin/airoboros-65b-gpt4-1.3", - "meta-llama/Llama-2-70b-chat-hf", - "ValiantLabs/ShiningValiant", - "Faradaylab/Aria-70B", - "lilloukas/GPlatty-30B", - "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16", - "jondurbin/airoboros-65b-gpt4-1.4-peft", - "jondurbin/airoboros-65b-gpt4-1.4", - "jondurbin/airoboros-65b-gpt4-2.0", - "TheBloke/WizardLM-70B-V1.0-GPTQ", - "TheBloke/WizardLM-70B-V1.0-GPTQ", - "ariellee/SuperPlatty-30B", - "jondurbin/airoboros-65b-gpt4-1.4", - "jondurbin/airoboros-65b-gpt4-2.0", - "yeontaek/llama-2-70b-IA3-guanaco", - "CalderaAI/30B-Lazarus", - "Aspik101/trurl-2-13b-pl-instruct_unload", - "ehartford/WizardLM-33B-V1.0-Uncensored", - "ehartford/WizardLM-33B-V1.0-Uncensored", - "OpenBuddy/openbuddy-llama-65b-v8-bf16", - "Aspik101/llama-30b-instruct-2048-PL-lora", - "h2oai/h2ogpt-research-oasst1-llama-65b", - "Aspik101/llama-30b-instruct-2048-PL-lora", - "CalderaAI/30B-Epsilon", - "Aspik101/llama-30b-2048-instruct-PL-lora_unload", - "jondurbin/airoboros-65b-gpt4-m2.0", - "jondurbin/airoboros-65b-gpt4-m2.0", - "Aeala/Alpaca-elina-65b", - "TheBloke/robin-65b-v2-fp16", - "TheBloke/gpt4-alpaca-lora-30b-HF", - "TheBloke/Llama-2-70B-chat-GPTQ", - "upstage/llama-30b-instruct", - "OpenLemur/lemur-70b-v1", - "lmsys/vicuna-33b-v1.3", - "ausboss/llama-30b-supercot", - "ai-business/Luban-13B", - "Henk717/airochronos-33B", - "lmsys/vicuna-33b-v1.3", - "Henk717/airochronos-33B", - "bavest/fin-llama-33b-merged", - "jondurbin/airoboros-33b-gpt4-1.4", - "YeungNLP/firefly-llama-30b", - "Aspik101/30B-Lazarus-instruct-PL-lora_unload", - "uukuguy/speechless-llama2-luban-orca-platypus-13b", - "xxyyy123/test_merge_p_ov1_w0.66_w0.5_n1", - "jondurbin/airoboros-33b-gpt4-1.2", - "TheBloke/alpaca-lora-65B-HF", - "bofenghuang/vigogne-33b-instruct", - "yeontaek/llama-2-13B-ensemble-v5", - "garage-bAInd/Platypus-30B", - "Open-Orca/OpenOrca-Platypus2-13B", - "kajdun/viwaai-30b_v4", - "lilloukas/Platypus-30B", - "Open-Orca/OpenOrca-Platypus2-13B", - "Henk717/chronoboros-33B", - "jondurbin/airoboros-33b-2.1", - "HiTZ/alpaca-lora-65b-en-pt-es-ca", - "quantumaikr/QuantumLM-70B-hf", - "uukuguy/speechless-llama2-13b", - "uukuguy/speechless-llama2-hermes-orca-platypus-13b", - "openaccess-ai-collective/manticore-30b-chat-pyg-alpha", - "LLMs/WizardLM-30B-V1.0", - "TheBloke/WizardLM-30B-fp16", - "openaccess-ai-collective/hippogriff-30b-chat", - "concedo/Vicuzard-30B-Uncensored", - "TFLai/OpenOrca-Platypus2-13B-QLoRA-0.80-epoch", - "huggingface/llama-65b", - "huggyllama/llama-65b", - "gaodrew/gaodrew-llama-30b-instruct-2048-Open-Platypus-100steps", - 
"uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b", - "Sao10K/Mythical-Destroyer-V2-L2-13B", - "camel-ai/CAMEL-33B-Combined-Data", - "dsvv-cair/alpaca-cleaned-llama-30b-bf16", - "MetaIX/GPT4-X-Alpasta-30b", - "garage-bAInd/Stable-Platypus2-13B", - "TFLai/Luban-Platypus2-13B-QLora-0.80-epoch", - "TheBloke/OpenOrca-Platypus2-13B-GPTQ", - "IkariDev/Athena-tmp", - "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", - "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", - "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", - "psmathur/model_007_13b_v2", - "Aspik101/Vicuzard-30B-Uncensored-instruct-PL-lora_unload", - "jondurbin/airoboros-33b-gpt4-m2.0", - "Sao10K/Mythical-Destroyer-L2-13B", - "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16", - "ehartford/Wizard-Vicuna-30B-Uncensored", - "TFLai/Nova-13B", - "TheBloke/robin-33B-v2-fp16", - "totally-not-an-llm/PuddleJumper-13b", - "Aeala/VicUnlocked-alpaca-30b", - "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf", - "jondurbin/airoboros-33b-gpt4", - "jondurbin/airoboros-33b-gpt4-m2.0", - "tiiuae/falcon-40b-instruct", - "psmathur/orca_mini_v3_13b", - "Aeala/GPT4-x-AlpacaDente-30b", - "MayaPH/GodziLLa-30B", - "jondurbin/airoboros-33b-gpt4-m2.0", - "TFLai/SpeechlessV1-Nova-13B", - "yeontaek/llama-2-13B-ensemble-v4", - "ajibawa-2023/carl-33b", - "jondurbin/airoboros-33b-gpt4-2.0", - "TFLai/Stable-Platypus2-13B-QLoRA-0.80-epoch", - "jondurbin/airoboros-33b-gpt4-1.3", - "TehVenom/oasst-sft-6-llama-33b-xor-MERGED-16bit", - "TFLai/OrcaMini-Platypus2-13B-QLoRA-0.80-epoch", - "jondurbin/airoboros-33b-gpt4-2.0", - "chargoddard/Chronorctypus-Limarobormes-13b", - "jondurbin/airoboros-33b-gpt4-1.3", - "Open-Orca/OpenOrca-Platypus2-13B", - "FelixChao/vicuna-33b-coder", - "FelixChao/vicuna-33b-coder", - "Gryphe/MythoMix-L2-13b", - "Aeala/Enterredaas-33b", - "yeontaek/llama-2-13B-ensemble-v1", - "TFLai/OpenOrcaPlatypus2-Platypus2-13B-QLora-0.80-epoch", - "TFLai/Ensemble5-Platypus2-13B-QLora-0.80-epoch", - "yeontaek/llama-2-13B-ensemble-v3", - "TFLai/MythoMix-Platypus2-13B-QLoRA-0.80-epoch", - "yihan6324/llama2-13b-instructmining-40k-sharegpt", - "timdettmers/guanaco-33b-merged", - "TFLai/EnsembleV5-Nova-13B", - "circulus/Llama-2-13b-orca-v1", - "Undi95/ReMM-SLERP-L2-13B", - "Gryphe/MythoMax-L2-13b", - "stabilityai/StableBeluga-13B", - "circulus/Llama-2-13b-orca-v1", - "ehartford/WizardLM-30B-Uncensored", - "The-Face-Of-Goonery/huginnv1.2", - "TheBloke/OpenOrcaxOpenChat-Preview2-13B-GPTQ", - "Sao10K/Stheno-L2-13B", - "bofenghuang/vigogne-2-13b-instruct", - "The-Face-Of-Goonery/Huginn-13b-FP16", - "grimpep/L2-MythoMax22b-instruct-Falseblock", - "TFLai/Nous-Hermes-Platypus2-13B-QLoRA-0.80-epoch", - "yeontaek/Platypus2xOpenOrca-13B-IA3-v4", - "yeontaek/Platypus2xOpenOrca-13B-IA3", - "yeontaek/Platypus2xOpenOrca-13B-IA3-ensemble", - "Open-Orca/LlongOrca-13B-16k", - "Sao10K/Stheno-Inverted-L2-13B", - "garage-bAInd/Camel-Platypus2-13B", - "digitous/Alpacino30b", - "NousResearch/Nous-Hermes-Llama2-13b", - "yeontaek/Platypus2xOpenOrca-13B-IA3-v3", - "TFLai/MythicalDestroyerV2-Platypus2-13B-QLora-0.80-epoch", - "TheBloke/VicUnlocked-30B-LoRA-HF", - "Undi95/Nous-Hermes-13B-Code", - "The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16", - "NousResearch/Nous-Hermes-Llama2-13b", - "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b", - "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ", - "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", - "Austism/chronos-hermes-13b-v2", - "yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", - "yeontaek/Platypus2xOpenOrca-13B-IA3-v2", - "Gryphe/MythoLogic-L2-13b", - "augtoma/qCammel-13", - 
"YeungNLP/firefly-llama2-13b-v1.2", - "Aspik101/StableBeluga-13B-instruct-PL-lora_unload", - "andreaskoepf/llama2-13b-megacode2_min100", - "rombodawg/LosslessMegaCoder-llama2-13b-mini", - "yulan-team/YuLan-Chat-2-13b-fp16", - "elinas/chronos-33b", - "YeungNLP/firefly-llama2-13b", - "Sao10K/Medusa-13b", - "OptimalScale/robin-65b-v2-delta", - "minlik/chinese-alpaca-33b-merged", - "OpenAssistant/llama2-13b-megacode2-oasst", - "TheBloke/OpenAssistant-SFT-7-Llama-30B-HF", - "Undi95/UndiMix-v1-13b", - "ehartford/Samantha-1.11-13b", - "beaugogh/Llama2-13b-sharegpt4", - "Aeala/GPT4-x-AlpacaDente2-30b", - "luffycodes/nash-vicuna-13b-v1dot5-ep2-w-rag-w-simple", - "WizardLM/WizardLM-13B-V1.1", - "uukuguy/speechless-orca-platypus-coig-lite-2k-0.6e-13b", - "huggyllama/llama-30b", - "Undi95/ReMM-L2-13B-PIPPA", - "Undi95/ReMM-L2-13B", - "gaodrew/gaodrew-gorgonzola-13b", - "lmsys/vicuna-13b-v1.5", - "yeontaek/Platypus2xOpenOrca-13B-LoRa", - "Yhyu13/llama-30B-hf-openassitant", - "huggingface/llama-30b", - "lmsys/vicuna-13b-v1.5", - "TFLai/Athena-Platypus2-13B-QLora-0.80-epoch", - "TheBloke/dromedary-65b-lora-HF", - "yeontaek/llama-2-13b-Beluga-QLoRA", - "The-Face-Of-Goonery/Huginn-13b-V4", - "The-Face-Of-Goonery/Huginn-13b-v4.5", - "The-Face-Of-Goonery/Huginn-v3-13b", - "tiiuae/falcon-40b", - "WhoTookMyAmogusNickname/NewHope_HF_not_official", - "gaodrew/OpenOrca-Platypus2-13B-thera-1250", - "SLAM-group/NewHope", - "garage-bAInd/Platypus2-13B", - "migtissera/Synthia-13B", - "elinas/chronos-13b-v2", - "mosaicml/mpt-30b-chat", - "CHIH-HUNG/llama-2-13b-OpenOrca_5w", - "uukuguy/speechless-hermes-coig-lite-13b", - "TheBloke/tulu-30B-fp16", - "uukuguy/speechless-hermes-coig-lite-13b", - "xDAN-AI/xDAN_13b_l2_lora", - "lmsys/vicuna-13b-v1.5-16k", - "openchat/openchat_v3.1", - "CHIH-HUNG/llama-2-13b-dolphin_5w", - "Aspik101/vicuna-13b-v1.5-PL-lora_unload", - "Undi95/MLewd-L2-13B", - "ehartford/minotaur-llama2-13b-qlora", - "kajdun/iubaris-13b-v3", - "TFLai/Limarp-Platypus2-13B-QLoRA-0.80-epoch", - "openchat/openchat_v3.1", - "uukuguy/speechless-orca-platypus-coig-lite-4k-0.6e-13b", - "ziqingyang/chinese-alpaca-2-13b", - "TFLai/Airboros2.1-Platypus2-13B-QLora-0.80-epoch", - "yeontaek/llama-2-13b-Guanaco-QLoRA", - "lmsys/vicuna-13b-v1.5-16k", - "ehartford/based-30b", - "kingbri/airolima-chronos-grad-l2-13B", - "openchat/openchat_v3.2", - "uukuguy/speechless-orca-platypus-coig-lite-4k-0.5e-13b", - "yeontaek/Platypus2-13B-LoRa", - "kingbri/chronolima-airo-grad-l2-13B", - "openchat/openchat_v3.2", - "TFLai/PuddleJumper-Platypus2-13B-QLoRA-0.80-epoch", - "shareAI/llama2-13b-Chinese-chat", - "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", - "Aspik101/Redmond-Puffin-13B-instruct-PL-lora_unload", - "yeontaek/llama-2-13B-ensemble-v6", - "WizardLM/WizardLM-13B-V1.2", - "TheBloke/WizardLM-13B-V1.1-GPTQ", - "bhenrym14/airophin-13b-pntk-16k-fp16", - "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", - "Mikael110/llama-2-13b-guanaco-fp16", - "yeontaek/airoboros-2.1-llama-2-13B-QLoRa", - "CalderaAI/13B-Legerdemain-L2", - "grimpep/llama2-22b-wizard_vicuna", - "grimpep/llama2-22B-GPLATTY", - "bhenrym14/airophin-13b-pntk-16k-fp16", - "yeontaek/llama-2-13b-QLoRA", - "OpenAssistant/llama2-13b-orca-8k-3319", - "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16", - "duliadotio/dulia-13b-8k-alpha", - "Undi95/LewdEngine", - "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16", - "CHIH-HUNG/llama-2-13b-open_orca_20w", - "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", - "FlagAlpha/Llama2-Chinese-13b-Chat", - "LLMs/WizardLM-13B-V1.0", - 
"chansung/gpt4-alpaca-lora-13b-decapoda-1024", - "TheBloke/wizardLM-13B-1.0-fp16", - "digitous/13B-Chimera", - "yeontaek/Platypus2xOpenOrcaxGuanaco-13B-LoRa", - "jondurbin/airoboros-l2-13b-2.1", - "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b", - "TheBloke/UltraLM-13B-fp16", - "openaccess-ai-collective/minotaur-13b-fixed", - "NousResearch/Redmond-Puffin-13B", - "KoboldAI/LLaMA2-13B-Holomax", - "Lajonbot/WizardLM-13B-V1.2-PL-lora_unload", - "yeontaek/Platypus2-13B-LoRa-v2", - "TheBloke/airoboros-13B-HF", - "jondurbin/airoboros-13b", - "jjaaaww/posi_13b", - "CoolWP/llama-2-13b-guanaco-fp16", - "yeontaek/Platypus2-13B-QLoRa", - "h2oai/h2ogpt-research-oig-oasst1-512-30b", - "dfurman/llama-2-13b-guanaco-peft", - "NousResearch/Redmond-Puffin-13B", - "pe-nlp/llama-2-13b-platypus-vicuna-wizard", - "CHIH-HUNG/llama-2-13b-dolphin_20w", - "NousResearch/Nous-Hermes-13b", - "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEconsE4", - "ehartford/Wizard-Vicuna-13B-Uncensored", - "TheBloke/Wizard-Vicuna-13B-Uncensored-HF", - "openchat/openchat_v3.2_super", - "bhenrym14/airophin-v2-13b-PI-8k-fp16", - "openaccess-ai-collective/manticore-13b", - "The-Face-Of-Goonery/Huginn-22b-Prototype", - "jphme/Llama-2-13b-chat-german", - "grimpep/llama2-28B-Airo03", - "TheBloke/Kimiko-v2-13B-fp16", - "FPHam/Free_Sydney_13b_HF", - "lmsys/vicuna-13b-v1.3", - "FelixChao/llama2-13b-math1.1", - "CalderaAI/13B-BlueMethod", - "meta-llama/Llama-2-13b-chat-hf", - "deepse/CodeUp-Llama-2-13b-chat-hf", - "WizardLM/WizardMath-13B-V1.0", - "WizardLM/WizardMath-13B-V1.0", - "HyperbeeAI/Tulpar-7b-v0", - "xxyyy123/test_qkvo_adptor", - "xxyyy123/mc_data_30k_from_platpus_orca_7b_10k_v1_lora_qkvo_rank14_v2", - "openchat/openchat_v2_w", - "FelixChao/llama2-13b-math1.1", - "psmathur/orca_mini_v3_7b", - "TehVenom/Metharme-13b-Merged", - "xxyyy123/10k_v1_lora_qkvo_rank14_v3", - "OpenAssistant/llama2-13b-orca-v2-8k-3166", - "openaccess-ai-collective/wizard-mega-13b", - "jondurbin/airoboros-13b-gpt4-1.4", - "jondurbin/airoboros-13b-gpt4-1.4-fp16", - "Monero/Manticore-13b-Chat-Pyg-Guanaco", - "FelixChao/llama2-13b-math1.2", - "chargoddard/platypus-2-22b-relora", - "FelixChao/llama2-13b-math1.2", - "Gryphe/MythoBoros-13b", - "CalderaAI/13B-Ouroboros", - "OpenAssistant/llama2-13b-orca-v2-8k-3166", - "heegyu/LIMA2-13b-hf", - "digitous/13B-HyperMantis", - "Gryphe/MythoLogic-13b", - "TheBloke/Airoboros-L2-13B-2.1-GPTQ", - "chargoddard/platypus2-22b-relora", - "openchat/openchat_v2", - "yeontaek/Platypus2-13B-IA3", - "stabilityai/StableBeluga-7B", - "circulus/Llama-2-7b-orca-v1", - "budecosystem/genz-13b-v2", - "TheBloke/gpt4-x-vicuna-13B-HF", - "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEcons", - "zarakiquemparte/zarafusionex-1.1-l2-7b", - "Lajonbot/tableBeluga-7B-instruct-pl-lora_unload", - "jondurbin/airoboros-13b-gpt4", - "gaodrew/gaodrew-gorgonzola-13b", - "jondurbin/airoboros-13b-gpt4-1.1", - "TheBloke/gpt4-alpaca-lora-13B-HF", - "zarakiquemparte/zarablendex-vq-l2-7b", - "openaccess-ai-collective/manticore-13b-chat-pyg", - "Lajonbot/Llama-2-13b-hf-instruct-pl-lora_unload", - "NobodyExistsOnTheInternet/PuffedLIMA13bQLORA", - "xxyyy123/10k_v1_lora_qkvo_rank28_v2", - "jondurbin/airoboros-l2-13b-gpt4-1.4.1", - "dhmeltzer/Llama-2-13b-hf-eli5-wiki-1024_r_64_alpha_16", - "NobodyExistsOnTheInternet/PuffedConvo13bLoraE4", - "yihan6324/llama2-7b-instructmining-40k-sharegpt", - "CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w", - "Aeala/GPT4-x-Alpasta-13b", - "psmathur/orca_mini_v2_13b", - "YeungNLP/firefly-llama-13b", - 
"psmathur/orca_mini_v2_13b", - "zarakiquemparte/zarafusionix-l2-7b", - "yihan6324/llama2-7b-instructmining-60k-sharegpt", - "yihan6324/llama-2-7b-instructmining-60k-sharegpt", - "layoric/llama-2-13b-code-alpaca", - "bofenghuang/vigogne-13b-instruct", - "Lajonbot/vicuna-13b-v1.3-PL-lora_unload", - "lvkaokao/llama2-7b-hf-chat-lora-v3", - "ehartford/dolphin-llama-13b", - "YeungNLP/firefly-llama-13b-v1.2", - "TheBloke/Kimiko-13B-fp16", - "kevinpro/Vicuna-13B-CoT", - "eachadea/vicuna-13b-1.1", - "pillowtalks-ai/delta13b", - "TheBloke/vicuna-13B-1.1-HF", - "TheBloke/Vicuna-13B-CoT-fp16", - "lmsys/vicuna-13b-delta-v1.1", - "lmsys/vicuna-13b-v1.1", - "xxyyy123/20k_v1_lora_qkvo_rank14_v2", - "TheBloke/guanaco-13B-HF", - "TheBloke/vicuna-13b-v1.3.0-GPTQ", - "edor/Stable-Platypus2-mini-7B", - "totally-not-an-llm/EverythingLM-13b-V2-16k", - "zarakiquemparte/zaraxe-l2-7b", - "beaugogh/Llama2-7b-openorca-mc-v2", - "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16", - "quantumaikr/QuantumLM", - "jondurbin/airoboros-13b-gpt4-1.2", - "TheBloke/robin-13B-v2-fp16", - "TFLai/llama-2-13b-4bit-alpaca-gpt4", - "yihan6324/llama2-7b-instructmining-orca-40k", - "dvruette/oasst-llama-13b-2-epochs", - "Open-Orca/LlongOrca-7B-16k", - "Aspik101/Nous-Hermes-13b-pl-lora_unload", - "ehartford/Samantha-1.11-CodeLlama-34b", - "nkpz/llama2-22b-chat-wizard-uncensored", - "bofenghuang/vigogne-13b-chat", - "beaugogh/Llama2-7b-openorca-mc-v1", - "OptimalScale/robin-13b-v2-delta", - "pe-nlp/llama-2-13b-vicuna-wizard", - "chargoddard/llama2-22b", - "gywy/llama2-13b-chinese-v1", - "frank098/Wizard-Vicuna-13B-juniper", - "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", - "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-gate_up_down_proj", - "eachadea/vicuna-13b", - "yihan6324/llama2-7b-instructmining-orca-90k", - "chargoddard/llama2-22b-blocktriangular", - "luffycodes/mcq-vicuna-13b-v1.5", - "Yhyu13/chimera-inst-chat-13b-hf", - "luffycodes/mcq-vicuna-13b-v1.5", - "chargoddard/ypotryll-22b-epoch2-qlora", - "totally-not-an-llm/EverythingLM-13b-16k", - "luffycodes/mcq-hal-vicuna-13b-v1.5", - "openaccess-ai-collective/minotaur-13b", - "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", - "chargoddard/llama2-22b-blocktriangular", - "TFLai/Platypus2-13B-QLoRA-0.80-epoch", - "meta-llama/Llama-2-13b-hf", - "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-gate_up_down_proj", - "luffycodes/mcq-hal-vicuna-13b-v1.5", - "TheBloke/Llama-2-13B-fp16", - "TaylorAI/Flash-Llama-13B", - "shareAI/bimoGPT-llama2-13b", - "wahaha1987/llama_13b_sharegpt94k_fastchat", - "openchat/openchat_8192", - "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-q_k_v_o_proj", - "dvruette/llama-13b-pretrained-sft-do2", - "CHIH-HUNG/llama-2-13b-alpaca-test", - "OpenBuddy/openbuddy-llama2-13b-v11.1-bf16", - "CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w", - "project-baize/baize-v2-13b", - "jondurbin/airoboros-l2-13b-gpt4-m2.0", - "yeontaek/Platypus2xOpenOrca-13B-LoRa-v2", - "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w", - "xzuyn/Alpacino-SuperCOT-13B", - "jondurbin/airoboros-l2-13b-gpt4-2.0", - "aiplanet/effi-13b", - "clibrain/Llama-2-13b-ft-instruct-es", - "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w", - "bofenghuang/vigogne-2-7b-instruct", - "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-q_k_v_o_proj", - "bofenghuang/vigogne-2-7b-chat", - "aiplanet/effi-13b", - "haonan-li/bactrian-x-llama-13b-merged", - "beaugogh/Llama2-7b-sharegpt4", - "HWERI/Llama2-7b-sharegpt4", - "jondurbin/airoboros-13b-gpt4-1.3", - "jondurbin/airoboros-c34b-2.1", - "junelee/wizard-vicuna-13b", - 
"TheBloke/wizard-vicuna-13B-HF", - "Open-Orca/OpenOrca-Preview1-13B", - "TheBloke/h2ogpt-oasst1-512-30B-HF", - "TheBloke/Llama-2-13B-GPTQ", - "camel-ai/CAMEL-13B-Combined-Data", - "lmsys/vicuna-7b-v1.5", - "lmsys/vicuna-7b-v1.5-16k", - "lmsys/vicuna-7b-v1.5", - "ausboss/llama-13b-supercot", - "TheBloke/tulu-13B-fp16", - "NousResearch/Nous-Hermes-llama-2-7b", - "jlevin/guanaco-13b-llama-2", - "lmsys/vicuna-7b-v1.5-16k", - "dvruette/llama-13b-pretrained", - "nkpz/llama2-22b-daydreamer-v3", - "dvruette/llama-13b-pretrained-dropout", - "jondurbin/airoboros-l2-13b-2.1", - "LLMs/Stable-Vicuna-13B", - "64bits/LexPodLM-13B", - "lizhuang144/llama_mirror_13b_v1.0", - "TheBloke/stable-vicuna-13B-HF", - "zarakiquemparte/zaraxls-l2-7b", - "TheBloke/Llama-2-13B-GPTQ", - "Kiddyz/testlm-3", - "migtissera/Synthia-7B", - "zarakiquemparte/zarablend-l2-7b", - "mosaicml/mpt-30b-instruct", - "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged", - "vonjack/Qwen-LLaMAfied-HFTok-7B-Chat", - "l3utterfly/llama2-7b-layla", - "Lajonbot/vicuna-7b-v1.5-PL-lora_unload", - "heegyu/LIMA-13b-hf", - "frank098/WizardLM_13B_juniper", - "ashercn97/manatee-7b", - "chavinlo/gpt4-x-alpaca", - "PocketDoc/Dans-PersonalityEngine-13b", - "ehartford/WizardLM-1.0-Uncensored-CodeLlama-34b", - "digitous/Alpacino13b", - "edor/Hermes-Platypus2-mini-7B", - "lvkaokao/llama2-7b-hf-chat-lora-v2", - "Kiddyz/testlm-1-1", - "Kiddyz/testlm", - "Kiddyz/testlm-1", - "Kiddyz/testlm2", - "radm/Philosophy-Platypus2-13b", - "aiplanet/effi-13b", - "Harshvir/Llama-2-7B-physics", - "YeungNLP/firefly-ziya-13b", - "LinkSoul/Chinese-Llama-2-7b", - "PeanutJar/LLaMa-2-PeanutButter_v10-7B", - "OpenBuddy/openbuddy-llama2-13b-v11-bf16", - "StudentLLM/Alpagasus-2-13B-QLoRA-pipeline", - "meta-llama/Llama-2-13b-hf", - "WizardLM/WizardCoder-Python-34B-V1.0", - "dvruette/llama-13b-pretrained-sft-epoch-1", - "camel-ai/CAMEL-13B-Role-Playing-Data", - "ziqingyang/chinese-llama-2-13b", - "rombodawg/LosslessMegaCoder-llama2-7b-mini", - "TheBloke/koala-13B-HF", - "lmsys/vicuna-7b-delta-v1.1", - "eachadea/vicuna-7b-1.1", - "Ejafa/vicuna_7B_vanilla_1.1", - "lvkaokao/llama2-7b-hf-chat-lora", - "OpenBuddy/openbuddy-atom-13b-v9-bf16", - "Norquinal/llama-2-7b-claude-chat-rp", - "Danielbrdz/Barcenas-7b", - "heegyu/WizardVicuna2-13b-hf", - "meta-llama/Llama-2-7b-chat-hf", - "PeanutJar/LLaMa-2-PeanutButter_v14-7B", - "PeanutJar/LLaMa-2-PeanutButter_v4-7B", - "davzoku/cria-llama2-7b-v1.3", - "OpenBuddy/openbuddy-atom-13b-v9-bf16", - "lvkaokao/llama2-7b-hf-instruction-lora", - "Tap-M/Luna-AI-Llama2-Uncensored", - "ehartford/Samantha-1.11-7b", - "WizardLM/WizardCoder-Python-34B-V1.0", - "TheBloke/Manticore-13B-Chat-Pyg-Guanaco-SuperHOT-8K-GPTQ", - "Mikael110/llama-2-7b-guanaco-fp16", - "garage-bAInd/Platypus2-7B", - "PeanutJar/LLaMa-2-PeanutButter_v18_B-7B", - "mosaicml/mpt-30b", - "garage-bAInd/Platypus2-7B", - "huggingface/llama-13b", - "dvruette/oasst-llama-13b-1000-steps", - "jordiclive/gpt4all-alpaca-oa-codealpaca-lora-13b", - "huggyllama/llama-13b", - "Voicelab/trurl-2-7b", - "TFLai/llama-13b-4bit-alpaca", - "gywy/llama2-13b-chinese-v2", - "lmsys/longchat-13b-16k", - "Aspik101/trurl-2-7b-pl-instruct_unload", - "WizardLM/WizardMath-7B-V1.0", - "Norquinal/llama-2-7b-claude-chat", - "TheTravellingEngineer/llama2-7b-chat-hf-dpo", - "HuggingFaceH4/starchat-beta", - "joehuangx/spatial-vicuna-7b-v1.5-LoRA", - "conceptofmind/LLongMA-2-13b-16k", - "tianyil1/denas-llama2", - "lmsys/vicuna-7b-v1.3", - "conceptofmind/LLongMA-2-13b-16k", - "openchat/opencoderplus", - "ajibawa-2023/scarlett-7b", - 
"dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged", - "psyche/kollama2-7b-v2", - "heegyu/LIMA2-7b-hf", - "dhmeltzer/llama-7b-SFT-qlora-eli5-wiki_DPO_ds_RM_top_2_1024_r_64_alpha_16", - "abhishek/llama2guanacotest", - "jondurbin/airoboros-l2-7b-2.1", - "llama-anon/instruct-13b", - "FelixChao/vicuna-7B-physics", - "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload", - "shibing624/chinese-alpaca-plus-13b-hf", - "davzoku/cria-llama2-7b-v1.3_peft", - "quantumaikr/llama-2-7b-hf-guanaco-1k", - "togethercomputer/Llama-2-7B-32K-Instruct", - "sia-ai/llama-2-7b-1-percent-open-orca-1000-steps-v0", - "TheTravellingEngineer/llama2-7b-hf-guanaco", - "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload", - "jondurbin/airoboros-l2-7b-gpt4-1.4.1", - "wahaha1987/llama_7b_sharegpt94k_fastchat", - "FelixChao/vicuna-7B-chemical", - "TinyPixel/llama2-7b-oa", - "chaoyi-wu/MedLLaMA_13B", - "edor/Platypus2-mini-7B", - "RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT", - "venkycs/llama-v2-7b-32kC-Security", - "psyche/kollama2-7b", - "Fredithefish/Guanaco-7B-Uncensored", - "TheTravellingEngineer/llama2-7b-chat-hf-guanaco", - "ehartford/WizardLM-13B-Uncensored", - "PocketDoc/Dans-CreepingSenseOfDoom", - "wenge-research/yayi-7b-llama2", - "georgesung/llama2_7b_chat_uncensored", - "TinyPixel/llama2-7b-instruct", - "quantumaikr/QuantumLM-7B", - "xzuyn/MedicWizard-7B", - "wenge-research/yayi-7b-llama2", - "TinyPixel/lima-test", - "elyza/ELYZA-japanese-Llama-2-7b-instruct", - "lgaalves/llama-2-7b-hf_open-platypus", - "ziqingyang/chinese-alpaca-2-7b", - "TehVenom/Pygmalion-Vicuna-1.1-7b", - "meta-llama/Llama-2-7b-hf", - "bongchoi/test-llama2-7b", - "TaylorAI/Flash-Llama-7B", - "TheTravellingEngineer/llama2-7b-chat-hf-v2", - "TheTravellingEngineer/llama2-7b-chat-hf-v4", - "kashif/stack-llama-2", - "PeanutJar/LLaMa-2-PeanutButter_v18_A-7B", - "ToolBench/ToolLLaMA-7b-LoRA", - "Monero/WizardLM-13b-OpenAssistant-Uncensored", - "TheTravellingEngineer/llama2-7b-chat-hf-v2", - "TheTravellingEngineer/llama2-7b-chat-hf-v4", - "mrm8488/llama-2-coder-7b", - "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", - "clibrain/Llama-2-7b-ft-instruct-es", - "medalpaca/medalpaca-7b", - "TheBloke/tulu-7B-fp16", - "OpenBuddy/openbuddy-openllama-13b-v7-fp16", - "TaylorAI/FLAN-Llama-7B-2_Llama2-7B-Flash_868_full_model", - "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload", - "jondurbin/airoboros-l2-7b-gpt4-2.0", - "dhmeltzer/llama-7b-SFT_ds_eli5_1024_r_64_alpha_16_merged", - "GOAT-AI/GOAT-7B-Community", - "AtomEchoAI/AtomGPT_56k", - "julianweng/Llama-2-7b-chat-orcah", - "TehVenom/Pygmalion-13b-Merged", - "jondurbin/airoboros-7b-gpt4-1.1", - "dhmeltzer/llama-7b-SFT_ds_wiki65k_1024_r_64_alpha_16_merged", - "bofenghuang/vigogne-7b-chat", - "lmsys/longchat-7b-v1.5-32k", - "jondurbin/airoboros-l2-7b-gpt4-m2.0", - "synapsoft/Llama-2-7b-chat-hf-flan2022-1.2M", - "jondurbin/airoboros-7b-gpt4-1.4", - "Charlie911/vicuna-7b-v1.5-lora-mctaco", - "yihan6324/instructmining-platypus-15k", - "meta-llama/Llama-2-7b-hf", - "TheTravellingEngineer/llama2-7b-chat-hf-v3", - "quantumaikr/KoreanLM-hf", - "openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ckpt-hf", - "TheBloke/Llama-2-7B-GPTQ", - "TheBloke/Llama-2-7B-GPTQ", - "LLMs/AlpacaGPT4-7B-elina", - "ehartford/Wizard-Vicuna-7B-Uncensored", - "TheBloke/Wizard-Vicuna-7B-Uncensored-HF", - "TheTravellingEngineer/llama2-7b-chat-hf-v3", - "golaxy/gowizardlm", - "ehartford/dolphin-llama2-7b", - "CHIH-HUNG/llama-2-7b-dolphin_10w-test", - "mncai/chatdoctor", - "psyche/kollama2-7b-v3", - "jondurbin/airoboros-7b-gpt4", - 
"jondurbin/airoboros-7b", - "TheBloke/airoboros-7b-gpt4-fp16", - "mosaicml/mpt-7b-8k-chat", - "elyza/ELYZA-japanese-Llama-2-7b", - "bofenghuang/vigogne-7b-instruct", - "jxhong/CAlign-alpaca-7b", - "golaxy/goims", - "jondurbin/airoboros-7b-gpt4-1.2", - "jphme/orca_mini_v2_ger_7b", - "psmathur/orca_mini_v2_7b", - "notstoic/PygmalionCoT-7b", - "golaxy/gogpt2-13b", - "golaxy/gogpt2-13b-chat", - "togethercomputer/LLaMA-2-7B-32K", - "TheBloke/wizardLM-7B-HF", - "keyfan/vicuna-chinese-replication-v1.1", - "golaxy/gogpt2-7b", - "aiplanet/effi-7b", - "arver/llama7b-qlora", - "titan087/OpenLlama13B-Guanaco", - "chavinlo/alpaca-native", - "project-baize/baize-healthcare-lora-7B", - "AlpinDale/pygmalion-instruct", - "openlm-research/open_llama_13b", - "jondurbin/airoboros-7b-gpt4-1.3", - "elyza/ELYZA-japanese-Llama-2-7b-fast", - "jondurbin/airoboros-gpt-3.5-turbo-100k-7b", - "uukuguy/speechless-codellama-orca-13b", - "bigcode/starcoderplus", - "TheBloke/guanaco-7B-HF", - "Neko-Institute-of-Science/metharme-7b", - "TigerResearch/tigerbot-7b-base", - "golaxy/gogpt-7b", - "togethercomputer/LLaMA-2-7B-32K", - "yhyhy3/open_llama_7b_v2_med_instruct", - "ajibawa-2023/carl-7b", - "stabilityai/stablelm-base-alpha-7b-v2", - "conceptofmind/LLongMA-2-7b-16k", - "TehVenom/Pygmalion_AlpacaLora-7b", - "jondurbin/airoboros-7b-gpt4-1.4.1-qlora", - "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard", - "ausboss/llama7b-wizardlm-unfiltered", - "project-baize/baize-v2-7b", - "LMFlow/Robin-v2", - "HanningZhang/Robin-v2", - "LMFlow/Robin-7b-v2", - "OptimalScale/robin-7b-v2-delta", - "uukuguy/speechless-codellama-platypus-13b", - "jerryjalapeno/nart-100k-7b", - "wenge-research/yayi-13b-llama2", - "fireballoon/baichuan-vicuna-chinese-7b", - "jlevin/guanaco-unchained-llama-2-7b", - "csitfun/llama-7b-logicot", - "DevaMalla/llama7b_alpaca_1gpu_bf16", - "WeOpenML/PandaLM-Alpaca-7B-v1", - "illuin/test-custom-llama", - "yeontaek/WizardCoder-Python-13B-LoRa", - "ashercn97/giraffe-7b", - "mosaicml/mpt-7b-chat", - "abhishek/autotrain-llama-alpaca-peft-52508123785", - "Neko-Institute-of-Science/pygmalion-7b", - "TFLai/llama-7b-4bit-alpaca", - "huggingface/llama-7b", - "TheBloke/Planner-7B-fp16", - "shibing624/chinese-llama-plus-13b-hf", - "AGI-inc/lora_moe_7b_baseline", - "DevaMalla/llama-base-7b", - "AGI-inc/lora_moe_7b", - "togethercomputer/GPT-JT-6B-v0", - "ehartford/WizardLM-7B-Uncensored", - "shibing624/chinese-alpaca-plus-7b-hf", - "beomi/llama-2-ko-7b", - "mosaicml/mpt-7b-8k-instruct", - "Enno-Ai/ennodata-7b", - "mosaicml/mpt-7b-instruct", - "facebook/opt-iml-max-30b", - "WeOpenML/Alpaca-7B-v1", - "TheBloke/Project-Baize-v2-7B-GPTQ", - "codellama/CodeLlama-13b-Instruct-hf", - "TheBloke/CodeLlama-13B-Instruct-fp16", - "facebook/galactica-30b", - "FreedomIntelligence/phoenix-inst-chat-7b", - "openlm-research/open_llama_7b_v2", - "GeorgiaTechResearchInstitute/galpaca-30b", - "THUDM/chatglm2-6b", - "togethercomputer/GPT-JT-6B-v1", - "TheBloke/koala-7B-HF", - "nathan0/mpt_delta_tuned_model_v3", - "nathan0/mpt_delta_tuned_model_v2", - "GeorgiaTechResearchInstitute/galpaca-30b", - "JosephusCheung/Guanaco", - "shareAI/CodeLLaMA-chat-13b-Chinese", - "TigerResearch/tigerbot-7b-sft", - "Writer/InstructPalmyra-20b", - "OpenAssistant/codellama-13b-oasst-sft-v10", - "bigscience/bloomz-7b1-mt", - "nathan0/mpt_delta_tuned_model_v3", - "VMware/open-llama-7b-open-instruct", - "baichuan-inc/Baichuan-7B", - "anas-awadalla/mpt-7b", - "mosaicml/mpt-7b", - "bigscience/bloomz-7b1", - "ziqingyang/chinese-llama-2-7b", - 
"OpenAssistant/codellama-13b-oasst-sft-v10", - "wenge-research/yayi-7b", - "tiiuae/falcon-7b", - "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1", - "togethercomputer/RedPajama-INCITE-7B-Instruct", - "TheBloke/landmark-attention-llama7b-fp16", - "togethercomputer/GPT-JT-Moderation-6B", - "h2oai/h2ogpt-gm-oasst1-en-1024-20b", - "dvruette/gpt-neox-20b-full-precision", - "TehVenom/Moderator-Chan_GPT-JT-6b", - "dvruette/oasst-gpt-neox-20b-1000-steps", - "AlekseyKorshuk/pygmalion-6b-vicuna-chatml", - "facebook/opt-66b", - "Salesforce/codegen-16B-nl", - "Vmware/open-llama-7b-v2-open-instruct", - "mosaicml/mpt-7b-storywriter", - "acrastt/Marx-3B-V2", - "openlm-research/open_llama_7b", - "Fredithefish/ReasonixPajama-3B-HF", - "togethercomputer/GPT-NeoXT-Chat-Base-20B", - "psmathur/orca_mini_13b", - "RWKV/rwkv-raven-14b", - "h2oai/h2ogpt-oasst1-512-20b", - "acrastt/Marx-3B", - "klosax/open_llama_13b_600bt_preview", - "synapsoft/Llama-2-7b-hf-flan2022-1.2M", - "OpenAssistant/oasst-sft-1-pythia-12b", - "golaxy/gogpt-7b-bloom", - "Writer/palmyra-large", - "psmathur/orca_mini_7b", - "dvruette/oasst-pythia-12b-6000-steps", - "NousResearch/CodeLlama-13b-hf", - "codellama/CodeLlama-13b-hf", - "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b", - "VMware/open-llama-0.7T-7B-open-instruct-v1.1", - "dvruette/oasst-pythia-12b-flash-attn-5000-steps", - "dvruette/oasst-gpt-neox-20b-3000-steps", - "RobbeD/OpenLlama-Platypus-3B", - "facebook/opt-30b", - "acrastt/Puma-3B", - "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - "dvruette/oasst-pythia-12b-pretrained-sft", - "digitous/GPT-R", - "acrastt/Griffin-3B", - "togethercomputer/RedPajama-INCITE-Base-7B-v0.1", - "togethercomputer/RedPajama-INCITE-7B-Base", - "CobraMamba/mamba-gpt-3b-v3", - "Danielbrdz/CodeBarcenas-7b", - "l3utterfly/open-llama-3b-v2-layla", - "CobraMamba/mamba-gpt-3b-v2", - "OpenAssistant/pythia-12b-sft-v8-7k-steps", - "KoboldAI/GPT-NeoX-20B-Erebus", - "RobbeD/Orca-Platypus-3B", - "h2oai/h2ogpt-gm-oasst1-en-1024-12b", - "OpenAssistant/pythia-12b-sft-v8-2.5k-steps", - "AlekseyKorshuk/chatml-pyg-v1", - "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1", - "togethercomputer/RedPajama-INCITE-7B-Chat", - "digitous/Javelin-R", - "dvruette/oasst-pythia-12b-reference", - "EleutherAI/gpt-neox-20b", - "KoboldAI/fairseq-dense-13B", - "OpenAssistant/pythia-12b-sft-v8-rlhf-2k-steps", - "codellama/CodeLlama-7b-Instruct-hf", - "digitous/Javelin-GPTJ", - "KoboldAI/GPT-NeoX-20B-Skein", - "digitous/Javalion-R", - "h2oai/h2ogpt-oasst1-512-12b", - "acrastt/Bean-3B", - "KoboldAI/GPT-J-6B-Skein", - "nomic-ai/gpt4all-j", - "databricks/dolly-v2-12b", - "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2", - "databricks/dolly-v2-7b", - "Aspik101/WizardVicuna-Uncensored-3B-instruct-PL-lora_unload", - "digitous/Adventien-GPTJ", - "openlm-research/open_llama_3b_v2", - "RWKV/rwkv-4-14b-pile", - "Lazycuber/Janemalion-6B", - "OpenAssistant/pythia-12b-pre-v8-12.5k-steps", - "digitous/Janin-R", - "kfkas/Llama-2-ko-7b-Chat", - "heegyu/WizardVicuna-Uncensored-3B-0719", - "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", - "TaylorAI/Flash-Llama-3B", - "kfkas/Llama-2-ko-7b-Chat", - "digitous/Skegma-GPTJ", - "digitous/Javalion-GPTJ", - "Pirr/pythia-13b-deduped-green_devil", - "TehVenom/PPO_Shygmalion-V8p4_Dev-6b", - "dvruette/oasst-pythia-6.9b-4000-steps", - "heegyu/WizardVicuna-3B-0719", - "psmathur/orca_mini_3b", - "OpenAssistant/galactica-6.7b-finetuned", - "frank098/orca_mini_3b_juniper", - "PygmalionAI/pygmalion-6b", - "TehVenom/PPO_Pygway-V8p4_Dev-6b", - 
"TFLai/gpt-neox-20b-4bit-alpaca", - "Corianas/gpt-j-6B-Dolly", - "TehVenom/Dolly_Shygmalion-6b", - "digitous/Janin-GPTJ", - "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4", - "EleutherAI/gpt-j-6b", - "KoboldAI/GPT-J-6B-Shinen", - "TehVenom/Dolly_Malion-6b", - "TehVenom/ChanMalion", - "Salesforce/codegen-6B-nl", - "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4", - "KoboldAI/GPT-J-6B-Janeway", - "togethercomputer/RedPajama-INCITE-Chat-3B-v1", - "togethercomputer/Pythia-Chat-Base-7B", - "heegyu/RedTulu-Uncensored-3B-0719", - "KoboldAI/PPO_Pygway-6b-Mix", - "KoboldAI/OPT-13B-Erebus", - "KoboldAI/fairseq-dense-6.7B", - "EleutherAI/pythia-12b-deduped", - "pszemraj/pythia-6.9b-HC3", - "Fredithefish/Guanaco-3B-Uncensored-v2", - "facebook/opt-13b", - "TehVenom/GPT-J-Pyg_PPO-6B", - "EleutherAI/pythia-6.9b-deduped", - "Devio/test-1400", - "Fredithefish/Guanaco-3B-Uncensored", - "codellama/CodeLlama-7b-hf", - "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1", - "Fredithefish/ScarletPajama-3B-HF", - "KoboldAI/OPT-13B-Nerybus-Mix", - "YeungNLP/firefly-bloom-7b1", - "DanielSc4/RedPajama-INCITE-Chat-3B-v1-RL-LoRA-8bit-test1", - "klosax/open_llama_7b_400bt_preview", - "KoboldAI/OPT-13B-Nerys-v2", - "TehVenom/PPO_Shygmalion-6b", - "amazon/LightGPT", - "KnutJaegersberg/black_goo_recipe_c", - "NousResearch/CodeLlama-7b-hf", - "togethercomputer/RedPajama-INCITE-Instruct-3B-v1", - "heegyu/WizardVicuna-open-llama-3b-v2", - "bigscience/bloom-7b1", - "Devio/test-22B", - "RWKV/rwkv-raven-7b", - "hakurei/instruct-12b", - "CobraMamba/mamba-gpt-3b", - "KnutJaegersberg/black_goo_recipe_a", - "acrastt/OmegLLaMA-3B", - "codellama/CodeLlama-7b-Instruct-hf", - "h2oai/h2ogpt-oig-oasst1-512-6_9b", - "KoboldAI/OPT-6.7B-Erebus", - "facebook/opt-6.7b", - "KnutJaegersberg/black_goo_recipe_d", - "KnutJaegersberg/LLongMA-3b-LIMA", - "KnutJaegersberg/black_goo_recipe_b", - "KoboldAI/OPT-6.7B-Nerybus-Mix", - "health360/Healix-3B", - "EleutherAI/pythia-12b", - "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K", - "GeorgiaTechResearchInstitute/galactica-6.7b-evol-instruct-70k", - "h2oai/h2ogpt-oig-oasst1-256-6_9b", - "ikala/bloom-zh-3b-chat", - "Taekyoon/llama2-ko-7b-test", - "anhnv125/pygmalion-6b-roleplay", - "TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", - "KoboldAI/OPT-6B-nerys-v2", - "Lazycuber/pyg-instruct-wizardlm", - "Devio/testC", - "KoboldAI/OPT-30B-Erebus", - "Fredithefish/CrimsonPajama", - "togethercomputer/RedPajama-INCITE-Base-3B-v1", - "bigscience/bloomz-3b", - "conceptofmind/Open-LLongMA-3b", - "RWKV/rwkv-4-7b-pile", - "openlm-research/open_llama_3b", - "ewof/koishi-instruct-3b", - "DanielSc4/RedPajama-INCITE-Chat-3B-v1-FT-LoRA-8bit-test1", - "cerebras/Cerebras-GPT-13B", - "EleutherAI/pythia-6.7b", - "aisquared/chopt-2_7b", - "Azure99/blossom-v1-3b", - "PSanni/Deer-3b", - "bertin-project/bertin-gpt-j-6B-alpaca", - "OpenBuddy/openbuddy-openllama-3b-v10-bf16", - "KoboldAI/fairseq-dense-2.7B", - "ehartford/CodeLlama-34b-Instruct-hf", - "codellama/CodeLlama-34b-Instruct-hf", - "TheBloke/CodeLlama-34B-Instruct-fp16", - "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2", - "openlm-research/open_llama_7b_700bt_preview", - "NbAiLab/nb-gpt-j-6B-alpaca", - "KoboldAI/OPT-2.7B-Erebus", - "Writer/camel-5b-hf", - "EleutherAI/pythia-2.7b", - "facebook/xglm-7.5B", - "EleutherAI/pythia-2.8b-deduped", - "klosax/open_llama_3b_350bt_preview", - "klosax/openllama-3b-350bt", - "KoboldAI/OPT-2.7B-Nerybus-Mix", - "KoboldAI/GPT-J-6B-Adventure", - "cerebras/Cerebras-GPT-6.7B", - "TFLai/pythia-2.8b-4bit-alpaca", - 
"facebook/opt-2.7b", - "KoboldAI/OPT-2.7B-Nerys-v2", - "bigscience/bloom-3b", - "Devio/test100", - "RWKV/rwkv-raven-3b", - "Azure99/blossom-v2-3b", - "codellama/CodeLlama-34b-Python-hf", - "bhenrym14/airoboros-33b-gpt4-1.4.1-PI-8192-fp16", - "EleutherAI/gpt-neo-2.7B", - "danielhanchen/open_llama_3b_600bt_preview", - "HuggingFaceH4/starchat-alpha", - "pythainlp/wangchanglm-7.5B-sft-en-sharded", - "beaugogh/pythia-1.4b-deduped-sharegpt", - "HWERI/pythia-1.4b-deduped-sharegpt", - "OpenAssistant/stablelm-7b-sft-v7-epoch-3", - "codellama/CodeLlama-7b-Python-hf", - "aisquared/chopt-1_3b", - "PygmalionAI/metharme-1.3b", - "Linly-AI/Chinese-LLaMA-2-13B-hf", - "chargoddard/llama-2-34b-uncode", - "RWKV/rwkv-4-3b-pile", - "pythainlp/wangchanglm-7.5B-sft-enth", - "MBZUAI/LaMini-GPT-1.5B", - "Writer/palmyra-base", - "KoboldAI/fairseq-dense-1.3B", - "EleutherAI/pythia-1.4b-deduped", - "MBZUAI/lamini-neo-1.3b", - "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt", - "sartmis1/starcoder-finetune-openapi", - "MayaPH/opt-flan-iml-6.7b", - "facebook/xglm-4.5B", - "WizardLM/WizardCoder-15B-V1.0", - "facebook/opt-iml-max-1.3b", - "stabilityai/stablelm-tuned-alpha-7b", - "aisquared/dlite-v2-1_5b", - "stabilityai/stablelm-base-alpha-7b", - "sartmis1/starcoder-finetune-selfinstruct", - "lizhuang144/starcoder_mirror", - "bigcode/starcoder", - "TheBloke/CodeLlama-34B-Python-fp16", - "open-llm-leaderboard/bloomz-1b7-4bit-alpaca-auto-eval-adapter-applied", - "ehartford/CodeLlama-34b-Python-hf", - "codellama/CodeLlama-7b-Python-hf", - "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct", - "LoupGarou/WizardCoder-Guanaco-15B-V1.0", - "golaxy/gogpt-3b-bloom", - "EleutherAI/pythia-1.3b", - "codellama/CodeLlama-13b-Python-hf", - "hakurei/lotus-12B", - "NYTK/PULI-GPTrio", - "facebook/opt-1.3b", - "TheBloke/CodeLlama-13B-Python-fp16", - "codellama/CodeLlama-13b-Python-hf", - "RWKV/rwkv-raven-1b5", - "PygmalionAI/pygmalion-2.7b", - "bigscience/bloom-1b7", - "gpt2-xl", - "LoupGarou/WizardCoder-Guanaco-15B-V1.1", - "RWKV/rwkv-4-1b5-pile", - "codellama/CodeLlama-34b-hf", - "NousResearch/CodeLlama-34b-hf", - "rinna/bilingual-gpt-neox-4b-8k", - "lxe/Cerebras-GPT-2.7B-Alpaca-SP", - "cerebras/Cerebras-GPT-2.7B", - "jzjiao/opt-1.3b-rlhf", - "EleutherAI/gpt-neo-1.3B", - "aisquared/dlite-v1-1_5b", - "Corianas/Quokka_2.7b", - "MrNJK/gpt2-xl-sft", - "facebook/galactica-1.3b", - "aisquared/dlite-v2-774m", - "EleutherAI/pythia-1b-deduped", - "Kunhao/pile-7b-250b-tokens", - "w601sxs/b1ade-1b", - "rinna/bilingual-gpt-neox-4b", - "shaohang/SparseOPT-1.3B", - "shaohang/Sparse0.5_OPT-1.3", - "EleutherAI/polyglot-ko-12.8b", - "Salesforce/codegen-6B-multi", - "bigscience/bloom-1b1", - "TFLai/gpt-neo-1.3B-4bit-alpaca", - "FabbriSimo01/Bloom_1b_Quantized", - "MBZUAI/LaMini-GPT-774M", - "Locutusque/gpt2-large-conversational", - "Devio/test-3b", - "stabilityai/stablelm-tuned-alpha-3b", - "PygmalionAI/pygmalion-1.3b", - "KoboldAI/fairseq-dense-355M", - "Rachneet/gpt2-xl-alpaca", - "gpt2-large", - "Mikivis/gpt2-large-lora-sft", - "stabilityai/stablelm-base-alpha-3b", - "gpt2-medium", - "Kunhao/pile-7b", - "aisquared/dlite-v1-774m", - "aisquared/dlite-v2-355m", - "YeungNLP/firefly-bloom-2b6-v2", - "KnutJaegersberg/gpt-2-xl-EvolInstruct", - "KnutJaegersberg/galactica-orca-wizardlm-1.3b", - "cerebras/Cerebras-GPT-1.3B", - "FabbriSimo01/Cerebras_1.3b_Quantized", - "facebook/xglm-1.7B", - "EleutherAI/pythia-410m-deduped", - "TheBloke/GPlatty-30B-SuperHOT-8K-fp16", - "DataLinguistic/DataLinguistic-34B-V1.0", - "Corianas/Quokka_1.3b", - 
"TheTravellingEngineer/bloom-560m-RLHF-v2", - "Corianas/1.3b", - "RWKV/rwkv-4-430m-pile", - "porkorbeef/Llama-2-13b-sf", - "xhyi/PT_GPTNEO350_ATG", - "TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ", - "bigscience/bloomz-560m", - "TheBloke/medalpaca-13B-GPTQ-4bit", - "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16", - "aisquared/dlite-v1-355m", - "uukuguy/speechless-codellama-orca-airoboros-13b-0.10e", - "yhyhy3/med-orca-instruct-33b", - "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16", - "TheTravellingEngineer/bloom-1b1-RLHF", - "MBZUAI/lamini-cerebras-1.3b", - "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1", - "TheBloke/WizardLM-7B-uncensored-GPTQ", - "TheBloke/EverythingLM-13B-16K-GPTQ", - "quantumaikr/open_llama_7b_hf", - "TheBloke/chronos-wizardlm-uc-scot-st-13B-GPTQ", - "TheBloke/WizardLM-30B-Uncensored-GPTQ", - "IDEA-CCNL/Ziya-LLaMA-13B-v1", - "Phind/Phind-CodeLlama-34B-v1", - "robowaifudev/megatron-gpt2-345m", - "MayaPH/GodziLLa-30B-instruct", - "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16", - "uukuguy/speechless-codellama-orca-platypus-13b-0.10e", - "doas/test2", - "BreadAi/PM_modelV2", - "bigcode/santacoder", - "TheBloke/wizard-vicuna-13B-GPTQ", - "porkorbeef/Llama-2-13b", - "TehVenom/DiffMerge-DollyGPT-Pygmalion", - "PygmalionAI/pygmalion-350m", - "TheBloke/orca_mini_v3_7B-GPTQ", - "TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GPTQ", - "TheBloke/WizardLM-30B-GPTQ", - "bigscience/bloom-560m", - "TFLai/gpt2-turkish-uncased", - "TheBloke/guanaco-33B-GPTQ", - "TheBloke/openchat_v2_openorca_preview-GPTQ", - "porkorbeef/Llama-2-13b-public", - "TheBloke/LongChat-13B-GPTQ", - "yhyhy3/med-orca-instruct-33b", - "TheBloke/airoboros-33B-gpt4-1-4-SuperHOT-8K-fp16", - "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16", - "MayaPH/FinOPT-Franklin", - "TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ", - "TheBloke/Project-Baize-v2-13B-GPTQ", - "malhajar/Platypus2-70B-instruct-4bit-gptq", - "KoboldAI/OPT-350M-Erebus", - "rishiraj/bloom-560m-guanaco", - "Panchovix/WizardLM-33B-V1.0-Uncensored-SuperHOT-8k", - "doas/test5", - "vicgalle/alpaca-7b", - "beomi/KoAlpaca-Polyglot-5.8B", - "Phind/Phind-CodeLlama-34B-Python-v1", - "timdettmers/guanaco-65b-merged", - "TheBloke/wizard-mega-13B-GPTQ", - "MayaPH/GodziLLa-30B-plus", - "TheBloke/Platypus-30B-SuperHOT-8K-fp16", - "facebook/opt-350m", - "KoboldAI/OPT-350M-Nerys-v2", - "TheBloke/robin-33B-v2-GPTQ", - "jaspercatapang/Echidna-30B", - "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16", - "marcchew/test1", - "Harshvir/LaMini-Neo-1.3B-Mental-Health_lora", - "golaxy/gogpt-560m", - "TheBloke/orca_mini_13B-GPTQ", - "Panchovix/airoboros-33b-gpt4-1.2-SuperHOT-8k", - "Aspik101/tulu-7b-instruct-pl-lora_unload", - "Phind/Phind-CodeLlama-34B-v2", - "BreadAi/MusePy-1-2", - "cerebras/Cerebras-GPT-590M", - "microsoft/CodeGPT-small-py", - "victor123/WizardLM-13B-1.0", - "OptimalScale/robin-65b-v2-delta", - "voidful/changpt-bart", - "FabbriSimo01/GPT_Large_Quantized", - "MayaPH/FinOPT-Lincoln", - "KoboldAI/fairseq-dense-125M", - "SebastianSchramm/Cerebras-GPT-111M-instruction", - "TheTravellingEngineer/bloom-560m-RLHF", - "breadlicker45/dough-instruct-base-001", - "WizardLM/WizardLM-30B-V1.0", - "WizardLM/WizardLM-30B-V1.0", - "WizardLM/WizardLM-30B-V1.0", - "TaylorAI/Flash-Llama-30M-20001", - "porkorbeef/Llama-2-13b-12_153950", - "huggingtweets/bladeecity-jerma985", - "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct", - "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", - "microsoft/DialoGPT-small", - "Corianas/590m", - "facebook/xglm-564M", - "EleutherAI/gpt-neo-125m", - 
"EleutherAI/pythia-160m-deduped", - "klosax/pythia-160m-deduped-step92k-193bt", - "MBZUAI/lamini-neo-125m", - "bigcode/tiny_starcoder_py", - "concedo/OPT-19M-ChatSalad", - "anton-l/gpt-j-tiny-random", - "grantprice/Cerebras-GPT-590M-finetuned-DND", - "deepnight-research/zsc-text", - "WangZeJun/bloom-820m-chat", - "cerebras/Cerebras-GPT-256M", - "ai-forever/rugpt3large_based_on_gpt2", - "alibidaran/medical_transcription_generator", - "Deci/DeciCoder-1b", - "microsoft/DialoGPT-medium", - "ogimgio/gpt-neo-125m-neurallinguisticpioneers", - "open-llm-leaderboard/bloom-560m-4bit-alpaca-auto-eval-adapter-applied", - "BreadAi/gpt-YA-1-1_160M", - "microsoft/DialoGPT-large", - "facebook/opt-125m", - "huggingtweets/jerma985", - "Locutusque/gpt2-conversational-or-qa", - "concedo/Pythia-70M-ChatSalad", - "roneneldan/TinyStories-1M", - "BreadAi/DiscordPy", - "bigcode/gpt_bigcode-santacoder", - "Tincando/fiction_story_generator", - "klosax/pythia-70m-deduped-step44k-92bt", - "Quake24/easyTermsSummerizer", - "BreadAi/gpt-YA-1-1_70M", - "EleutherAI/pythia-160m", - "euclaise/gpt-neox-122m-minipile-digits", - "MBZUAI/lamini-cerebras-590m", - "nicholasKluge/Aira-124M", - "MayaPH/FinOPT-Washington", - "cyberagent/open-calm-large", - "BreadAi/StoryPy", - "EleutherAI/pythia-70m", - "BreadAi/gpt-Youtube", - "roneneldan/TinyStories-33M", - "EleutherAI/pythia-70m-deduped", - "lgaalves/gpt2_guanaco-dolly-platypus", - "Corianas/Quokka_590m", - "lgaalves/gpt2_platypus-dolly-guanaco", - "cyberagent/open-calm-7b", - "RWKV/rwkv-4-169m-pile", - "gpt2", - "roneneldan/TinyStories-28M", - "lgaalves/gpt2_open-platypus", - "gpt2", - "SaylorTwift/gpt2_test", - "roneneldan/TinyStories-3M", - "nthngdy/pythia-owt2-70m-50k", - "Corianas/256_5epoch", - "roneneldan/TinyStories-8M", - "lgaalves/gpt2-dolly", - "nthngdy/pythia-owt2-70m-100k", - "aisquared/dlite-v2-124m", - "mncai/SGPT-1.3B-insurance-epoch10", - "huggingtweets/gladosystem", - "abhiramtirumala/DialoGPT-sarcastic-medium", - "MBZUAI/lamini-cerebras-256m", - "cerebras/Cerebras-GPT-111M", - "uberkie/metharme-1.3b-finetuned", - "MBZUAI/lamini-cerebras-111m", - "psyche/kogpt", - "Corianas/Quokka_256m", - "vicgalle/gpt2-alpaca-gpt4", - "aisquared/dlite-v1-124m", - "Mikivis/xuanxuan", - "MBZUAI/LaMini-GPT-124M", - "vicgalle/gpt2-alpaca", - "huashiyiqike/testmodel", - "Corianas/111m", - "baseline", -] diff --git a/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py b/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py deleted file mode 100644 index 113ac655b8c0a585fe43797e99674e445098edd0..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/hubert/simple_kmeans/learn_kmeans.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-
-import logging
-import os
-import sys
-
-import numpy as np
-from sklearn.cluster import MiniBatchKMeans
-
-import joblib
-
-logging.basicConfig(
-    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
-    datefmt="%Y-%m-%d %H:%M:%S",
-    level=os.environ.get("LOGLEVEL", "INFO").upper(),
-    stream=sys.stdout,
-)
-logger = logging.getLogger("learn_kmeans")
-
-
-def get_km_model(
-    n_clusters,
-    init,
-    max_iter,
-    batch_size,
-    tol,
-    max_no_improvement,
-    n_init,
-    reassignment_ratio,
-):
-    return MiniBatchKMeans(
-        n_clusters=n_clusters,
-        init=init,
-        max_iter=max_iter,
-        batch_size=batch_size,
-        verbose=1,
-        compute_labels=False,
-        tol=tol,
-        max_no_improvement=max_no_improvement,
-        init_size=None,
-        n_init=n_init,
-        reassignment_ratio=reassignment_ratio,
-    )
-
-
-def load_feature_shard(feat_dir, split, nshard, rank, percent):
-    feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
-    leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
-    with open(leng_path, "r") as f:
-        lengs = [int(line.rstrip()) for line in f]
-        offsets = [0] + np.cumsum(lengs[:-1]).tolist()
-
-    if percent < 0:
-        return np.load(feat_path, mmap_mode="r")
-    else:
-        nsample = int(np.ceil(len(lengs) * percent))
-        indices = np.random.choice(len(lengs), nsample, replace=False)
-        feat = np.load(feat_path, mmap_mode="r")
-        sampled_feat = np.concatenate(
-            [feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0
-        )
-        logger.info(
-            (
-                f"sampled {nsample} utterances, {len(sampled_feat)} frames "
-                f"from shard {rank}/{nshard}"
-            )
-        )
-        return sampled_feat
-
-
-def load_feature(feat_dir, split, nshard, seed, percent):
-    assert percent <= 1.0
-    feat = np.concatenate(
-        [
-            load_feature_shard(feat_dir, split, nshard, r, percent)
-            for r in range(nshard)
-        ],
-        axis=0,
-    )
-    logging.info(f"loaded feature with dimension {feat.shape}")
-    return feat
-
-
-def learn_kmeans(
-    feat_dir,
-    split,
-    nshard,
-    km_path,
-    n_clusters,
-    seed,
-    percent,
-    init,
-    max_iter,
-    batch_size,
-    tol,
-    n_init,
-    reassignment_ratio,
-    max_no_improvement,
-):
-    np.random.seed(seed)
-    feat = load_feature(feat_dir, split, nshard, seed, percent)
-    km_model = get_km_model(
-        n_clusters,
-        init,
-        max_iter,
-        batch_size,
-        tol,
-        max_no_improvement,
-        n_init,
-        reassignment_ratio,
-    )
-    km_model.fit(feat)
-    joblib.dump(km_model, km_path)
-
-    inertia = -km_model.score(feat) / len(feat)
-    logger.info("total inertia: %.5f", inertia)
-    logger.info("finished successfully")
-
-
-if __name__ == "__main__":
-    import argparse
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("feat_dir", type=str)
-    parser.add_argument("split", type=str)
-    parser.add_argument("nshard", type=int)
-    parser.add_argument("km_path", type=str)
-    parser.add_argument("n_clusters", type=int)
-    parser.add_argument("--seed", default=0, type=int)
-    parser.add_argument(
-        "--percent", default=-1, type=float, help="sample a subset; -1 for all"
-    )
-    parser.add_argument("--init", default="k-means++")
-    parser.add_argument("--max_iter", default=100, type=int)
-    parser.add_argument("--batch_size", default=10000, type=int)
-    parser.add_argument("--tol", default=0.0, type=float)
-    parser.add_argument("--max_no_improvement", default=100, type=int)
-    parser.add_argument("--n_init", default=20, type=int)
-    parser.add_argument("--reassignment_ratio", default=0.0, type=float)
-    args = parser.parse_args()
-    logging.info(str(args))
-
-    learn_kmeans(**vars(args))
diff --git a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py
b/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py deleted file mode 100644 index 7aced08d38301b98b19e2df7d19f1c61150107bc..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/textless_nlp/gslm/unit2speech/utils.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import torch -from examples.textless_nlp.gslm.unit2speech.tacotron2.model import Tacotron2 -from examples.textless_nlp.gslm.unit2speech.tacotron2.waveglow_denoiser import ( - Denoiser, -) - - -def load_quantized_audio_from_file(file_path): - base_fname_batch, quantized_units_batch = [], [] - with open(file_path) as f: - for line in f: - base_fname, quantized_units_str = line.rstrip().split("|") - quantized_units = [int(q) for q in quantized_units_str.split(" ")] - base_fname_batch.append(base_fname) - quantized_units_batch.append(quantized_units) - return base_fname_batch, quantized_units_batch - - -def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0): - assert inp.size(0) == 1 - inp = inp.cuda() - if lab is not None: - lab = torch.LongTensor(1).cuda().fill_(lab) - - with torch.no_grad(): - _, mel, _, ali, has_eos = model.inference(inp, lab, ret_has_eos=True) - aud = waveglow.infer(mel, sigma=0.666) - aud_dn = denoiser(aud, strength=strength).squeeze(1) - return mel, aud, aud_dn, has_eos - - -def load_tacotron(tacotron_model_path, max_decoder_steps): - ckpt_dict = torch.load(tacotron_model_path) - hparams = ckpt_dict["hparams"] - hparams.max_decoder_steps = max_decoder_steps - sr = hparams.sampling_rate - model = Tacotron2(hparams) - model.load_state_dict(ckpt_dict["model_dict"]) - model = model.cuda().eval().half() - return model, sr, hparams - - -def load_waveglow(waveglow_path): - waveglow = torch.load(waveglow_path)["model"] - waveglow = waveglow.cuda().eval().half() - for k in waveglow.convinv: - k.float() - denoiser = Denoiser(waveglow) - return waveglow, denoiser diff --git a/spaces/ICML2022/OFA/fairseq/examples/wmt19/README.md b/spaces/ICML2022/OFA/fairseq/examples/wmt19/README.md deleted file mode 100644 index 5c90d0e6c4ae8d043ca622e70c5828dca6f9c2f2..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wmt19/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# WMT 19 - -This page provides pointers to the models of Facebook-FAIR's WMT'19 news translation task submission [(Ng et al., 2019)](https://arxiv.org/abs/1907.06616). 
- -## Pre-trained models - -Model | Description | Download ----|---|--- -`transformer.wmt19.en-de` | En->De Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz) -`transformer.wmt19.de-en` | De->En Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz) -`transformer.wmt19.en-ru` | En->Ru Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz) -`transformer.wmt19.ru-en` | Ru->En Ensemble | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz) -`transformer_lm.wmt19.en` | En Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz) -`transformer_lm.wmt19.de` | De Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.gz) -`transformer_lm.wmt19.ru` | Ru Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.gz) - -## Pre-trained single models before finetuning - -Model | Description | Download ----|---|--- -`transformer.wmt19.en-de` | En->De Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.ffn8192.tar.gz) -`transformer.wmt19.de-en` | De->En Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.ffn8192.tar.gz) -`transformer.wmt19.en-ru` | En->Ru Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ffn8192.tar.gz) -`transformer.wmt19.ru-en` | Ru->En Single, no finetuning | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ffn8192.tar.gz) - -## Example usage (torch.hub) - -#### Requirements - -We require a few additional Python dependencies for preprocessing: -```bash -pip install fastBPE sacremoses -``` - -#### Translation - -```python -import torch - -# English to German translation -en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', - tokenizer='moses', bpe='fastbpe') -en2de.translate("Machine learning is great!") # 'Maschinelles Lernen ist großartig!' - -# German to English translation -de2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', - tokenizer='moses', bpe='fastbpe') -de2en.translate("Maschinelles Lernen ist großartig!") # 'Machine learning is great!' - -# English to Russian translation -en2ru = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-ru', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', - tokenizer='moses', bpe='fastbpe') -en2ru.translate("Machine learning is great!") # 'Машинное обучение - это здорово!' - -# Russian to English translation -ru2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.ru-en', checkpoint_file='model1.pt:model2.pt:model3.pt:model4.pt', - tokenizer='moses', bpe='fastbpe') -ru2en.translate("Машинное обучение - это здорово!") # 'Machine learning is great!' -``` - -#### Language Modeling - -```python -# Sample from the English LM -en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.en', tokenizer='moses', bpe='fastbpe') -en_lm.sample("Machine learning is") # 'Machine learning is the future of computing, says Microsoft boss Satya Nadella ...' 
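-
-# sample() also forwards generation options to fairseq; for instance, top-k
-# sampling with a temperature (the values here are illustrative):
-en_lm.sample("Machine learning is", beam=1, sampling=True, sampling_topk=10, temperature=0.8)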
-
-# Sample from the German LM
-de_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.de', tokenizer='moses', bpe='fastbpe')
-de_lm.sample("Maschinelles lernen ist") # 'Maschinelles lernen ist das A und O (neues-deutschland.de) Die Arbeitsbedingungen für Lehrerinnen und Lehrer sind seit Jahren verbesserungswürdig ...'
-
-# Sample from the Russian LM
-ru_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.ru', tokenizer='moses', bpe='fastbpe')
-ru_lm.sample("машинное обучение это") # 'машинное обучение это то, что мы называем "искусственным интеллектом".'
-```
-
-## Citation
-```bibtex
-@inproceedings{ng2019facebook,
-  title = {Facebook FAIR's WMT19 News Translation Task Submission},
-  author = {Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
-  booktitle = {Proc. of WMT},
-  year = 2019,
-}
-```
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/hubert_criterion.py b/spaces/ICML2022/OFA/fairseq/fairseq/criterions/hubert_criterion.py
deleted file mode 100644
index 68cb24e6f142c46e108c53479fd4027a741f5f92..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/hubert_criterion.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import re
-from dataclasses import dataclass, field
-from typing import List, Optional
-
-import torch
-import torch.nn.functional as F
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.dataclass import FairseqDataclass
-
-
-@dataclass
-class HubertCriterionConfig(FairseqDataclass):
-    pred_masked_weight: float = field(
-        default=1.0,
-        metadata={"help": "weight for predictive loss for masked frames"},
-    )
-    pred_nomask_weight: float = field(
-        default=0.0,
-        metadata={"help": "weight for predictive loss for unmasked frames"},
-    )
-    loss_weights: Optional[List[float]] = field(
-        default=None,
-        metadata={"help": "weights for additional loss terms (not first one)"},
-    )
-    log_keys: List[str] = field(
-        default_factory=lambda: [],
-        metadata={"help": "output keys to log"},
-    )
-
-
-@register_criterion("hubert", dataclass=HubertCriterionConfig)
-class HubertCriterion(FairseqCriterion):
-    def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None):
-        super().__init__(task)
-        self.pred_masked_weight = pred_masked_weight
-        self.pred_nomask_weight = pred_nomask_weight
-        self.loss_weights = loss_weights
-        self.log_keys = [] if log_keys is None else log_keys
-
-    def forward(self, model, sample, reduce=True, log_pred=False):
-        """Compute the loss for the given sample.
-        Returns a tuple with three elements:
-        1) the loss
-        2) the sample size, which is used as the denominator for the gradient
-        3) logging outputs to display while training
-        """
-        net_output = model(target_list=sample["target_list"], **sample["net_input"])
-        loss = 0.
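-        # The total loss assembled below is a weighted sum of per-frame
-        # cross-entropy terms: predictions on masked frames (scaled by
-        # pred_masked_weight), predictions on unmasked frames (scaled by
-        # pred_nomask_weight), and any extra model losses scaled by loss_weights.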
- sample_size = 0 - logging_output = {} - reduction = "sum" if reduce else "none" - - loss_m_list = [] - logp_m_list = model.get_logits(net_output, True) - targ_m_list = model.get_targets(net_output, True) - assert self.pred_masked_weight == 0 or len(logp_m_list) > 0 - for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)): - loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction) - loss_m_list.append(loss_m) - logging_output[f"loss_m_{i}"] = loss_m.detach().item() - if self.pred_masked_weight > 0: - loss += self.pred_masked_weight * sum(loss_m_list) - sample_size += targ_m_list[0].numel() - - loss_u_list = [] - logp_u_list = model.get_logits(net_output, False) - targ_u_list = model.get_targets(net_output, False) - assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0 - for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)): - loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction) - loss_u_list.append(loss_u) - logging_output[f"loss_u_{i}"] = loss_u.detach().item() - if self.pred_nomask_weight > 0: - loss += self.pred_nomask_weight * sum(loss_u_list) - sample_size += targ_u_list[0].numel() - - if self.loss_weights is not None: - assert hasattr(model, "get_extra_losses") - extra_losses, names = model.get_extra_losses(net_output) - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - names = [names] - if len(self.loss_weights) == 1 and len(extra_losses) != 1: - self.loss_weights = [self.loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}" - for p, n, coef in zip(extra_losses, names, self.loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() * sample_size - loss += p - logging_output[f"loss_{n}"] = p.item() - - logging_output = { - "loss": loss.item() if reduce else loss, - "ntokens": sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - **logging_output, - } - - for lk in self.log_keys: - if lk in net_output: - logging_output[lk] = float((net_output[lk])) - - def compute_correct(logits): - if logits.numel() == 0: - return 0, 0 - else: - assert logits.dim() > 1, logits.shape - max = logits.argmax(-1) == 0 - min = logits.argmin(-1) == 0 - both = max & min - corr = max.long().sum().item() - both.long().sum().item() - count = max.numel() - return corr, count - - with torch.no_grad(): - for i, logp_m in enumerate(logp_m_list): - corr_m, count_m = compute_correct(logp_m) - logging_output[f"correct_m_{i}"] = corr_m - logging_output[f"count_m_{i}"] = count_m - - for i, logp_u in enumerate(logp_u_list): - corr_u, count_u = compute_correct(logp_u) - logging_output[f"correct_u_{i}"] = corr_u - logging_output[f"count_u_{i}"] = count_u - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training (copied from normal cross entropy).""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3) - if sample_size != ntokens: - metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3) - metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)) - else: - metrics.log_derived("ppl", lambda meters: 
utils.get_perplexity(meters["loss"].avg))
-
-        counts = {}
-        for lk in logging_outputs[0].keys():
-            if lk.startswith("count_"):
-                val = sum(log[lk] for log in logging_outputs)
-                metrics.log_scalar(lk, val)
-                counts[lk] = val
-
-        for lk in logging_outputs[0].keys():
-            if lk.startswith("loss_"):
-                val = sum(log[lk] for log in logging_outputs)
-                metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
-            elif lk.startswith("correct_"):
-                val = sum(log[lk] for log in logging_outputs)
-                metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
-
-    @staticmethod
-    def aggregate_logging_outputs(logging_outputs):
-        """Aggregate logging outputs from data parallel training."""
-        raise NotImplementedError()
-
-    @staticmethod
-    def logging_outputs_can_be_summed() -> bool:
-        """
-        Whether the logging outputs returned by `forward` can be summed
-        across workers prior to calling `reduce_metrics`. Setting this
-        to True will improve distributed training speed.
-        """
-        return False
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/roll_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/roll_dataset.py
deleted file mode 100644
index a2915eeb3e8fb4dfb4b2bb33e0464ad0783d854c..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/data/roll_dataset.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-from . import BaseWrapperDataset
-
-
-class RollDataset(BaseWrapperDataset):
-    def __init__(self, dataset, shifts):
-        super().__init__(dataset)
-        self.shifts = shifts
-
-    def __getitem__(self, index):
-        item = self.dataset[index]
-        return torch.roll(item, self.shifts)
diff --git a/spaces/IDEA-Research/Grounded-SAM/README.md b/spaces/IDEA-Research/Grounded-SAM/README.md
deleted file mode 100644
index f16eac7e67797e87cf8be03dca4f9425565e5f5d..0000000000000000000000000000000000000000
--- a/spaces/IDEA-Research/Grounded-SAM/README.md
+++ /dev/null
@@ -1,370 +0,0 @@
----
-title: Grounded SAM
-emoji: 💩
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-![](./assets/Grounded-SAM_logo.png)
-
-# Grounded-Segment-Anything
-We plan to create a very interesting demo by combining [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO) and [Segment Anything](https://github.com/facebookresearch/segment-anything)! Right now, this is just a simple small project. We will continue to improve it and create more interesting demos. And thanks to the community users who provided the [colab demo](https://github.com/camenduru/grounded-segment-anything-colab) for us.
-
-We are very willing to **help everyone share and promote new projects** based on Segment-Anything; we highlight some excellent projects here: [Highlight Extension Projects](#bulb-highlight-extension-projects). You can submit a new issue (with a `project` tag) or a new pull request to add new projects' links.
-
-
-**Why this project?**
-
-The **core idea** behind this project is to **combine the strengths of different models in order to build a very powerful pipeline for solving complex problems**.
And it's worth mentioning that this is a workflow for combining strong expert models, where **all parts can be used separately or in combination, and can be replaced with any similar but different models (like replacing Grounding DINO with GLIP or other detectors, replacing Stable-Diffusion with ControlNet or GLIGEN, or combining with ChatGPT)**.
-
-- [Segment Anything](https://github.com/facebookresearch/segment-anything) is a strong segmentation model. But it needs prompts (like boxes/points) to generate masks.
-- [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO) is a strong zero-shot detector which is capable of generating high-quality boxes and labels from free-form text.
-- The combination of `Grounding DINO + SAM` enables **detecting and segmenting everything at any level** with text inputs!
-- The combination of `BLIP + Grounding DINO + SAM` gives an **automatic labeling system**!
-- The combination of `Grounding DINO + SAM + Stable-diffusion` acts as a **data factory, generating new data**!
-- The combination of `Whisper + Grounding DINO + SAM` lets you **detect and segment anything with speech**!
-- The chatbot **for the above tools** adds better reasoning!
-
-**🔥 🔈Speak to edit🎨: Whisper + ChatGPT + Grounded-SAM + SD**
-
-![](assets/acoustics/gsam_whisper_inpainting_demo.png)
-
-**Grounded-SAM**
-![](./assets/grounded_sam2.png)
-
-**Grounded-SAM + Stable-Diffusion Inpainting: Data Factory, Generating New Data!**
-![](./assets/grounded_sam_inpainting_demo.png)
-
-**BLIP + Grounded-SAM: Automatic Label System!**
-
-Using BLIP to generate a caption, extracting tags with ChatGPT, and using Grounded-SAM for box and mask generation. Here's the demo output:
-
-![](./assets/automatic_label_output_demo3.jpg)
-
-**Imagine Space**
-
-Some possible avenues for future work ...
-- Automatic image generation to construct new datasets.
-- Stronger foundation models with segmentation pre-training.
-- Collaboration with (Chat-)GPT.
-- A whole pipeline to automatically label images (with boxes and masks) and generate new images.
-
-**More Examples**
-![](./assets/grounded_sam_demo3_demo4.png)
-
-
-**Tips**
-- If you want to detect multiple objects in one sentence with [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO), we suggest separating each name with `.` . An example: `cat . dog . chair .`
-
-## What's New
-
-- :fire: A **ChatBot** for our project is built!
-
-https://user-images.githubusercontent.com/24236723/231955561-2ae4ec1a-c75f-4cc5-9b7b-517aa1432123.mp4
-
-
-
-- 🆕 We released the interactive fashion-edit playground [here](https://github.com/IDEA-Research/Grounded-Segment-Anything/tree/humanFace). Run it in the notebook and just click to annotate points for further segmentation. Enjoy it!
-
-
-
-
-- :new: Check out our related human-face-edit branch [here](https://github.com/IDEA-Research/Grounded-Segment-Anything/tree/humanFace). We'll keep updating this branch with more interesting features. Here are some examples:
-
-  ![](https://github.com/IDEA-Research/Grounded-Segment-Anything/blob/humanFace/assets/231-hair-edit.png)
-
-
-## :bulb: Highlight Extension Projects
-- [Segment Everything Everywhere All at Once](https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once) supports various types of prompts and any combination of prompts.
-- [Computer Vision in the Wild (CVinW) Readings](https://github.com/Computer-Vision-in-the-Wild/CVinW_Readings) for those who are interested in open-set tasks in computer vision.
-- [OpenSeeD](https://github.com/IDEA-Research/OpenSeeD): interactive segmentation with box input to generate masks.
-- [Zero-Shot Anomaly Detection](https://github.com/caoyunkang/GroundedSAM-zero-shot-anomaly-detection) by Yunkang Cao
-- [EditAnything: ControlNet + StableDiffusion based on the SAM segmentation mask](https://github.com/sail-sg/EditAnything) by Shanghua Gao and Pan Zhou
-- [IEA: Image Editing Anything](https://github.com/feizc/IEA) by Zhengcong Fei
-- [SAM-MMRotate: Combining Rotated Object Detector and SAM](https://github.com/Li-Qingyun/sam-mmrotate) by Qingyun Li and Xue Yang
-- [Awesome-Anything](https://github.com/VainF/Awesome-Anything) by Gongfan Fang
-- [Prompt-Segment-Anything](https://github.com/RockeyCoss/Prompt-Segment-Anything) by Rockey
-- [**WebUI for Segment-Anything! Grounding-SAM is on the way!**](https://github.com/continue-revolution/sd-webui-segment-anything) by Chengsong Zhang
-- [Inpainting Anything: Inpaint Anything with SAM + Inpainting models](https://github.com/geekyutao/Inpaint-Anything) by Tao Yu
-- [Grounded Segment Anything From Objects to Parts: Combining Segment-Anything with VLPart & GLIP & Visual ChatGPT](https://github.com/Cheems-Seminar/segment-anything-and-name-it) by Peize Sun and Shoufa Chen
-- [Napari-SAM: Integration of Segment Anything into Napari (a nice viewer for SAM)](https://github.com/MIC-DKFZ/napari-sam) by MIC-DKFZ
-- [Grounded Segment Anything Colab](https://github.com/camenduru/grounded-segment-anything-colab) by camenduru
-- [Optical Character Recognition with Segment Anything](https://github.com/yeungchenwa/OCR-SAM) by Zhenhua Yang
-- [Transform Image into Unique Paragraph with ChatGPT, BLIP2, OFA, GRIT, Segment Anything, ControlNet](https://github.com/showlab/Image2Paragraph) by showlab
-- [Lang-Segment-Anything: Another awesome demo for combining GroundingDINO with Segment-Anything](https://github.com/luca-medeiros/lang-segment-anything) by Luca Medeiros
-- [🥳 🚀 **Playground: Integrate SAM and OpenMMLab!**](https://github.com/open-mmlab/playground)
-- [3D-object via Segment Anything](https://github.com/dvlab-research/3D-Box-Segment-Anything) by Yukang Chen
-
-## :bookmark_tabs: Catalog
-- [x] Grounding DINO Demo
-- [x] Grounding DINO + Segment Anything Demo
-- [x] Grounding DINO + Segment Anything + Stable-Diffusion Demo
-- [x] BLIP + Grounding DINO + Segment Anything + Stable-Diffusion Demo
-- [x] Whisper + Grounding DINO + Segment Anything + Stable-Diffusion Demo
-- [ ] Hugging Face Demo
-- [ ] Colab demo
-
-## :open_book: Notebook Demo
-See our [notebook file](grounded_sam.ipynb) as an example.
-
-## :hammer_and_wrench: Installation
-The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended.
-
-Install Segment Anything:
-
-```bash
-python -m pip install -e segment_anything
-```
-
-Install Grounding DINO:
-
-```bash
-python -m pip install -e GroundingDINO
-```
-
-
-Install diffusers:
-
-```bash
-pip install --upgrade diffusers[torch]
-```
-
-
-The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks.
-```
-pip install opencv-python pycocotools matplotlib onnxruntime onnx ipykernel
-```
-
-More details can be found in [install segment anything](https://github.com/facebookresearch/segment-anything#installation) and [install GroundingDINO](https://github.com/IDEA-Research/GroundingDINO#install).
-
-
-## :runner: Run Grounding DINO Demo
-- Download the checkpoint for Grounding DINO:
-```bash
-cd Grounded-Segment-Anything
-
-wget https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
-```
-
-- Run demo
-```bash
-export CUDA_VISIBLE_DEVICES=0
-python grounding_dino_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --input_image assets/demo1.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --text_prompt "bear" \
-    --device "cuda"
-```
-- The model prediction visualization will be saved in `output_dir` as follows:
-
-![](./assets/grounding_dino_output_demo1.jpg)
-
-## :running_man: Run Grounded-Segment-Anything Demo
-- Download the checkpoints for Segment Anything and Grounding DINO:
-```bash
-cd Grounded-Segment-Anything
-
-wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
-wget https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
-```
-
-- Run Demo
-```bash
-export CUDA_VISIBLE_DEVICES=0
-python grounded_sam_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/demo1.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --text_prompt "bear" \
-    --device "cuda"
-```
-
-- The model prediction visualization will be saved in `output_dir` as follows:
-
-![](./assets/grounded_sam_output_demo1.jpg)
-
-
-## :skier: Run Grounded-Segment-Anything + Inpainting Demo
-
-```bash
-CUDA_VISIBLE_DEVICES=0
-python grounded_sam_inpainting_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/inpaint_demo.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --det_prompt "bench" \
-    --inpaint_prompt "A sofa, high quality, detailed" \
-    --device "cuda"
-```
-
-## :golfing: Run Grounded-Segment-Anything + Inpainting Gradio APP
-
-```bash
-python gradio_app.py
-```
-
-- The gradio_app visualization is as follows:
-
-![](./assets/gradio_demo.png)
-
-
-## :robot: Run Grounded-Segment-Anything + BLIP Demo
-It is easy to generate pseudo labels automatically as follows (a minimal sketch of these three steps is shown right after this list):
-1. Use BLIP (or another captioning model) to generate a caption.
-2. Extract tags from the caption. We use ChatGPT to handle potentially complicated sentences.
-3. Use Grounded-Segment-Anything to generate the boxes and masks.
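-
-To make the three steps concrete, here is a minimal sketch, not part of the demo scripts: the checkpoint paths match the demos in this README, while passing the raw BLIP caption directly to the detector is a simplification of the ChatGPT tag-extraction step. `automatic_label_demo.py` below is the full version.
-
-```python
-import torch
-from PIL import Image
-from torchvision.ops import box_convert
-from transformers import BlipProcessor, BlipForConditionalGeneration
-from groundingdino.util.inference import load_model, load_image, predict
-from segment_anything import sam_model_registry, SamPredictor
-
-image_path = "assets/demo3.jpg"
-
-# 1. Caption the image with BLIP.
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-inputs = processor(Image.open(image_path).convert("RGB"), return_tensors="pt")
-caption = processor.decode(blip.generate(**inputs)[0], skip_special_tokens=True)
-
-# 2. Turn the caption into tags. The demo does this with ChatGPT; using the
-#    raw caption as the detector prompt is a crude but workable stand-in.
-tags = caption
-
-# 3. Grounding DINO proposes text-conditioned boxes, then SAM turns each box into a mask.
-dino = load_model("GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
-                  "groundingdino_swint_ogc.pth")
-image_source, image = load_image(image_path)
-boxes, logits, phrases = predict(model=dino, image=image, caption=tags,
-                                 box_threshold=0.25, text_threshold=0.2)
-
-sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
-predictor = SamPredictor(sam)
-predictor.set_image(image_source)
-h, w, _ = image_source.shape
-# Grounding DINO returns normalized cxcywh boxes; SAM expects pixel xyxy.
-xyxy = box_convert(boxes * torch.tensor([w, h, w, h]), "cxcywh", "xyxy").numpy()
-for box, phrase in zip(xyxy, phrases):
-    masks, scores, _ = predictor.predict(box=box, multimask_output=False)
-    print(phrase, masks[0].shape)  # one boolean mask per detected phrase
-```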
-
-- Run Demo
-```bash
-export CUDA_VISIBLE_DEVICES=0
-python automatic_label_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/demo3.jpg \
-    --output_dir "outputs" \
-    --openai_key your_openai_key \
-    --box_threshold 0.25 \
-    --text_threshold 0.2 \
-    --iou_threshold 0.5 \
-    --device "cuda"
-```
-
-- The pseudo labels and model prediction visualization will be saved in `output_dir` as follows:
-
-![](./assets/automatic_label_output_demo3.jpg)
-
-
-## :open_mouth: Run Grounded-Segment-Anything + Whisper Demo
-Detect and segment anything with speech!
-
-**Install Whisper**
-```bash
-pip install -U openai-whisper
-```
-See the [whisper official page](https://github.com/openai/whisper#setup) if you have other questions about the installation.
-
-**Run Voice-to-Label Demo**
-
-Optional: Download the demo audio file
-
-```bash
-wget https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/demo_audio.mp3
-```
-
-
-```bash
-export CUDA_VISIBLE_DEVICES=0
-python grounded_sam_whisper_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/demo4.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --speech_file "demo_audio.mp3" \
-    --device "cuda"
-```
-
-![](./assets/grounded_sam_whisper_output.jpg)
-
-**Run Voice-to-Inpaint Demo**
-
-You can enable ChatGPT to automatically determine the object to inpaint and the inpainting order with `--enable_chatgpt`.
-
-Alternatively, you can specify the object you want to inpaint [stored in `args.det_speech_file`] and the text you want to inpaint with [stored in `args.inpaint_speech_file`].
-
-```bash
-# Example: enable chatgpt
-export CUDA_VISIBLE_DEVICES=0
-export OPENAI_KEY=your_openai_key
-python grounded_sam_whisper_inpainting_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/inpaint_demo.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --prompt_speech_file assets/acoustics/prompt_speech_file.mp3 \
-    --enable_chatgpt \
-    --openai_key $OPENAI_KEY \
-    --device "cuda"
-```
-
-```bash
-# Example: without chatgpt
-export CUDA_VISIBLE_DEVICES=0
-python grounded_sam_whisper_inpainting_demo.py \
-    --config GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py \
-    --grounded_checkpoint groundingdino_swint_ogc.pth \
-    --sam_checkpoint sam_vit_h_4b8939.pth \
-    --input_image assets/inpaint_demo.jpg \
-    --output_dir "outputs" \
-    --box_threshold 0.3 \
-    --text_threshold 0.25 \
-    --det_speech_file "assets/acoustics/det_voice.mp3" \
-    --inpaint_speech_file "assets/acoustics/inpaint_voice.mp3" \
-    --device "cuda"
-```
-
-![](./assets/acoustics/gsam_whisper_inpainting_pipeline.png)
-
-## :speech_balloon: Run ChatBot Demo
-Following [Visual ChatGPT](https://github.com/microsoft/visual-chatgpt), we add a ChatBot to our project. Currently, it supports:
-1. "Describe the image."
-2. "Detect the dog (and the cat) in the image."
-3. "Segment anything in the image."
-4. "Segment the dog (and the cat) in the image."
-5. "Help me label the image."
-6. "Replace the dog with a cat in the image."
-
-To use the ChatBot:
-- Install whisper if you want to use audio as input.
-- Set the default model setting in the tool `Grounded_dino_sam_inpainting`. -- Run Demo -```bash -export CUDA_VISIBLE_DEVICES=0 -python chatbot.py -``` - - -## :cupid: Acknowledgements -- [Segment Anything](https://github.com/facebookresearch/segment-anything) -- [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO) - -## Citation -If you find this project helpful for your research, please consider citing the following BibTeX entry. -```BibTex -@article{kirillov2023segany, - title={Segment Anything}, - author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross}, - journal={arXiv:2304.02643}, - year={2023} -} - -@inproceedings{ShilongLiu2023GroundingDM, - title={Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection}, - author={Shilong Liu and Zhaoyang Zeng and Tianhe Ren and Feng Li and Hao Zhang and Jie Yang and Chunyuan Li and Jianwei Yang and Hang Su and Jun Zhu and Lei Zhang}, - year={2023} -} -``` - diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/screenshot/Login.py b/spaces/Ibtehaj10/cheating-detection-FYP/screenshot/Login.py deleted file mode 100644 index 30b89fc5d62fe96826b7c8339094bf93c2c1487c..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection-FYP/screenshot/Login.py +++ /dev/null @@ -1,694 +0,0 @@ -import cv2 -import datetime -import imutils -import numpy as np -from centroidtracker import CentroidTracker -import pandas as pd -import torch -import streamlit as st -import mediapipe as mp -import cv2 as cv -import numpy as np -import tempfile -import time -from PIL import Image -import pandas as pd -import torch -import base64 -import streamlit.components.v1 as components -import csv -import pickle -from pathlib import Path -import streamlit_authenticator as stauth -import os -import csv -from streamlit_option_menu import option_menu -# x-x-x-x-x-x-x-x-x-x-x-x-x-x LOGIN FORM x-x-x-x-x-x-x-x-x - - -import streamlit as st -import pandas as pd -import hashlib -import sqlite3 -# - -import pickle -from pathlib import Path -import streamlit_authenticator as stauth -import pyautogui - -# print("Done !!!") - -data = ["student Count",'Date','Id','Mobile','Watch'] -with open('final.csv', 'w') as file: - writer = csv.writer(file) - writer.writerow(data) - - -# # l1 = [] -# # l2 = [] -# # if st.button('signup'): - - -# # usernames = st.text_input('Username') -# # pwd = st.text_input('Password') -# # l1.append(usernames) -# # l2.append(pwd) - -# # names = ["dmin", "ser"] -# # if st.button("signupsss"): -# # username =l1 - -# # password =l2 - -# # hashed_passwords =stauth.Hasher(password).generate() - -# # file_path = Path(__file__).parent / "hashed_pw.pkl" - -# # with file_path.open("wb") as file: -# # pickle.dump(hashed_passwords, file) - - -# # elif st.button('Logins'): -# names = ['dmin', 'ser'] - -# username = [] - -# file_path = Path(__file__).parent / 'hashed_pw.pkl' - -# with file_path.open('rb') as file: -# hashed_passwords = pickle.load(file) - -# authenticator = stauth.Authenticate(names,username,hashed_passwords,'Cheating Detection','abcdefg',cookie_expiry_days=180) - -# name,authentication_status,username= authenticator.login('Login','main') - - -# if authentication_status == False: -# st.error('Username/Password is incorrect') - -# if authentication_status == None: -# st.error('Please enter a username and password') - 
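-# The active login flow below is simpler than the commented-out authenticator
-# above: credentials are read from LoginStatus.csv, and the sidebar inputs are
-# checked against its Id and Password columns before the detection UI is shown.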
-@st.experimental_memo
-def get_img_as_base64(file):
    -    with open(file, "rb") as f:
-        data = f.read()
-    return base64.b64encode(data).decode()
-
-
-#img = get_img_as_base64("/home/anas/PersonTracking/WebUI/attendence.jpg")
-
-page_bg_img = f"""
-"""
-
-st.markdown(page_bg_img, unsafe_allow_html=True)
-files = pd.read_csv('LoginStatus.csv')
-
-
-idS = list(files['Id'])
-Pwd = list(files['Password'].astype(str))
-
-# print(type(Pwd))
-ids = st.sidebar.text_input('Enter a username')
-Pswd = st.sidebar.text_input('Enter a password', type="password", key="password")
-
-# print('list : ', type(Pwd))
-
-
-
-if (ids in idS) and (str(Pswd) in Pwd):
-
-    # st.empty()
-    date_time = time.strftime("%b %d %Y %-I:%M %p")
-    date = date_time.split()
-    dates = date[0:3]
-    times = date[3:5]
-    # x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x APPLICATION x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
-
-    def non_max_suppression_fast(boxes, overlapThresh):
-        try:
-            if len(boxes) == 0:
-                return []
-
-            if boxes.dtype.kind == "i":
-                boxes = boxes.astype("float")
-
-            pick = []
-
-            x1 = boxes[:, 0]
-            y1 = boxes[:, 1]
-            x2 = boxes[:, 2]
-            y2 = boxes[:, 3]
-
-            area = (x2 - x1 + 1) * (y2 - y1 + 1)
-            idxs = np.argsort(y2)
-
-            while len(idxs) > 0:
-                last = len(idxs) - 1
-                i = idxs[last]
-                pick.append(i)
-
-                xx1 = np.maximum(x1[i], x1[idxs[:last]])
-                yy1 = np.maximum(y1[i], y1[idxs[:last]])
-                xx2 = np.minimum(x2[i], x2[idxs[:last]])
-                yy2 = np.minimum(y2[i], y2[idxs[:last]])
-
-                w = np.maximum(0, xx2 - xx1 + 1)
-                h = np.maximum(0, yy2 - yy1 + 1)
-
-                overlap = (w * h) / area[idxs[:last]]
-
-                idxs = np.delete(idxs, np.concatenate(([last],
-                                                       np.where(overlap > overlapThresh)[0])))
-
-            return boxes[pick].astype("int")
-        except Exception as e:
-            print("Exception occurred in non_max_suppression : {}".format(e))
-
-
-    protopath = "MobileNetSSD_deploy.prototxt"
-    modelpath = "MobileNetSSD_deploy.caffemodel"
-    detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
-    # Only enable it if you are using OpenVino environment
-    # detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
-    # detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
-
-
-    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
-               "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
-               "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
-               "sofa", "train", "tvmonitor"]
-
-    tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
-
-    st.markdown(
-        """
-        """,
-        unsafe_allow_html=True,
-    )
-    hide_streamlit_style = """
-    """
-    st.markdown(hide_streamlit_style, unsafe_allow_html=True)
-
-
-    # Resize images to fit the container
-    @st.cache()
-    # Get image dimensions
-    def image_resize(image, width=None, height=None, inter=cv.INTER_AREA):
-        dim = None
-        (h, w) = image.shape[:2]
-
-        if width is None and height is None:
-            return image
-
-        if width is None:
-            # only height given: scale by the requested height
-            r = height / float(h)
-            dim = (int(w * r), height)
-
-        else:
-            r = width / float(w)
-            dim = (width, int(h * r))
-
-        # Resize image
-        resized = cv.resize(image, dim, interpolation=inter)
-
-        return resized
-
-    # About Page
-    # authenticator.logout('Logout')
-    EXAMPLE_NO = 3
-
-
-    def streamlit_menu(example=1):
-        if example == 1:
-            # 1. as sidebar menu
-            with st.sidebar:
-                selected = option_menu(
-                    menu_title="Main Menu",  # required
-                    options=["Home", "Projects", "Contact"],  # required
-                    icons=["house", "book", "envelope"],  # optional
-                    menu_icon="cast",  # optional
-                    default_index=0,  # optional
-                )
-            return selected
-
-        if example == 2:
-            # 2.
horizontal menu w/o custom style - selected = option_menu( - menu_title=None, # required - options=["Home", "Projects", "Contact"], # required - icons=["house", "book", "envelope"], # optional - menu_icon="cast", # optional - default_index=0, # optional - orientation="horizontal", - ) - return selected - - if example == 3: - # 2. horizontal menu with custom style - selected = option_menu( - menu_title=None, # required - options=["Home", "Projects", "Contact"], # required - icons=["house", "book", "envelope"], # optional - menu_icon="cast", # optional - default_index=0, # optional - orientation="horizontal", - styles={ - "container": {"padding": "0!important", "background-color": "#eaeaea"}, - "icon": {"color": "#080602", "font-size": "18px"}, - "nav-link": { - "font-size": "18px", - "text-align": "left", - "color": "#000000", - "margin": "0px", - "--hover-color": "#E1A031", - }, - "nav-link-selected": {"background-color": "#ffffff"}, - }, - ) - return selected - - - selected = streamlit_menu(example=EXAMPLE_NO) - - if selected == "Home": - st.title(f"You have selected {selected}") - # if selected == "Projects": - # st.title(f"You have selected {selected}") - if selected == "Contact": - st.title(f"You have selected {selected}") - # app_mode = st.sidebar.selectbox( - # 'App Mode', - # ['Application'] - # ) - if selected == 'Projects': - # 2. horizontal menu with custom style - # selected = option_menu( - # menu_title=None, # required - # options=["Home", "Projects", "Contact"], # required - # icons=["house", "book", "envelope"], # optional - # menu_icon="cast", # optional - # default_index=0, # optional - # orientation="horizontal", - # styles={ - # "container": {"padding": "0!important", "background-color": "#fafafa"}, - # "icon": {"color": "orange", "font-size": "25px"}, - # "nav-link": { - # "font-size": "25px", - # "text-align": "left", - # "margin": "0px", - # "--hover-color": "#eee", - # }, - # "nav-link-selected": {"background-color": "blue"}, - # }, - # ) - # if app_mode == 'About': - # st.title('About Product And Team') - # st.markdown(''' - # Imran Bhai Project - # ''') - # st.markdown( - # """ - # - # """, - # unsafe_allow_html=True, - # ) - - - - - # elif app_mode == 'Application': - - st.set_option('deprecation.showfileUploaderEncoding', False) - - use_webcam = "pass" - # record = st.sidebar.checkbox("Record Video") - - # if record: - # st.checkbox('Recording', True) - - # drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1) - - # st.sidebar.markdown('---') - - # ## Add Sidebar and Window style - # st.markdown( - # """ - # - # """, - # unsafe_allow_html=True, - # ) - - # max_faces = st.sidebar.number_input('Maximum Number of Faces', value=5, min_value=1) - # st.sidebar.markdown('---') - # detection_confidence = st.sidebar.slider('Min Detection Confidence', min_value=0.0,max_value=1.0,value=0.5) - # tracking_confidence = st.sidebar.slider('Min Tracking Confidence', min_value=0.0,max_value=1.0,value=0.5) - # st.sidebar.markdown('---') - - ## Get Video - stframe = st.empty() - video_file_buffer = st.file_uploader("Upload a Video", type=['mp4', 'mov', 'avi', 'asf', 'm4v']) - temp_file = tempfile.NamedTemporaryFile(delete=False) - - - if not video_file_buffer: - if use_webcam: - video = cv.VideoCapture(0) - else: - try: - video = cv.VideoCapture(1) - temp_file.name = video - except: - pass - else: - temp_file.write(video_file_buffer.read()) - video = cv.VideoCapture(temp_file.name) - - width = int(video.get(cv.CAP_PROP_FRAME_WIDTH)) - height = 
int(video.get(cv.CAP_PROP_FRAME_HEIGHT))
-    fps_input = int(video.get(cv.CAP_PROP_FPS))
-
-    ## Recording
-    codec = cv.VideoWriter_fourcc('a', 'v', 'c', '1')
-    out = cv.VideoWriter('output1.mp4', codec, fps_input, (width, height))
-
-    # st.sidebar.text('Input Video')
-    # st.sidebar.video(temp_file.name)
-
-    fps = 0
-    i = 0
-
-    drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
-
-    kpil, kpil2, kpil3, kpil4, kpil5, kpil6 = st.columns(6)
-
-    with kpil:
-        st.markdown('**Frame Rate**')
-        kpil_text = st.markdown('0')
-
-    with kpil2:
-        st.markdown('**Detection ID**')
-        kpil2_text = st.markdown('0')
-
-    with kpil3:
-        st.markdown('**Mobile**')
-        kpil3_text = st.markdown('0')
-    with kpil4:
-        st.markdown('**Watch**')
-        kpil4_text = st.markdown('0')
-    with kpil5:
-        st.markdown('**Count**')
-        kpil5_text = st.markdown('0')
-    with kpil6:
-        st.markdown('**Img Res**')
-        kpil6_text = st.markdown('0')
-
-
-
-    st.markdown('<hr/>
    ', unsafe_allow_html=True) - # try: - def main(): - db = {} - - # cap = cv2.VideoCapture('//home//anas//PersonTracking//WebUI//movement.mp4') - path='/usr/local/lib/python3.10/dist-packages/yolo0vs5/yolov5s-int8.tflite' - #count=0 - custom = 'yolov5s' - - model = torch.hub.load('/usr/local/lib/python3.10/dist-packages/yolovs5', custom, path,source='local',force_reload=True) - - b=model.names[0] = 'person' - mobile = model.names[67] = 'cell phone' - watch = model.names[75] = 'clock' - - fps_start_time = datetime.datetime.now() - fps = 0 - size=416 - - count=0 - counter=0 - - - color=(0,0,255) - - cy1=250 - offset=6 - - - pt1 = (120, 100) - pt2 = (980, 1150) - color = (0, 255, 0) - - pt3 = (283, 103) - pt4 = (1500, 1150) - - cy2 = 500 - color = (0, 255, 0) - total_frames = 0 - prevTime = 0 - cur_frame = 0 - count=0 - counter=0 - fps_start_time = datetime.datetime.now() - fps = 0 - total_frames = 0 - lpc_count = 0 - opc_count = 0 - object_id_list = [] - # success = True - if st.button("Detect"): - try: - while video.isOpened(): - - ret, frame = video.read() - frame = imutils.resize(frame, width=600) - total_frames = total_frames + 1 - - (H, W) = frame.shape[:2] - - blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5) - - detector.setInput(blob) - person_detections = detector.forward() - rects = [] - for i in np.arange(0, person_detections.shape[2]): - confidence = person_detections[0, 0, i, 2] - if confidence > 0.5: - idx = int(person_detections[0, 0, i, 1]) - - if CLASSES[idx] != "person": - continue - - person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H]) - (startX, startY, endX, endY) = person_box.astype("int") - rects.append(person_box) - - boundingboxes = np.array(rects) - boundingboxes = boundingboxes.astype(int) - rects = non_max_suppression_fast(boundingboxes, 0.3) - - objects = tracker.update(rects) - for (objectId, bbox) in objects.items(): - x1, y1, x2, y2 = bbox - x1 = int(x1) - y1 = int(y1) - x2 = int(x2) - y2 = int(y2) - - cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2) - text = "ID: {}".format(objectId) - # print(text) - cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1) - if objectId not in object_id_list: - object_id_list.append(objectId) - fps_end_time = datetime.datetime.now() - time_diff = fps_end_time - fps_start_time - if time_diff.seconds == 0: - fps = 0.0 - else: - fps = (total_frames / time_diff.seconds) - - fps_text = "FPS: {:.2f}".format(fps) - - cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1) - lpc_count = len(objects) - opc_count = len(object_id_list) - - lpc_txt = "LPC: {}".format(lpc_count) - opc_txt = "OPC: {}".format(opc_count) - - count += 1 - if count % 4 != 0: - continue - # frame=cv.resize(frame, (600,500)) - # cv2.line(frame, pt1, pt2,color,2) - # cv2.line(frame, pt3, pt4,color,2) - results = model(frame,size) - components = results.pandas().xyxy[0] - for index, row in results.pandas().xyxy[0].iterrows(): - x1 = int(row['xmin']) - y1 = int(row['ymin']) - x2 = int(row['xmax']) - y2 = int(row['ymax']) - confidence = (row['confidence']) - obj = (row['class']) - - - # min':x1,'ymin':y1,'xmax':x2,'ymax':y2,'confidence':confidence,'Object':obj} - # if lpc_txt is not None: - # try: - # db["student Count"] = [lpc_txt] - # except: - # db["student Count"] = ['N/A'] - if obj == 0: - cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2) - rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2) - rectcenter = int(rectx1),int(recty1) - cx = rectcenter[0] - cy = rectcenter[1] - 
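-                            # The block below counts "movement": the centroid (cx, cy) of each
-                            # detected person is tested against the virtual line y = cy1, and a
-                            # crossing within +/- offset increments the on-screen counter.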
-                            cv2.circle(frame, (cx, cy), 3, (0, 255, 0), -1)
-                            cv2.putText(frame, str(b), (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)
-
-                            db["student Count"] = [lpc_txt]
-                            db['Date'] = [date_time]
-                            db['id'] = ['N/A']
-                            db['Mobile'] = ['N/A']
-                            db['Watch'] = ['N/A']
-                            if cy < (cy1 + offset) and cy > (cy1 - offset):
-                                DB = []
-                                counter += 1
-                                DB.append(counter)
-
-                                ff = DB[-1]
-                                fx = str(ff)
-                                # cv2.line(frame, pt1, pt2, (0, 0, 255), 2)
-                                # if cy < (cy2 + offset) and cy > (cy2 - offset):
-
-                                # cv2.line(frame, pt3, pt4, (0, 0, 255), 2)
-                                font = cv2.FONT_HERSHEY_TRIPLEX
-                                cv2.putText(frame, fx, (50, 50), font, 1, (0, 0, 255), 2, cv2.LINE_4)
-                                cv2.putText(frame, "Movement", (70, 70), font, 1, (0, 0, 255), 2, cv2.LINE_4)
-                                kpil2_text.write(f"<h1 style='text-align: center; color: red;'>{text}</h1>", unsafe_allow_html=True)
-
-                                db['id'] = [text]
-                                name = "/screenshot/" + str(date_time) + '.jpg'
-                                print('Creating...' + name)
-                                cv2.imwrite(name, frame)
-
-                                # myScreenshot = pyautogui.screenshot()
-                                # if st.button("Download ss"):
-                                #     myScreenshot.save(r'name.png')
-                                #     myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
-                        if obj == 67:
-                            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
-                            rectx1, recty1 = ((x1 + x2) / 2, (y1 + y2) / 2)
-                            rectcenter = int(rectx1), int(recty1)
-                            cx = rectcenter[0]
-                            cy = rectcenter[1]
-                            cv2.circle(frame, (cx, cy), 3, (0, 255, 0), -1)
-                            cv2.putText(frame, str(mobile), (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)
-                            cv2.putText(frame, 'Mobile', (50, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2, cv2.LINE_4)
-                            kpil3_text.write(f"<h1 style='text-align: center; color: red;'>{mobile}{text}</h1>", unsafe_allow_html=True)
-                            db['Mobile'] = mobile + ' ' + text
-                            name = "/screenshot/" + str(date_time) + '.jpg'
-                            print('Creating...' + name)
-
-                            # writing the extracted images
-                            cv2.imwrite(name, frame)
-
-                            # myScreenshot = pyautogui.screenshot()
-                            # if st.button("Download ss"):
-                            #     myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
-                            #     myScreenshot.save(r'name.png')
-
-                        if obj == 75:
-                            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
-                            rectx1, recty1 = ((x1 + x2) / 2, (y1 + y2) / 2)
-                            rectcenter = int(rectx1), int(recty1)
-                            cx = rectcenter[0]
-                            cy = rectcenter[1]
-                            cv2.circle(frame, (cx, cy), 3, (0, 255, 0), -1)
-                            cv2.putText(frame, str(watch), (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)
-                            cv2.putText(frame, 'Watch', (50, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2, cv2.LINE_4)
-                            kpil6_text.write(f"<h1 style='text-align: center; color: red;'>{watch}</h1>", unsafe_allow_html=True)
-
-                            db['Watch'] = watch
-                            name = "/screenshot/" + str(date_time) + '.jpg'
-                            print('Creating...' + name)
-                            cv2.imwrite(name, frame)
-
-                            # writing the extracted images
-                            # myScreenshot = pyautogui.screenshot()
-                            # if st.button("Download ss"):
-                            #     myScreenshot.save(r'/home/anas/PersonTracking/AIComputerVision-master/pages/name.png')
-                            #     myScreenshot.save(r'name.png')
-
-                    kpil_text.write(f"<h1 style='text-align: center; color: red;'>{int(fps)}</h1>", unsafe_allow_html=True)
-                    kpil5_text.write(f"<h1 style='text-align: center; color: red;'>{lpc_txt}</h1>", unsafe_allow_html=True)
-                    kpil6_text.write(f"<h1 style='text-align: center; color: red;'>{width*height}</h1>", unsafe_allow_html=True)
    ", - unsafe_allow_html=True) - - - frame = cv.resize(frame,(0,0), fx=0.8, fy=0.8) - frame = image_resize(image=frame, width=640) - stframe.image(frame,channels='BGR', use_column_width=True) - df = pd.DataFrame(db) - df.to_csv('final.csv',mode='a',header=False,index=False) - except: - pass - with open('final.csv') as f: - st.download_button(label = 'Download Cheating Report',data=f,file_name='data.csv') - - os.remove("final.csv") - main() diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/duf_arch.py b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/duf_arch.py deleted file mode 100644 index e2b3ab7df4d890c9220d74ed8c461ad9d155120a..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/duf_arch.py +++ /dev/null @@ -1,276 +0,0 @@ -import numpy as np -import torch -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.utils.registry import ARCH_REGISTRY - - -class DenseBlocksTemporalReduce(nn.Module): - """A concatenation of 3 dense blocks with reduction in temporal dimension. - - Note that the output temporal dimension is 6 fewer the input temporal dimension, since there are 3 blocks. - - Args: - num_feat (int): Number of channels in the blocks. Default: 64. - num_grow_ch (int): Growing factor of the dense blocks. Default: 32 - adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation. - Set to false if you want to train from scratch. Default: False. - """ - - def __init__(self, num_feat=64, num_grow_ch=32, adapt_official_weights=False): - super(DenseBlocksTemporalReduce, self).__init__() - if adapt_official_weights: - eps = 1e-3 - momentum = 1e-3 - else: # pytorch default values - eps = 1e-05 - momentum = 0.1 - - self.temporal_reduce1 = nn.Sequential( - nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d(num_feat, num_feat, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True), - nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d(num_feat, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)) - - self.temporal_reduce2 = nn.Sequential( - nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d( - num_feat + num_grow_ch, - num_feat + num_grow_ch, (1, 1, 1), - stride=(1, 1, 1), - padding=(0, 0, 0), - bias=True), nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d(num_feat + num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)) - - self.temporal_reduce3 = nn.Sequential( - nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d( - num_feat + 2 * num_grow_ch, - num_feat + 2 * num_grow_ch, (1, 1, 1), - stride=(1, 1, 1), - padding=(0, 0, 0), - bias=True), nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum), - nn.ReLU(inplace=True), - nn.Conv3d( - num_feat + 2 * num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)) - - def forward(self, x): - """ - Args: - x (Tensor): Input tensor with shape (b, num_feat, t, h, w). - - Returns: - Tensor: Output with shape (b, num_feat + num_grow_ch * 3, 1, h, w). 
- """ - x1 = self.temporal_reduce1(x) - x1 = torch.cat((x[:, :, 1:-1, :, :], x1), 1) - - x2 = self.temporal_reduce2(x1) - x2 = torch.cat((x1[:, :, 1:-1, :, :], x2), 1) - - x3 = self.temporal_reduce3(x2) - x3 = torch.cat((x2[:, :, 1:-1, :, :], x3), 1) - - return x3 - - -class DenseBlocks(nn.Module): - """ A concatenation of N dense blocks. - - Args: - num_feat (int): Number of channels in the blocks. Default: 64. - num_grow_ch (int): Growing factor of the dense blocks. Default: 32. - num_block (int): Number of dense blocks. The values are: - DUF-S (16 layers): 3 - DUF-M (18 layers): 9 - DUF-L (52 layers): 21 - adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation. - Set to false if you want to train from scratch. Default: False. - """ - - def __init__(self, num_block, num_feat=64, num_grow_ch=16, adapt_official_weights=False): - super(DenseBlocks, self).__init__() - if adapt_official_weights: - eps = 1e-3 - momentum = 1e-3 - else: # pytorch default values - eps = 1e-05 - momentum = 0.1 - - self.dense_blocks = nn.ModuleList() - for i in range(0, num_block): - self.dense_blocks.append( - nn.Sequential( - nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True), - nn.Conv3d( - num_feat + i * num_grow_ch, - num_feat + i * num_grow_ch, (1, 1, 1), - stride=(1, 1, 1), - padding=(0, 0, 0), - bias=True), nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum), - nn.ReLU(inplace=True), - nn.Conv3d( - num_feat + i * num_grow_ch, - num_grow_ch, (3, 3, 3), - stride=(1, 1, 1), - padding=(1, 1, 1), - bias=True))) - - def forward(self, x): - """ - Args: - x (Tensor): Input tensor with shape (b, num_feat, t, h, w). - - Returns: - Tensor: Output with shape (b, num_feat + num_block * num_grow_ch, t, h, w). - """ - for i in range(0, len(self.dense_blocks)): - y = self.dense_blocks[i](x) - x = torch.cat((x, y), 1) - return x - - -class DynamicUpsamplingFilter(nn.Module): - """Dynamic upsampling filter used in DUF. - - Reference: https://github.com/yhjo09/VSR-DUF - - It only supports input with 3 channels. And it applies the same filters to 3 channels. - - Args: - filter_size (tuple): Filter size of generated filters. The shape is (kh, kw). Default: (5, 5). - """ - - def __init__(self, filter_size=(5, 5)): - super(DynamicUpsamplingFilter, self).__init__() - if not isinstance(filter_size, tuple): - raise TypeError(f'The type of filter_size must be tuple, but got type{filter_size}') - if len(filter_size) != 2: - raise ValueError(f'The length of filter size must be 2, but got {len(filter_size)}.') - # generate a local expansion filter, similar to im2col - self.filter_size = filter_size - filter_prod = np.prod(filter_size) - expansion_filter = torch.eye(int(filter_prod)).view(filter_prod, 1, *filter_size) # (kh*kw, 1, kh, kw) - self.expansion_filter = expansion_filter.repeat(3, 1, 1, 1) # repeat for all the 3 channels - - def forward(self, x, filters): - """Forward function for DynamicUpsamplingFilter. - - Args: - x (Tensor): Input image with 3 channels. The shape is (n, 3, h, w). - filters (Tensor): Generated dynamic filters. The shape is (n, filter_prod, upsampling_square, h, w). - filter_prod: prod of filter kernel size, e.g., 1*5*5=25. - upsampling_square: similar to pixel shuffle, upsampling_square = upsampling * upsampling. 
- e.g., for x 4 upsampling, upsampling_square= 4*4 = 16 - - Returns: - Tensor: Filtered image with shape (n, 3*upsampling_square, h, w) - """ - n, filter_prod, upsampling_square, h, w = filters.size() - kh, kw = self.filter_size - expanded_input = F.conv2d( - x, self.expansion_filter.to(x), padding=(kh // 2, kw // 2), groups=3) # (n, 3*filter_prod, h, w) - expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(0, 3, 4, 1, - 2) # (n, h, w, 3, filter_prod) - filters = filters.permute(0, 3, 4, 1, 2) # (n, h, w, filter_prod, upsampling_square] - out = torch.matmul(expanded_input, filters) # (n, h, w, 3, upsampling_square) - return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w) - - -@ARCH_REGISTRY.register() -class DUF(nn.Module): - """Network architecture for DUF - - ``Paper: Deep Video Super-Resolution Network Using Dynamic Upsampling Filters Without Explicit Motion Compensation`` - - Reference: https://github.com/yhjo09/VSR-DUF - - For all the models below, 'adapt_official_weights' is only necessary when - loading the weights converted from the official TensorFlow weights. - Please set it to False if you are training the model from scratch. - - There are three models with different model size: DUF16Layers, DUF28Layers, - and DUF52Layers. This class is the base class for these models. - - Args: - scale (int): The upsampling factor. Default: 4. - num_layer (int): The number of layers. Default: 52. - adapt_official_weights_weights (bool): Whether to adapt the weights - translated from the official implementation. Set to false if you - want to train from scratch. Default: False. - """ - - def __init__(self, scale=4, num_layer=52, adapt_official_weights=False): - super(DUF, self).__init__() - self.scale = scale - if adapt_official_weights: - eps = 1e-3 - momentum = 1e-3 - else: # pytorch default values - eps = 1e-05 - momentum = 0.1 - - self.conv3d1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True) - self.dynamic_filter = DynamicUpsamplingFilter((5, 5)) - - if num_layer == 16: - num_block = 3 - num_grow_ch = 32 - elif num_layer == 28: - num_block = 9 - num_grow_ch = 16 - elif num_layer == 52: - num_block = 21 - num_grow_ch = 16 - else: - raise ValueError(f'Only supported (16, 28, 52) layers, but got {num_layer}.') - - self.dense_block1 = DenseBlocks( - num_block=num_block, num_feat=64, num_grow_ch=num_grow_ch, - adapt_official_weights=adapt_official_weights) # T = 7 - self.dense_block2 = DenseBlocksTemporalReduce( - 64 + num_grow_ch * num_block, num_grow_ch, adapt_official_weights=adapt_official_weights) # T = 1 - channels = 64 + num_grow_ch * num_block + num_grow_ch * 3 - self.bn3d2 = nn.BatchNorm3d(channels, eps=eps, momentum=momentum) - self.conv3d2 = nn.Conv3d(channels, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True) - - self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True) - self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True) - - self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True) - self.conv3d_f2 = nn.Conv3d( - 512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True) - - def forward(self, x): - """ - Args: - x (Tensor): Input with shape (b, 7, c, h, w) - - Returns: - Tensor: Output with shape (b, c, h * scale, w * scale) - """ - num_batches, num_imgs, _, h, w = x.size() - - x = x.permute(0, 2, 1, 3, 4) # (b, c, 7, h, w) for Conv3D - x_center = 
x[:, :, num_imgs // 2, :, :] - - x = self.conv3d1(x) - x = self.dense_block1(x) - x = self.dense_block2(x) - x = F.relu(self.bn3d2(x), inplace=True) - x = F.relu(self.conv3d2(x), inplace=True) - - # residual image - res = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True)) - - # filter - filter_ = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True)) - filter_ = F.softmax(filter_.view(num_batches, 25, self.scale**2, h, w), dim=1) - - # dynamic filter - out = self.dynamic_filter(x_center, filter_) - out += res.squeeze_(2) - out = F.pixel_shuffle(out, self.scale) - - return out diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/lpips.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/lpips.py deleted file mode 100644 index b5f19b747f2457902695213f7efcde4fdc306c1f..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/losses/lpips.py +++ /dev/null @@ -1,891 +0,0 @@ -############################################################ -# The contents below have been combined using files in the # -# following repository: # -# https://github.com/richzhang/PerceptualSimilarity # -############################################################ - -############################################################ -# __init__.py # -############################################################ - -import numpy as np -from skimage.metrics import structural_similarity -import torch - -from saicinpainting.utils import get_shape - - -class PerceptualLoss(torch.nn.Module): - def __init__(self, model='net-lin', net='alex', colorspace='rgb', model_path=None, spatial=False, use_gpu=True): - # VGG using our perceptually-learned weights (LPIPS metric) - # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss - super(PerceptualLoss, self).__init__() - self.use_gpu = use_gpu - self.spatial = spatial - self.model = DistModel() - self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, - model_path=model_path, spatial=self.spatial) - - def forward(self, pred, target, normalize=True): - """ - Pred and target are Variables. - If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] - If normalize is False, assumes the images are already between [-1,+1] - Inputs pred and target are Nx3xHxW - Output pytorch Variable N long - """ - - if normalize: - target = 2 * target - 1 - pred = 2 * pred - 1 - - return self.model(target, pred) - - -def normalize_tensor(in_feat, eps=1e-10): - norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True)) - return in_feat / (norm_factor + eps) - - -def l2(p0, p1, range=255.): - return .5 * np.mean((p0 / range - p1 / range) ** 2) - - -def psnr(p0, p1, peak=255.): - return 10 * np.log10(peak ** 2 / np.mean((1. * p0 - 1. * p1) ** 2)) - - -def dssim(p0, p1, range=255.): - return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. 
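As written, the dssim helper above would raise a NameError if ever called: the module imports structural_similarity from skimage.metrics, but the function still refers to the old compare_ssim name, and its range parameter shadows the Python builtin. A minimal corrected sketch (assuming scikit-image >= 0.19 for the channel_axis keyword, which replaces the deprecated multichannel=True):

    from skimage.metrics import structural_similarity

    def dssim(p0, p1, data_range=255.):
        # p0, p1: HxWxC image arrays on the same scale as data_range.
        # DSSIM = (1 - SSIM) / 2, so 0.0 means the two images are identical.
        ssim = structural_similarity(p0, p1, data_range=data_range, channel_axis=-1)
        return (1.0 - ssim) / 2.0

Incidentally, several of the helpers that follow (rgb2lab, tensor2im, im2tensor) are each defined twice; at import time the later definition silently wins.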
- - -def rgb2lab(in_img, mean_cent=False): - from skimage import color - img_lab = color.rgb2lab(in_img) - if (mean_cent): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - return img_lab - - -def tensor2np(tensor_obj): - # change dimension of a tensor object into a numpy array - return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0)) - - -def np2tensor(np_obj): - # change dimenion of np array into tensor array - return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False): - # image tensor to lab tensor - from skimage import color - - img = tensor2im(image_tensor) - img_lab = color.rgb2lab(img) - if (mc_only): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - if (to_norm and not mc_only): - img_lab[:, :, 0] = img_lab[:, :, 0] - 50 - img_lab = img_lab / 100. - - return np2tensor(img_lab) - - -def tensorlab2tensor(lab_tensor, return_inbnd=False): - from skimage import color - import warnings - warnings.filterwarnings("ignore") - - lab = tensor2np(lab_tensor) * 100. - lab[:, :, 0] = lab[:, :, 0] + 50 - - rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1) - if (return_inbnd): - # convert back to lab, see if we match - lab_back = color.rgb2lab(rgb_back.astype('uint8')) - mask = 1. * np.isclose(lab_back, lab, atol=2.) - mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis]) - return (im2tensor(rgb_back), mask) - else: - return im2tensor(rgb_back) - - -def rgb2lab(input): - from skimage import color - return color.rgb2lab(input / 255.) - - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -def tensor2vec(vector_tensor): - return vector_tensor.data.cpu().numpy()[:, :, 0, 0] - - -def voc_ap(rec, prec, use_07_metric=False): - """ ap = voc_ap(rec, prec, [use_07_metric]) - Compute VOC AP given precision and recall. - If use_07_metric is true, uses the - VOC 07 11 point method (default:False). - """ - if use_07_metric: - # 11 point metric - ap = 0. - for t in np.arange(0., 1.1, 0.1): - if np.sum(rec >= t) == 0: - p = 0 - else: - p = np.max(prec[rec >= t]) - ap = ap + p / 11. - else: - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): - # def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255. 
/ 2.): - # def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - - -############################################################ -# base_model.py # -############################################################ - - -class BaseModel(torch.nn.Module): - def __init__(self): - super().__init__() - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True): - self.use_gpu = use_gpu - - def forward(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s' % save_path) - network.load_state_dict(torch.load(save_path, map_location='cpu')) - - def update_learning_rate(): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'), flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i') - - -############################################################ -# dist_model.py # -############################################################ - -import os -from collections import OrderedDict -from scipy.ndimage import zoom -from tqdm import tqdm - - -class DistModel(BaseModel): - def name(self): - return self.model_name - - def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, - model_path=None, - use_gpu=True, printNet=False, spatial=False, - is_train=False, lr=.0001, beta1=0.5, version='0.1'): - ''' - INPUTS - model - ['net-lin'] for linearly calibrated network - ['net'] for off-the-shelf network - ['L2'] for L2 distance in Lab colorspace - ['SSIM'] for ssim in RGB colorspace - net - ['squeeze','alex','vgg'] - model_path - if None, will look in weights/[NET_NAME].pth - colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM - use_gpu - bool - whether or not to use a GPU - printNet - bool - whether or not to print network architecture out - spatial - bool - whether to output an array containing varying distances across spatial dimensions - spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). - spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. - spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). 
- is_train - bool - [True] for training mode - lr - float - initial learning rate - beta1 - float - initial momentum term for adam - version - 0.1 for latest, 0.0 was original (with a bug) - ''' - BaseModel.initialize(self, use_gpu=use_gpu) - - self.model = model - self.net = net - self.is_train = is_train - self.spatial = spatial - self.model_name = '%s [%s]' % (model, net) - - if (self.model == 'net-lin'): # pretrained net + linear layer - self.net = PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, - use_dropout=True, spatial=spatial, version=version, lpips=True) - kw = dict(map_location='cpu') - if (model_path is None): - import inspect - model_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'lpips_models', f'{net}.pth')) - - if (not is_train): - self.net.load_state_dict(torch.load(model_path, **kw), strict=False) - - elif (self.model == 'net'): # pretrained network - self.net = PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) - elif (self.model in ['L2', 'l2']): - self.net = L2(use_gpu=use_gpu, colorspace=colorspace) # not really a network, only for testing - self.model_name = 'L2' - elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']): - self.net = DSSIM(use_gpu=use_gpu, colorspace=colorspace) - self.model_name = 'SSIM' - else: - raise ValueError("Model [%s] not recognized." % self.model) - - self.trainable_parameters = list(self.net.parameters()) - - if self.is_train: # training mode - # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) - self.rankLoss = BCERankingLoss() - self.trainable_parameters += list(self.rankLoss.net.parameters()) - self.lr = lr - self.old_lr = lr - self.optimizer_net = torch.optim.Adam(self.trainable_parameters, lr=lr, betas=(beta1, 0.999)) - else: # test mode - self.net.eval() - - # if (use_gpu): - # self.net.to(gpu_ids[0]) - # self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) - # if (self.is_train): - # self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 - - if (printNet): - print('---------- Networks initialized -------------') - print_network(self.net) - print('-----------------------------------------------') - - def forward(self, in0, in1, retPerLayer=False): - ''' Function computes the distance between image patches in0 and in1 - INPUTS - in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] - OUTPUT - computed distances between in0 and in1 - ''' - - return self.net(in0, in1, retPerLayer=retPerLayer) - - # ***** TRAINING FUNCTIONS ***** - def optimize_parameters(self): - self.forward_train() - self.optimizer_net.zero_grad() - self.backward_train() - self.optimizer_net.step() - self.clamp_weights() - - def clamp_weights(self): - for module in self.net.modules(): - if (hasattr(module, 'weight') and module.kernel_size == (1, 1)): - module.weight.data = torch.clamp(module.weight.data, min=0) - - def set_input(self, data): - self.input_ref = data['ref'] - self.input_p0 = data['p0'] - self.input_p1 = data['p1'] - self.input_judge = data['judge'] - - # if (self.use_gpu): - # self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) - # self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) - # self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) - # self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) - - # self.var_ref = Variable(self.input_ref, requires_grad=True) - # self.var_p0 = Variable(self.input_p0, requires_grad=True) - # self.var_p1 = Variable(self.input_p1, 
requires_grad=True) - - def forward_train(self): # run forward pass - # print(self.net.module.scaling_layer.shift) - # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) - - assert False, "We shoud've not get here when using LPIPS as a metric" - - self.d0 = self(self.var_ref, self.var_p0) - self.d1 = self(self.var_ref, self.var_p1) - self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge) - - self.var_judge = Variable(1. * self.input_judge).view(self.d0.size()) - - self.loss_total = self.rankLoss(self.d0, self.d1, self.var_judge * 2. - 1.) - - return self.loss_total - - def backward_train(self): - torch.mean(self.loss_total).backward() - - def compute_accuracy(self, d0, d1, judge): - ''' d0, d1 are Variables, judge is a Tensor ''' - d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten() - judge_per = judge.cpu().numpy().flatten() - return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) - - def get_current_errors(self): - retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()), - ('acc_r', self.acc_r)]) - - for key in retDict.keys(): - retDict[key] = np.mean(retDict[key]) - - return retDict - - def get_current_visuals(self): - zoom_factor = 256 / self.var_ref.data.size()[2] - - ref_img = tensor2im(self.var_ref.data) - p0_img = tensor2im(self.var_p0.data) - p1_img = tensor2im(self.var_p1.data) - - ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0) - p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0) - p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0) - - return OrderedDict([('ref', ref_img_vis), - ('p0', p0_img_vis), - ('p1', p1_img_vis)]) - - def save(self, path, label): - if (self.use_gpu): - self.save_network(self.net.module, path, '', label) - else: - self.save_network(self.net, path, '', label) - self.save_network(self.rankLoss.net, path, 'rank', label) - - def update_learning_rate(self, nepoch_decay): - lrd = self.lr / nepoch_decay - lr = self.old_lr - lrd - - for param_group in self.optimizer_net.param_groups: - param_group['lr'] = lr - - print('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr)) - self.old_lr = lr - - -def score_2afc_dataset(data_loader, func, name=''): - ''' Function computes Two Alternative Forced Choice (2AFC) score using - distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return numpy array of length N - OUTPUTS - [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators - [1] - dictionary with following elements - d0s,d1s - N arrays containing distances between reference patch to perturbed patches - gts - N array in [0,1], preferred patch selected by human evaluators - (closer to "0" for left patch p0, "1" for right patch p1, - "0.6" means 60pct people preferred right patch, 40pct preferred left) - scores - N array in [0,1], corresponding to what percentage function agreed with humans - CONSTS - N - number of test triplets in data_loader - ''' - - d0s = [] - d1s = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist() - d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist() - gts += data['judge'].cpu().numpy().flatten().tolist() - - d0s = np.array(d0s) - d1s = 
np.array(d1s) - gts = np.array(gts) - scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5 - - return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores)) - - -def score_jnd_dataset(data_loader, func, name=''): - ''' Function computes JND score using distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return pytorch array of length N - OUTPUTS - [0] - JND score in [0,1], mAP score (area under precision-recall curve) - [1] - dictionary with following elements - ds - N array containing distances between two patches shown to human evaluator - sames - N array containing fraction of people who thought the two patches were identical - CONSTS - N - number of test triplets in data_loader - ''' - - ds = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist() - gts += data['same'].cpu().numpy().flatten().tolist() - - sames = np.array(gts) - ds = np.array(ds) - - sorted_inds = np.argsort(ds) - ds_sorted = ds[sorted_inds] - sames_sorted = sames[sorted_inds] - - TPs = np.cumsum(sames_sorted) - FPs = np.cumsum(1 - sames_sorted) - FNs = np.sum(sames_sorted) - TPs - - precs = TPs / (TPs + FPs) - recs = TPs / (TPs + FNs) - score = voc_ap(recs, precs) - - return (score, dict(ds=ds, sames=sames)) - - -############################################################ -# networks_basic.py # -############################################################ - -import torch.nn as nn -from torch.autograd import Variable -import numpy as np - - -def spatial_average(in_tens, keepdim=True): - return in_tens.mean([2, 3], keepdim=keepdim) - - -def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W - in_H = in_tens.shape[2] - scale_factor = 1. 
* out_H / in_H - - return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) - - -# Learned perceptual metric -class PNetLin(nn.Module): - def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, - version='0.1', lpips=True): - super(PNetLin, self).__init__() - - self.pnet_type = pnet_type - self.pnet_tune = pnet_tune - self.pnet_rand = pnet_rand - self.spatial = spatial - self.lpips = lpips - self.version = version - self.scaling_layer = ScalingLayer() - - if (self.pnet_type in ['vgg', 'vgg16']): - net_type = vgg16 - self.chns = [64, 128, 256, 512, 512] - elif (self.pnet_type == 'alex'): - net_type = alexnet - self.chns = [64, 192, 384, 256, 256] - elif (self.pnet_type == 'squeeze'): - net_type = squeezenet - self.chns = [64, 128, 256, 384, 384, 512, 512] - self.L = len(self.chns) - - self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) - - if (lpips): - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] - if (self.pnet_type == 'squeeze'): # 7 layers for squeezenet - self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) - self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) - self.lins += [self.lin5, self.lin6] - - def forward(self, in0, in1, retPerLayer=False): - # v0.0 - original release had a bug, where input was not scaled - in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else ( - in0, in1) - outs0, outs1 = self.net(in0_input), self.net(in1_input) - feats0, feats1, diffs = {}, {}, {} - - for kk in range(self.L): - feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 - - if (self.lpips): - if (self.spatial): - res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] - else: - if (self.spatial): - res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)] - - val = res[0] - for l in range(1, self.L): - val += res[l] - - if (retPerLayer): - return (val, res) - else: - return val - - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) - self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - ''' A single linear layer which does a 1x1 conv ''' - - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - - layers = [nn.Dropout(), ] if (use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] - self.model = nn.Sequential(*layers) - - -class Dist2LogitLayer(nn.Module): - ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' - - def 
__init__(self, chn_mid=32, use_sigmoid=True): - super(Dist2LogitLayer, self).__init__() - - layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ] - layers += [nn.LeakyReLU(0.2, True), ] - layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ] - layers += [nn.LeakyReLU(0.2, True), ] - layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ] - if (use_sigmoid): - layers += [nn.Sigmoid(), ] - self.model = nn.Sequential(*layers) - - def forward(self, d0, d1, eps=0.1): - return self.model(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1)) - - -class BCERankingLoss(nn.Module): - def __init__(self, chn_mid=32): - super(BCERankingLoss, self).__init__() - self.net = Dist2LogitLayer(chn_mid=chn_mid) - # self.parameters = list(self.net.parameters()) - self.loss = torch.nn.BCELoss() - - def forward(self, d0, d1, judge): - per = (judge + 1.) / 2. - self.logit = self.net(d0, d1) - return self.loss(self.logit, per) - - -# L2, DSSIM metrics -class FakeNet(nn.Module): - def __init__(self, use_gpu=True, colorspace='Lab'): - super(FakeNet, self).__init__() - self.use_gpu = use_gpu - self.colorspace = colorspace - - -class L2(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert (in0.size()[0] == 1) # currently only supports batchSize 1 - - if (self.colorspace == 'RGB'): - (N, C, X, Y) = in0.size() - value = torch.mean(torch.mean(torch.mean((in0 - in1) ** 2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y), - dim=3).view(N) - return value - elif (self.colorspace == 'Lab'): - value = l2(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), - tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') - ret_var = Variable(torch.Tensor((value,))) - # if (self.use_gpu): - # ret_var = ret_var.cuda() - return ret_var - - -class DSSIM(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert (in0.size()[0] == 1) # currently only supports batchSize 1 - - if (self.colorspace == 'RGB'): - value = dssim(1. * tensor2im(in0.data), 1. 
* tensor2im(in1.data), range=255.).astype('float') - elif (self.colorspace == 'Lab'): - value = dssim(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), - tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') - ret_var = Variable(torch.Tensor((value,))) - # if (self.use_gpu): - # ret_var = ret_var.cuda() - return ret_var - - -def print_network(net): - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print('Network', net) - print('Total number of parameters: %d' % num_params) - - -############################################################ -# pretrained_networks.py # -############################################################ - -from collections import namedtuple -import torch -from torchvision import models as tv - - -class squeezenet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(squeezenet, self).__init__() - pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.slice6 = torch.nn.Sequential() - self.slice7 = torch.nn.Sequential() - self.N_slices = 7 - for x in range(2): - self.slice1.add_module(str(x), pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), pretrained_features[x]) - for x in range(10, 11): - self.slice5.add_module(str(x), pretrained_features[x]) - for x in range(11, 12): - self.slice6.add_module(str(x), pretrained_features[x]) - for x in range(12, 13): - self.slice7.add_module(str(x), pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - h = self.slice6(h) - h_relu6 = h - h = self.slice7(h) - h_relu7 = h - vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7']) - out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7) - - return out - - -class alexnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(alexnet, self).__init__() - alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(2): - self.slice1.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(10, 12): - self.slice5.add_module(str(x), alexnet_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = 
self.slice5(h) - h_relu5 = h - alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) - out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) - - return out - - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - - return out - - -class resnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True, num=18): - super(resnet, self).__init__() - if (num == 18): - self.net = tv.resnet18(pretrained=pretrained) - elif (num == 34): - self.net = tv.resnet34(pretrained=pretrained) - elif (num == 50): - self.net = tv.resnet50(pretrained=pretrained) - elif (num == 101): - self.net = tv.resnet101(pretrained=pretrained) - elif (num == 152): - self.net = tv.resnet152(pretrained=pretrained) - self.N_slices = 5 - - self.conv1 = self.net.conv1 - self.bn1 = self.net.bn1 - self.relu = self.net.relu - self.maxpool = self.net.maxpool - self.layer1 = self.net.layer1 - self.layer2 = self.net.layer2 - self.layer3 = self.net.layer3 - self.layer4 = self.net.layer4 - - def forward(self, X): - h = self.conv1(X) - h = self.bn1(h) - h = self.relu(h) - h_relu1 = h - h = self.maxpool(h) - h = self.layer1(h) - h_conv2 = h - h = self.layer2(h) - h_conv3 = h - h = self.layer3(h) - h_conv4 = h - h = self.layer4(h) - h_conv5 = h - - outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5']) - out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) - - return out diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/split_long_conversation.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/split_long_conversation.py deleted file mode 100644 index 9362a922833da9bacb72c9c7093987013c6c553c..0000000000000000000000000000000000000000 --- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/data/split_long_conversation.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -Split long conversations based on certain max length. 
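(Concretely, each conversation is tokenized message pair by message pair — human then gpt — and pairs are accumulated greedily: a new sub-conversation is started whenever adding the next pair would push the running token count past the --max-length limit, as implemented in split_one_sample below.)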
- -Usage: python3 -m fastchat.data.split_long_conversation \ - --in sharegpt_clean.json \ - --out sharegpt_split.json \ - --model-name-or-path $ -""" -import argparse -from concurrent.futures import ProcessPoolExecutor -import json -from typing import Dict, Sequence, Optional - -import transformers -from tqdm import tqdm - -from fastchat import conversation as conversation_lib - - -def make_sample(sample, start_idx, end_idx): - assert (end_idx - start_idx) % 2 == 0 - return { - "id": sample["id"] + "_" + str(start_idx), - "conversations": sample["conversations"][start_idx:end_idx], - } - - -tokenizer = max_length = None - - -def split_one_sample(sample): - tokenized_lens = [] - conversations = sample["conversations"] - conversations = conversations[: len(conversations) // 2 * 2] - for c in conversations: - length = len(tokenizer(c["value"]).input_ids) + 6 - tokenized_lens.append(length) - - start_idx = 0 - cur_len = 0 - - if len(conversations) % 2 != 0 or len(conversations) < 2: - return [] - - new_samples = [] - for i in range(0, len(conversations), 2): - tmp_len = tokenized_lens[i] + tokenized_lens[i + 1] - if cur_len + tmp_len > max_length: - new_samples.append(make_sample(sample, start_idx, i)) - start_idx = i - cur_len = 0 - elif i == len(conversations) - 2: - new_samples.append(make_sample(sample, start_idx, i + 2)) - - cur_len += tmp_len - - return new_samples - - -def split_all(content, begin, end, tokenizer_, max_length_): - """ - Keep the maximum round of conversations within the max token length constraint - """ - global tokenizer, max_length - tokenizer = tokenizer_ - max_length = max_length_ - - content = content[begin:end] - new_content = [] - - with ProcessPoolExecutor() as executor: - for result in tqdm(executor.map(split_one_sample, content), total=len(content)): - new_content.extend(result) - - return new_content - - -def filter_invalid_roles(content): - new_content = [] - for i, c in enumerate(content): - roles = ["human", "gpt"] - if len(c["conversations"]) <= 0: - continue - - valid = True - for j, s in enumerate(c["conversations"]): - if s["from"] != roles[j % 2]: - valid = False - break - - if valid: - new_content.append(c) - - return new_content - - -def main(args): - content = json.load(open(args.in_file, "r")) - tokenizer = transformers.AutoTokenizer.from_pretrained( - args.model_name_or_path, - model_max_length=args.max_length, - padding_side="right", - use_fast=False, - ) - new_content = split_all(content, args.begin, args.end, tokenizer, args.max_length) - new_content = filter_invalid_roles(new_content) - - print(f"total: {len(content)}, new: {len(new_content)}") - json.dump(new_content, open(args.out_file, "w"), indent=2) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--in-file", type=str, required=True) - parser.add_argument("--out-file", type=str, default="sharegpt_split.json") - parser.add_argument("--begin", type=int) - parser.add_argument("--end", type=int) - parser.add_argument("--model-name-or-path", type=str, required=True) - parser.add_argument("--max-length", type=int, default=2048) - args = parser.parse_args() - main(args) diff --git a/spaces/Jamkonams/AutoGPT/autogpt/commands/git_operations.py b/spaces/Jamkonams/AutoGPT/autogpt/commands/git_operations.py deleted file mode 100644 index 028f3b8da44c85e01d20ccc5d4a5fa72c759008b..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/commands/git_operations.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Git operations for autogpt""" -import 
git - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -CFG = Config() - - -def clone_repository(repo_url: str, clone_path: str) -> str: - """Clone a GitHub repository locally - - Args: - repo_url (str): The URL of the repository to clone - clone_path (str): The path to clone the repository to - - Returns: - str: The result of the clone operation""" - split_url = repo_url.split("//") - auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url) - safe_clone_path = path_in_workspace(clone_path) - try: - git.Repo.clone_from(auth_repo_url, safe_clone_path) - return f"""Cloned {repo_url} to {safe_clone_path}""" - except Exception as e: - return f"Error: {str(e)}" diff --git a/spaces/KPCGD/bingo/src/components/ui/button.tsx b/spaces/KPCGD/bingo/src/components/ui/button.tsx deleted file mode 100644 index 281da005124fa94c89a9a9db7605748a92b60865..0000000000000000000000000000000000000000 --- a/spaces/KPCGD/bingo/src/components/ui/button.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import * as React from 'react' -import { Slot } from '@radix-ui/react-slot' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const buttonVariants = cva( - 'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50', - { - variants: { - variant: { - default: - 'bg-primary text-primary-foreground shadow-md hover:bg-primary/90', - destructive: - 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: - 'border border-input hover:bg-accent hover:text-accent-foreground', - secondary: - 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 shadow-none hover:underline' - }, - size: { - default: 'h-8 px-4 py-2', - sm: 'h-8 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-8 w-8 p-0' - } - }, - defaultVariants: { - variant: 'default', - size: 'default' - } - } -) - -export interface ButtonProps - extends React.ButtonHTMLAttributes, - VariantProps { - asChild?: boolean -} - -const Button = React.forwardRef( - ({ className, variant, size, asChild = false, ...props }, ref) => { - const Comp = asChild ? 
Slot : 'button' - return ( - - ) - } -) -Button.displayName = 'Button' - -export { Button, buttonVariants } diff --git a/spaces/KalbeDigitalLab/ham1000-skin-classification/README.md b/spaces/KalbeDigitalLab/ham1000-skin-classification/README.md deleted file mode 100644 index acf1d20540da24461cd0ab67ee511f9ca1751569..0000000000000000000000000000000000000000 --- a/spaces/KalbeDigitalLab/ham1000-skin-classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Skin Cancer Classification -emoji: 🏆 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/KalbeDigitalLab/pathology_nuclei_segmentation_classification/style.css b/spaces/KalbeDigitalLab/pathology_nuclei_segmentation_classification/style.css deleted file mode 100644 index 7bab106934b9856dd6c8c4f21379305eb6960d58..0000000000000000000000000000000000000000 --- a/spaces/KalbeDigitalLab/pathology_nuclei_segmentation_classification/style.css +++ /dev/null @@ -1,79 +0,0 @@ -* { - box-sizing: border-box; -} - -body { - font-family: 'Source Sans Pro', sans-serif; - font-size: 16px; -} - -.container { - width: 100%; - margin: 0 auto; -} - -.title { - font-size: 24px !important; - font-weight: 600 !important; - letter-spacing: 0em; - text-align: center; - color: #374159 !important; -} - -.subtitle { - font-size: 24px !important; - font-style: italic; - font-weight: 400 !important; - letter-spacing: 0em; - text-align: center; - color: #1d652a !important; - padding-bottom: 0.5em; -} - -.overview-heading { - font-size: 24px !important; - font-weight: 600 !important; - letter-spacing: 0em; - text-align: left; -} - -.overview-content { - font-size: 14px !important; - font-weight: 400 !important; - line-height: 30px !important; - letter-spacing: 0em; - text-align: left; -} - -.content-image { - width: 100% !important; - height: auto !important; -} - -.vl { - border-left: 5px solid #1d652a; - padding-left: 20px; - color: #1d652a !important; -} - -.grid-container { - display: grid; - grid-template-columns: 1fr 2fr; - gap: 20px; - align-items: flex-start; - margin-bottom: 0.7em; -} - -@media screen and (max-width: 768px) { - .container { - width: 90%; - } - - .grid-container { - display: block; - } - - .overview-heading { - font-size: 18px !important; - } -} \ No newline at end of file diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/loss.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/loss.py deleted file mode 100644 index 301248cc1ef24c549499e10396ae6c3afab3ba09..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/loss.py +++ /dev/null @@ -1,50 +0,0 @@ -from typing import Dict -from typing import Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..utils.nets_utils import make_pad_mask - - -class MaskedMSELoss(nn.Module): - def __init__(self, frames_per_step): - super().__init__() - self.frames_per_step = frames_per_step - self.mel_loss_criterion = nn.MSELoss(reduction='none') - # self.loss = nn.MSELoss() - self.stop_loss_criterion = nn.BCEWithLogitsLoss(reduction='none') - - def get_mask(self, lengths, max_len=None): - # lengths: [B,] - if max_len is None: - max_len = torch.max(lengths) - batch_size = lengths.size(0) - seq_range = torch.arange(0, max_len).long() - seq_range_expand = 
seq_range.unsqueeze(0).expand(batch_size, max_len).to(lengths.device) - seq_length_expand = lengths.unsqueeze(1).expand_as(seq_range_expand) - return (seq_range_expand < seq_length_expand).float() - - def forward(self, mel_pred, mel_pred_postnet, mel_trg, lengths, - stop_target, stop_pred): - ## process stop_target - B = stop_target.size(0) - stop_target = stop_target.reshape(B, -1, self.frames_per_step)[:, :, 0] - stop_lengths = torch.ceil(lengths.float() / self.frames_per_step).long() - stop_mask = self.get_mask(stop_lengths, int(mel_trg.size(1)/self.frames_per_step)) - - mel_trg.requires_grad = False - # (B, T, 1) - mel_mask = self.get_mask(lengths, mel_trg.size(1)).unsqueeze(-1) - # (B, T, D) - mel_mask = mel_mask.expand_as(mel_trg) - mel_loss_pre = (self.mel_loss_criterion(mel_pred, mel_trg) * mel_mask).sum() / mel_mask.sum() - mel_loss_post = (self.mel_loss_criterion(mel_pred_postnet, mel_trg) * mel_mask).sum() / mel_mask.sum() - - mel_loss = mel_loss_pre + mel_loss_post - - # stop token loss - stop_loss = torch.sum(self.stop_loss_criterion(stop_pred, stop_target) * stop_mask) / stop_mask.sum() - - return mel_loss, stop_loss diff --git a/spaces/Kimata/Sanskrit-TTS/utils/cleaners.py b/spaces/Kimata/Sanskrit-TTS/utils/cleaners.py deleted file mode 100644 index 868a236f3fa483f12e7a56120834662c80e1450d..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/utils/cleaners.py +++ /dev/null @@ -1,5 +0,0 @@ -def sanskrit_cleaners(text): - text = text.replace('॥', '।').replace('ॐ', 'ओम्') - if len(text)==0 or text[-1] != '।': - text += ' ।' - return text diff --git a/spaces/LanguageBind/LanguageBind/languagebind/video/tokenization_video.py b/spaces/LanguageBind/LanguageBind/languagebind/video/tokenization_video.py deleted file mode 100644 index 2864429c098770fd37fd61e8a7b82d1fee5b12dd..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/languagebind/video/tokenization_video.py +++ /dev/null @@ -1,77 +0,0 @@ -from transformers import CLIPTokenizer -from transformers.utils import logging - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = { - "vocab_file": "vocab.json", - "merges_file": "merges.txt", -} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "lb203/LanguageBind-Video": "https://huggingface.co/lb203/LanguageBind-Video/resolve/main/vocab.json", - }, - "merges_file": { - "lb203/LanguageBind-Video": "https://huggingface.co/lb203/LanguageBind-Video/resolve/main/merges.txt", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "lb203/LanguageBind-Video": 77, -} - - -PRETRAINED_INIT_CONFIGURATION = { - "lb203/LanguageBind-Video": {}, -} - -class LanguageBindVideoTokenizer(CLIPTokenizer): - """ - Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding. - - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - merges_file (`str`): - Path to the merges file. - errors (`str`, *optional*, defaults to `"replace"`): - Paradigm to follow when decoding bytes to UTF-8. See - [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. - unk_token (`str`, *optional*, defaults to `<|endoftext|>`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. 
- bos_token (`str`, *optional*, defaults to `<|startoftext|>`): - The beginning of sequence token. - eos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The end of sequence token. - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - merges_file, - errors="replace", - unk_token="<|endoftext|>", - bos_token="<|startoftext|>", - eos_token="<|endoftext|>", - pad_token="<|endoftext|>", # hack to enable padding - **kwargs, - ): - super(LanguageBindVideoTokenizer, self).__init__( - vocab_file, - merges_file, - errors, - unk_token, - bos_token, - eos_token, - pad_token, # hack to enable padding - **kwargs,) \ No newline at end of file diff --git a/spaces/Latryna/roop/app.py b/spaces/Latryna/roop/app.py deleted file mode 100644 index fe9a516e99129636b838903af8a4fab32f15d9cf..0000000000000000000000000000000000000000 --- a/spaces/Latryna/roop/app.py +++ /dev/null @@ -1,72 +0,0 @@ -# -* coding:UTF-8 -* -# !/usr/bin/env python -import numpy as np -import gradio as gr -import roop.globals -from roop.core import ( - start, - decode_execution_providers, - suggest_max_memory, - suggest_execution_threads, -) -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import normalize_output_path -import os -from PIL import Image - - -def swap_face(source_file, target_file,doFaceEnhancer): - - source_path = "input.jpg" - target_path = "target.jpg" - - source_image = Image.fromarray(source_file) - source_image.save(source_path) - target_image = Image.fromarray(target_file) - target_image.save(target_path) - - print("source_path: ", source_path) - print("target_path: ", target_path) - - roop.globals.source_path = source_path - roop.globals.target_path = target_path - output_path = "output.jpg" - roop.globals.output_path = normalize_output_path( - roop.globals.source_path, roop.globals.target_path, output_path - ) - if doFaceEnhancer == True: - roop.globals.frame_processors = ["face_swapper","face_enhancer"] - else: - roop.globals.frame_processors = ["face_swapper"] - roop.globals.headless = True - roop.globals.keep_fps = True - roop.globals.keep_audio = True - roop.globals.keep_frames = False - roop.globals.many_faces = False - roop.globals.video_encoder = "libx264" - roop.globals.video_quality = 18 - roop.globals.max_memory = suggest_max_memory() - roop.globals.execution_providers = decode_execution_providers(["cuda"]) - roop.globals.execution_threads = suggest_execution_threads() - - print( - "start process", - roop.globals.source_path, - roop.globals.target_path, - roop.globals.output_path, - ) - - for frame_processor in get_frame_processors_modules( - roop.globals.frame_processors - ): - if not frame_processor.pre_check(): - return - - start() - return output_path - - -app = gr.Interface( - fn=swap_face, inputs=[gr.Image(), gr.Image(),gr.Checkbox(label="face_enhancer?", info="do face enhancer?")], outputs="image" -) -app.launch() diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/nets_123812KB.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/nets_123812KB.py deleted file mode 100644 index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/uvr5_pack/lib_v5/nets_123812KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from 
torch import nn -import torch.nn.functional as F - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/LeoLeoLeo1/ChuanhuChatGPT/ChuanhuChatbot.py b/spaces/LeoLeoLeo1/ChuanhuChatGPT/ChuanhuChatbot.py deleted file mode 100644 index 3accfc0a0988200809532aeffbc3fc52af14ea5d..0000000000000000000000000000000000000000 --- a/spaces/LeoLeoLeo1/ChuanhuChatGPT/ChuanhuChatbot.py +++ /dev/null @@ -1,159 +0,0 @@ -import gradio as gr -# import openai -import os -import sys -import argparse -from utils import * -from presets 
import * - - -my_api_key = "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" # 在这里输入你的 API 密钥 - -#if we are running in Docker -if os.environ.get('dockerrun') == 'yes': - dockerflag = True -else: - dockerflag = False - -authflag = False - -if dockerflag: - my_api_key = os.environ.get('my_api_key') - if my_api_key == "empty": - print("Please give a api key!") - sys.exit(1) - #auth - username = os.environ.get('USERNAME') - password = os.environ.get('PASSWORD') - if not (isinstance(username, type(None)) or isinstance(password, type(None))): - authflag = True -else: - if not my_api_key and os.path.exists("api_key.txt") and os.path.getsize("api_key.txt"): - with open("api_key.txt", "r") as f: - my_api_key = f.read().strip() - if os.path.exists("auth.json"): - with open("auth.json", "r") as f: - auth = json.load(f) - username = auth["username"] - password = auth["password"] - if username != "" and password != "": - authflag = True - -gr.Chatbot.postprocess = postprocess - -with gr.Blocks(css=customCSS) as demo: - gr.HTML(title) - with gr.Row(): - keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...", - value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True) - use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option) - chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B")) - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - TRUECOMSTANT = gr.State(True) - FALSECONSTANT = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style( - container=False) - with gr.Column(min_width=50, scale=1): - submitBtn = gr.Button("🚀", variant="primary") - with gr.Row(): - emptyBtn = gr.Button("🧹 新的对话") - retryBtn = gr.Button("🔄 重新生成") - delLastBtn = gr.Button("🗑️ 删除最近一条对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - status_display = gr.Markdown("status: ready") - systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...", - label="System prompt", value=initial_prompt).style(container=True) - with gr.Accordion(label="加载Prompt模板", open=False): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0]) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - templaeFileReadBtn = gr.Button("📂 读入模板") - with gr.Row(): - with gr.Column(scale=6): - templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0]) - with gr.Column(scale=1): - templateApplyBtn = gr.Button("⬇️ 应用") - with gr.Accordion(label="保存/加载对话历史记录", open=False): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 保存对话") - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0]) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - historyReadBtn = 
gr.Button("📂 读入对话") - #inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("参数", open=False): - top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05, - interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, - step=0.1, interactive=True, label="Temperature",) - #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",) - #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", ) - gr.Markdown(description) - - - user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - user_input.submit(reset_textbox, [], [user_input]) - - submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - submitBtn.click(reset_textbox, [], [user_input]) - - emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True) - - retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - - delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [ - chatbot, history, token_count, status_display], show_progress=True) - - reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True) - - saveHistoryBtn.click(save_chat_history, [ - saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True) - - saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown]) - - historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown]) - - historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True) - - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - - templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True) - - templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True) - -print("川虎的温馨提示:访问 http://localhost:7860 查看界面") -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "liyi's ChatGPT 🚀" - -if __name__ == "__main__": - #if running in Docker - if dockerflag: - if authflag: - demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password)) - else: - demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) - #if not running in Docker - else: - if authflag: - demo.queue().launch(share=False, auth=(username, password)) - else: - demo.queue().launch(share=False) # 改为 share=True 可以创建公开分享链接 - #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - #demo.queue().launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git 
a/spaces/Liu-LAB/GPT-academic/crazy_functions/pdf_fns/parse_pdf.py b/spaces/Liu-LAB/GPT-academic/crazy_functions/pdf_fns/parse_pdf.py deleted file mode 100644 index 8a7117adb61a1fb67c911e04d8968ac803885dd1..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/crazy_functions/pdf_fns/parse_pdf.py +++ /dev/null @@ -1,30 +0,0 @@ -import requests -import random -from functools import lru_cache -class GROBID_OFFLINE_EXCEPTION(Exception): pass - -def get_avail_grobid_url(): - from toolbox import get_conf - GROBID_URLS, = get_conf('GROBID_URLS') - if len(GROBID_URLS) == 0: return None - try: - _grobid_url = random.choice(GROBID_URLS) # random load balancing - if _grobid_url.endswith('/'): _grobid_url = _grobid_url.rstrip('/') - res = requests.get(_grobid_url+'/api/isalive') - if res.text=='true': return _grobid_url - else: return None - except Exception: - return None - -@lru_cache(maxsize=32) -def parse_pdf(pdf_path, grobid_url): - import scipdf # pip install scipdf_parser - if grobid_url.endswith('/'): grobid_url = grobid_url.rstrip('/') - try: - article_dict = scipdf.parse_pdf_to_dict(pdf_path, grobid_url=grobid_url) - except GROBID_OFFLINE_EXCEPTION: - raise GROBID_OFFLINE_EXCEPTION("The GROBID service is unavailable. Change GROBID_URL in the config; it can point to a local GROBID service.") - except Exception: - raise RuntimeError("Failed to parse the PDF; check whether the file is corrupted.") - return article_dict - diff --git a/spaces/LuxOAI/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/LuxOAI/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 25c36ab679f0b77bcf754d940ee4f3962c41b131..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature] " -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. 
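A quick gloss on the `parse_pdf.py` module deleted above: `get_avail_grobid_url` randomly load-balances across the configured GROBID servers, and `parse_pdf` memoizes results per (path, URL) via `lru_cache`. A minimal usage sketch follows; the PDF path and the result keys are illustrative assumptions about `scipdf`'s output, not guarantees from the file above:

```python
# Hypothetical usage of the helpers deleted above; assumes GROBID_URLS is set
# in the toolbox config and that scipdf_parser is installed.
from crazy_functions.pdf_fns.parse_pdf import get_avail_grobid_url, parse_pdf

grobid_url = get_avail_grobid_url()  # a randomly chosen live server, or None
if grobid_url is None:
    raise RuntimeError("No reachable GROBID server; check GROBID_URLS in the config.")

article = parse_pdf("example.pdf", grobid_url)  # dict, cached by lru_cache
print(article.get("title"), len(article.get("sections", [])))
```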
diff --git a/spaces/MCkernick/Image_Restoration_Colorization/utils.py b/spaces/MCkernick/Image_Restoration_Colorization/utils.py deleted file mode 100644 index 38824291160deec62dafd5865fdbebc1824c3d3b..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/utils.py +++ /dev/null @@ -1,32 +0,0 @@ -import cv2 -import os -import shutil -import sys -from subprocess import call - -def run_cmd(command): - try: - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - -def Restoration(image): - os.makedirs("Temp") - os.makedirs("Temp/input") - print(type(image)) - cv2.imwrite("Temp/input/input_img.png", image) - - command = ("python run.py --input_folder " - + "Temp/input" - + " --output_folder " - + "Temp" - + " --GPU " - + "-1" - + " --with_scratch") - run_cmd(command) - - result = cv2.imread("Temp/final_output/input_img.png") - shutil.rmtree("Temp") - return result \ No newline at end of file diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/modules.py b/spaces/Mahiruoshi/BangDream-Bert-VITS2/modules.py deleted file mode 100644 index b1f89a2f837f190a3dd5de52e7a4e183f1024306..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/modules.py +++ /dev/null @@ -1,597 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform -from attentions import Encoder - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 1." 
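-# NOTE (added commentary, not in the original file): the stack built below is n_layers -# of Conv1d -> LayerNorm -> ReLU -> Dropout blocks; the closing 1x1 projection is -# zero-initialized, so the residual connection in forward() starts out as the identity.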
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
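# NOTE (added commentary, not in the original file): the loop below is the WaveNet-style -# gated activation unit: each dilated conv emits 2*hidden_channels, which -# commons.fused_add_tanh_sigmoid_multiply combines with the optional global conditioning -# g as tanh(a) * sigmoid(b); the result is then split into a residual path and skip -# contributions that accumulate into `output`. -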
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
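-# NOTE (added commentary, not in the original file): the projection above packs -# num_bins*3 - 1 spline parameters per (channel, time) position: num_bins bin widths, -# num_bins bin heights, and num_bins - 1 interior knot derivatives for the monotonic -# rational-quadratic spline applied to x1 below; the sqrt(filter_channels) divisor -# stabilizes the scale of the raw parameters.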
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x - - -class TransformerCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=0, - mean_only=False, - wn_sharing_parameter=None, - gin_channels=0, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = ( - Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - isflow=True, - gin_channels=gin_channels, - ) - if wn_sharing_parameter is None - else wn_sharing_parameter - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x diff --git a/spaces/Manmay/tortoise-tts/tortoise/models/xtransformers.py b/spaces/Manmay/tortoise-tts/tortoise/models/xtransformers.py deleted file mode 100644 index 8be2df455c46bf8c89efb0d5fdbb704a9fb622f6..0000000000000000000000000000000000000000 --- a/spaces/Manmay/tortoise-tts/tortoise/models/xtransformers.py +++ /dev/null @@ -1,1248 +0,0 @@ -import math -from collections import namedtuple -from functools import partial -from inspect import isfunction - -import torch -import torch.nn.functional as F -from einops import rearrange, repeat -from torch import nn, einsum - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('LayerIntermediates', [ - 'hiddens', - 'attn_intermediates', - 'past_key_values', -]) - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def cast_tuple(val, depth): - return val if isinstance(val, tuple) else (val,) * 
depth - - -class always(): - def __init__(self, val): - self.val = val - - def __call__(self, *args, **kwargs): - return self.val - - -class not_equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x != self.val - - -class equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x == self.val - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -def l2norm(t): - return F.normalize(t, p=2, dim=-1) - - -# init helpers - -def init_zero_(layer): - nn.init.constant_(layer.weight, 0.) - if exists(layer.bias): - nn.init.constant_(layer.bias, 0.) - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# activations - -class ReluSquared(nn.Module): - def forward(self, x): - return F.relu(x) ** 2 - - -# positional embeddings - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.scale = dim ** -0.5 - self.emb = nn.Embedding(max_seq_len, dim) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - pos_emb = self.emb(n) - pos_emb = rearrange(pos_emb, 'n d -> () n d') - return pos_emb * self.scale - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return rearrange(emb, 'n d -> () n d') - - -class RelativePositionBias(nn.Module): - def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8): - super().__init__() - self.scale = scale - self.causal = causal - self.num_buckets = num_buckets - self.max_distance = max_distance - self.relative_attention_bias = nn.Embedding(num_buckets, heads) - - @staticmethod - def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128): - ret = 0 - n = -relative_position - if not causal: - num_buckets //= 2 - ret += (n < 0).long() * num_buckets - n = torch.abs(n) - else: - n = torch.max(n, torch.zeros_like(n)) - - max_exact = num_buckets // 2 - is_small = n < max_exact - - val_if_large = max_exact + ( - torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) - ).long() - val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) - - ret += torch.where(is_small, n, val_if_large) - return ret - - def forward(self, qk_dots): - i, j, device = *qk_dots.shape[-2:], qk_dots.device - q_pos = torch.arange(i, dtype=torch.long, device=device) - k_pos = torch.arange(j, dtype=torch.long, device=device) - rel_pos = k_pos[None, :] - q_pos[:, None] - rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets, - max_distance=self.max_distance) - values = self.relative_attention_bias(rp_bucket) - bias = rearrange(values, 'i j h -> () h i j') - return qk_dots + (bias * self.scale) - - -class AlibiPositionalBias(nn.Module): - def __init__(self, heads, **kwargs): - super().__init__() - self.heads = heads - slopes = torch.Tensor(self._get_slopes(heads)) - slopes = rearrange(slopes, 'h -> () h () ()') - self.register_buffer('slopes', slopes, persistent=False) - self.register_buffer('bias', None, persistent=False) - - @staticmethod - def _get_slopes(heads): - def get_slopes_power_of_2(n): - start = (2 ** (-2 ** -(math.log2(n) - 3))) - ratio = start - return [start * ratio ** i for i in range(n)] - - if math.log2(heads).is_integer(): - return get_slopes_power_of_2(heads) - - closest_power_of_2 = 2 ** math.floor(math.log2(heads)) - return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][ - :heads - closest_power_of_2] - - def forward(self, qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - if exists(self.bias) and self.bias.shape[-1] >= j: - return qk_dots + self.bias[..., :j] - - bias = torch.arange(j, device=device) - bias = rearrange(bias, 'j -> () () () j') - bias = bias * self.slopes - - num_heads_unalibied = h - bias.shape[1] - bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied)) - - self.register_buffer('bias', bias, persistent=False) - return qk_dots + self.bias - - -class LearnedAlibiPositionalBias(AlibiPositionalBias): - def __init__(self, heads, bidirectional=False): - super().__init__(heads) - los_slopes = torch.log(self.slopes) - self.learned_logslopes = nn.Parameter(los_slopes) - - self.bidirectional = bidirectional - if self.bidirectional: - self.learned_logslopes_future = nn.Parameter(los_slopes) - - def forward(self, 
qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - def get_slopes(param): - return F.pad(param.exp(), (0, 0, 0, 0, 0, h - param.shape[1])) - - if exists(self.bias) and self.bias.shape[-1] >= j: - bias = self.bias[..., :i, :j] - else: - i_arange = torch.arange(i, device=device) - j_arange = torch.arange(j, device=device) - bias = rearrange(j_arange, 'j -> 1 1 1 j') - rearrange(i_arange, 'i -> 1 1 i 1') - self.register_buffer('bias', bias, persistent=False) - - if self.bidirectional: - past_slopes = get_slopes(self.learned_logslopes) - future_slopes = get_slopes(self.learned_logslopes_future) - bias = torch.tril(bias * past_slopes) + torch.triu(bias * future_slopes) - else: - slopes = get_slopes(self.learned_logslopes) - bias = bias * slopes - - return qk_dots + bias - - -class RotaryEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, max_seq_len, device): - t = torch.arange(max_seq_len, device=device).type_as(self.inv_freq) - freqs = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1) - return rearrange(emb, 'n d -> () () n d') - - -def rotate_half(x): - x = rearrange(x, '... (j d) -> ... j d', j=2) - x1, x2 = x.unbind(dim=-2) - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(t, freqs): - seq_len = t.shape[-2] - freqs = freqs[:, :, -seq_len:] - return (t * freqs.cos()) + (rotate_half(t) * freqs.sin()) - - -# norms - -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - scale_fn = lambda t: t * self.value - - if not isinstance(out, tuple): - return scale_fn(out) - - return (scale_fn(out[0]), *out[1:]) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - rezero_fn = lambda t: t * self.g - - if not isinstance(out, tuple): - return rezero_fn(out) - - return (rezero_fn(out[0]), *out[1:]) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSScaleShiftNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - self.scale_shift_process = nn.Linear(dim * 2, dim * 2) - - def forward(self, x, norm_scale_shift_inp): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - norm = x / norm.clamp(min=self.eps) * self.g - - ss_emb = self.scale_shift_process(norm_scale_shift_inp) - scale, shift = torch.chunk(ss_emb, 2, dim=1) - h = norm * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) - return h - - -# residual and residual gates - -class Residual(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.residual_scale = 
nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# token shifting - -def shift(t, amount, mask=None): - if amount == 0: - return t - - if exists(mask): - t = t.masked_fill(~mask[..., None], 0.) - - return F.pad(t, (0, 0, amount, -amount), value=0.) - - -class ShiftTokens(nn.Module): - def __init__(self, shifts, fn): - super().__init__() - self.fn = fn - self.shifts = tuple(shifts) - - def forward(self, x, **kwargs): - mask = kwargs.get('mask', None) - shifts = self.shifts - segments = len(shifts) - feats_per_shift = x.shape[-1] // segments - splitted = x.split(feats_per_shift, dim=-1) - segments_to_shift, rest = splitted[:segments], splitted[segments:] - segments_to_shift = list(map(lambda args: shift(*args, mask=mask), zip(segments_to_shift, shifts))) - x = torch.cat((*segments_to_shift, *rest), dim=-1) - return self.fn(x, **kwargs) - - -# feedforward - -class GLU(nn.Module): - def __init__(self, dim_in, dim_out, activation): - super().__init__() - self.act = activation - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * self.act(gate) - - -class FeedForward(nn.Module): - def __init__( - self, - dim, - dim_out=None, - mult=4, - glu=False, - relu_squared=False, - post_act_ln=False, - dropout=0., - zero_init_output=False - ): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - activation = ReluSquared() if relu_squared else nn.GELU() - - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - activation - ) if not glu else GLU(dim, inner_dim, activation) - - self.net = nn.Sequential( - project_in, - nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(), - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - # init last linear layer to 0 - if zero_init_output: - init_zero_(self.net[-1]) - - def forward(self, x): - return self.net(x) - - -# attention. 
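-# NOTE (added commentary, not in the original file): the Attention class below gates -# many optional variants behind flags: talking heads, collaborative heads, head scaling, -# sparse top-k masking, persistent memory key/values, GLU value gating, cosine-sim -# (qk_norm) attention, T5-style relative position bias, and a key/value cache -# (layer_past) for incremental decoding.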
- -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - talking_heads=False, - head_scale=False, - collab_heads=False, - collab_compression=.3, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False, - gate_values=False, - zero_init_output=False, - max_attend_past=None, - qk_norm=False, - scale_init_value=None, - rel_pos_bias=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - ): - super().__init__() - self.scale = dim_head ** -0.5 - - self.heads = heads - self.causal = causal - self.max_attend_past = max_attend_past - - qk_dim = v_dim = dim_head * heads - - # collaborative heads - self.collab_heads = collab_heads - if self.collab_heads: - qk_dim = int(collab_compression * qk_dim) - self.collab_mixing = nn.Parameter(torch.randn(heads, qk_dim)) - - self.to_q = nn.Linear(dim, qk_dim, bias=False) - self.to_k = nn.Linear(dim, qk_dim, bias=False) - self.to_v = nn.Linear(dim, v_dim, bias=False) - - self.dropout = nn.Dropout(dropout) - - # add GLU gating for aggregated values, from alphafold2 - self.to_v_gate = None - if gate_values: - self.to_v_gate = nn.Linear(dim, v_dim) - nn.init.constant_(self.to_v_gate.weight, 0) - nn.init.constant_(self.to_v_gate.bias, 1) - - # cosine sim attention - self.qk_norm = qk_norm - if qk_norm: - scale_init_value = default(scale_init_value, - -3) # if not provided, initialize as though it were sequence length of 1024 - self.scale = nn.Parameter(torch.ones(1, heads, 1, 1) * scale_init_value) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # head scaling - self.head_scale = head_scale - if head_scale: - self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(v_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(v_dim, dim) - - self.rel_pos_bias = rel_pos_bias - if rel_pos_bias: - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = RelativePositionBias(scale=dim_head ** 0.5, causal=causal, heads=heads, - num_buckets=rel_pos_num_buckets, max_distance=rel_pos_max_distance) - - # init output projection 0 - if zero_init_output: - init_zero_(self.to_out) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - attn_mask=None, - sinusoidal_emb=None, - rotary_pos_emb=None, - prev_attn=None, - mem=None, - layer_past=None, - ): - b, n, _, h, talking_heads, collab_heads, head_scale, scale, device, has_context = *x.shape, self.heads, self.talking_heads, self.collab_heads, self.head_scale, self.scale, x.device, exists( - context) - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past 
cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - if not collab_heads: - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - else: - q = einsum('b i d, h d -> b h i d', q, self.collab_mixing) - k = rearrange(k, 'b n d -> b () n d') - v = rearrange(v, 'b n (h d) -> b h n d', h=h) - - if layer_past is not None: - past_key, past_value = layer_past - k = torch.cat([past_key, k], dim=-2) - v = torch.cat([past_value, v], dim=-2) - k_cache = k - v_cache = v - - if exists(rotary_pos_emb) and not has_context: - l = rotary_pos_emb.shape[-1] - (ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) - ql, kl, vl = map(lambda t: apply_rotary_pos_emb(t, rotary_pos_emb), (ql, kl, vl)) - q, k, v = map(lambda t: torch.cat(t, dim=-1), ((ql, qr), (kl, kr), (vl, vr))) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - if collab_heads: - k = k.expand(-1, h, -1, -1) - - if self.qk_norm: - q, k = map(l2norm, (q, k)) - scale = 1 / (self.scale.exp().clamp(min=1e-2)) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots.clone() - - if talking_heads: - dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if self.rel_pos_bias: - dots = self.rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if exists(attn_mask): - assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' - if attn_mask.ndim == 2: - attn_mask = rearrange(attn_mask, 'i j -> () () i j') - elif attn_mask.ndim == 3: - attn_mask = rearrange(attn_mask, 'h i j -> () h i j') - dots.masked_fill_(~attn_mask, mask_value) - - if exists(self.max_attend_past): - i, j = dots.shape[-2:] - range_q = torch.arange(j - i, j, device=device) - range_k = torch.arange(j, device=device) - dist = rearrange(range_q, 'i -> () () i ()') - rearrange(range_k, 'j -> () () () j') - mask = dist > self.max_attend_past - dots.masked_fill_(mask, mask_value) - del mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn.clone() - - attn = 
self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - - if head_scale: - out = out * self.head_scale_params - - out = rearrange(out, 'b h n d -> b n (h d)') - - if exists(self.to_v_gate): - gates = self.to_v_gate(x) - out = out * gates.sigmoid() - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates, k_cache, v_cache - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rms_scaleshift_norm=False, - use_rmsnorm=False, - use_rezero=False, - alibi_pos_bias=False, - alibi_num_heads=None, - alibi_learned=False, - position_infused_attn=False, - rotary_pos_emb=False, - rotary_emb_dim=None, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - scale_residual=False, - shift_tokens=0, - sandwich_norm=False, - use_qk_norm_attn=False, - qk_norm_attn_seq_len=None, - zero_init_branch_output=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - self.causal = causal - - rel_pos_bias = 'rel_pos_bias' in attn_kwargs - self.has_pos_emb = position_infused_attn or rel_pos_bias or rotary_pos_emb - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - - rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) - self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) if rotary_pos_emb else None - - assert not ( - alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' - - if alibi_pos_bias: - alibi_num_heads = default(alibi_num_heads, heads) - assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' - alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned or not causal else AlibiPositionalBias - self.rel_pos = alibi_pos_klass(heads=alibi_num_heads, bidirectional=not causal) - else: - self.rel_pos = None - - assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' - self.pre_norm = pre_norm - self.sandwich_norm = sandwich_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - self.cross_attend = cross_attend - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_class = RMSScaleShiftNorm if use_rms_scaleshift_norm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - # qk normalization - - if use_qk_norm_attn: - attn_scale_init_value = -math.log(math.log2(qk_norm_attn_seq_len ** 2 - qk_norm_attn_seq_len)) if exists( - qk_norm_attn_seq_len) else None - attn_kwargs = {**attn_kwargs, 
'qk_norm': True, 'scale_init_value': attn_scale_init_value} - - # zero init - - if zero_init_branch_output: - attn_kwargs = {**attn_kwargs, 'zero_init_output': True} - ff_kwargs = {**ff_kwargs, 'zero_init_output': True} - - # calculate layer block order - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - # calculate token shifting - - shift_tokens = cast_tuple(shift_tokens, len(layer_types)) - - # iterate and construct layers - - for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): - is_last_layer = ind == (len(self.layer_types) - 1) - - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if layer_shift_tokens > 0: - shift_range_upper = layer_shift_tokens + 1 - shift_range_lower = -layer_shift_tokens if not causal else 0 - layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) - - if exists(branch_fn): - layer = branch_fn(layer) - - residual_fn = GRUGating if gate_residual else Residual - residual = residual_fn(dim, scale_residual=scale_residual) - - layer_uses_qk_norm = use_qk_norm_attn and layer_type in ('a', 'c') - - pre_branch_norm = norm_fn() if pre_norm and not layer_uses_qk_norm else None - post_branch_norm = norm_fn() if sandwich_norm or layer_uses_qk_norm else None - post_main_norm = norm_fn() if not pre_norm and not is_last_layer else None - - norms = nn.ModuleList([ - pre_branch_norm, - post_branch_norm, - post_main_norm - ]) - - self.layers.append(nn.ModuleList([ - norms, - layer, - residual - ])) - - def forward( - self, - x, - context=None, - full_context=None, # for passing a list of hidden states from an encoder - mask=None, - context_mask=None, - attn_mask=None, - mems=None, - return_hiddens=False, - norm_scale_shift_inp=None, - past_key_values=None, - expected_seq_len=None, - ): - - assert not (self.cross_attend ^ (exists(context) or exists( - full_context))), 'context must be passed in if cross_attend is set to True' - assert context is None or full_context is None, 'only one of full_context or context can be provided' - - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - norm_args = {} - if 
exists(norm_scale_shift_inp): - norm_args['norm_scale_shift_inp'] = norm_scale_shift_inp - - rotary_pos_emb = None - if exists(self.rotary_pos_emb): - if not self.training and self.causal: - assert expected_seq_len is not None, "To decode a transformer with rotary embeddings, you must specify an `expected_seq_len`" - elif expected_seq_len is None: - expected_seq_len = 0 - seq_len = x.shape[1] - if past_key_values is not None: - seq_len += past_key_values[0][0].shape[-2] - max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + seq_len, mems)) + [expected_seq_len]) - rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) - - present_key_values = [] - cross_attn_count = 0 - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - if layer_type == 'a': - layer_mem = mems.pop(0) if mems else None - - residual = x - - pre_branch_norm, post_branch_norm, post_main_norm = norm - - if exists(pre_branch_norm): - x = pre_branch_norm(x, **norm_args) - - if layer_type == 'a' or layer_type == 'c': - if past_key_values is not None: - layer_kv = past_key_values.pop(0) - layer_past = tuple(s.to(x.device) for s in layer_kv) - else: - layer_past = None - - if layer_type == 'a': - out, inter, k, v = block(x, None, mask, None, attn_mask, self.pia_pos_emb, rotary_pos_emb, - prev_attn, layer_mem, layer_past) - elif layer_type == 'c': - if exists(full_context): - out, inter, k, v = block(x, full_context[cross_attn_count], mask, context_mask, None, None, - None, prev_attn, None, layer_past) - else: - out, inter, k, v = block(x, context, mask, context_mask, None, None, None, prev_attn, None, layer_past) - elif layer_type == 'f': - out = block(x) - - if layer_type == 'a' or layer_type == 'c' and present_key_values is not None: - present_key_values.append((k.detach(), v.detach())) - - if exists(post_branch_norm): - out = post_branch_norm(out, **norm_args) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if exists(post_main_norm): - x = post_main_norm(x, **norm_args) - - if layer_type == 'c': - cross_attn_count += 1 - - if layer_type == 'f': - hiddens.append(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates, - past_key_values=present_key_values - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - -class Decoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on decoder' - super().__init__(causal=True, **kwargs) - - -class CrossAttender(AttentionLayers): - def __init__(self, **kwargs): - super().__init__(cross_attend=True, only_cross=True, **kwargs) - - -class ViTransformerWrapper(nn.Module): - def __init__( - self, - *, - image_size, - patch_size, - attn_layers, - num_classes=None, - dropout=0., - emb_dropout=0. 
- ): - super().__init__() - assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' - assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' - dim = attn_layers.dim - num_patches = (image_size // patch_size) ** 2 - patch_dim = 3 * patch_size ** 2 - - self.patch_size = patch_size - - self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) - self.patch_to_embedding = nn.Linear(patch_dim, dim) - self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) - self.dropout = nn.Dropout(emb_dropout) - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - self.mlp_head = FeedForward(dim, dim_out=num_classes, dropout=dropout) if exists(num_classes) else None - - def forward( - self, - img, - return_embeddings=False - ): - p = self.patch_size - - x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) - x = self.patch_to_embedding(x) - b, n, _ = x.shape - - cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b) - x = torch.cat((cls_tokens, x), dim=1) - x = x + self.pos_embedding[:, :(n + 1)] - x = self.dropout(x) - - x = self.attn_layers(x) - x = self.norm(x) - - if not exists(self.mlp_head) or return_embeddings: - return x - - return self.mlp_head(x[:, 0]) - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - shift_mem_down=0, - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.shift_mem_down = shift_mem_down - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - def init_(self): - nn.init.kaiming_normal_(self.token_emb.weight) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_hiddens=False, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - if self.shift_mem_down and exists(mems): - mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] - mems = [*mems_r, *mems_l] - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = 
self.to_logits(x) if not return_embeddings else x - - if return_hiddens: - hiddens = intermediates.hiddens - return out, hiddens - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - - -class ContinuousTransformerWrapper(nn.Module): - def __init__( - self, - *, - max_seq_len, - attn_layers, - dim_in=None, - dim_out=None, - emb_dim=None, - emb_dropout=0., - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - - self.max_seq_len = max_seq_len - - self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity() - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity() - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, _, device = *x.shape, x.device - - x = self.project_in(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - out = self.project_out(x) if not return_embeddings else x - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - diff --git a/spaces/MashiroSA/sovits-emu-voice-transform/modules/enhancer.py b/spaces/MashiroSA/sovits-emu-voice-transform/modules/enhancer.py deleted file mode 100644 index 37676311f7d8dc4ddc2a5244dedc27b2437e04f5..0000000000000000000000000000000000000000 --- a/spaces/MashiroSA/sovits-emu-voice-transform/modules/enhancer.py +++ /dev/null @@ -1,105 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from vdecoder.nsf_hifigan.nvSTFT import STFT -from vdecoder.nsf_hifigan.models import load_model -from torchaudio.transforms import Resample - -class Enhancer: - def __init__(self, enhancer_type, enhancer_ckpt, device=None): - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - - if enhancer_type == 'nsf-hifigan': - self.enhancer = NsfHifiGAN(enhancer_ckpt, device=self.device) - else: - raise ValueError(f" [x] Unknown enhancer: {enhancer_type}") - - self.resample_kernel = {} - self.enhancer_sample_rate = self.enhancer.sample_rate() - self.enhancer_hop_size = self.enhancer.hop_size() - - def enhance(self, - audio, # 1, T - sample_rate, - f0, # 1, n_frames, 1 - hop_size, - adaptive_key = 0, - silence_front = 0 - ): - # enhancer start time - start_frame = int(silence_front * sample_rate / hop_size) - real_silence_front = start_frame * hop_size / sample_rate - audio = audio[:, int(np.round(real_silence_front * sample_rate)) : ] - f0 = f0[: , start_frame :, :] - - # adaptive parameters - adaptive_factor = 2 ** ( -adaptive_key / 12) - adaptive_sample_rate = 100 * int(np.round(self.enhancer_sample_rate / adaptive_factor / 100)) - real_factor = self.enhancer_sample_rate / 
adaptive_sample_rate - - # resample the ddsp output - if sample_rate == adaptive_sample_rate: - audio_res = audio - else: - key_str = str(sample_rate) + str(adaptive_sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(sample_rate, adaptive_sample_rate, lowpass_filter_width = 128).to(self.device) - audio_res = self.resample_kernel[key_str](audio) - - n_frames = int(audio_res.size(-1) // self.enhancer_hop_size + 1) - - # resample f0 - f0_np = f0.squeeze(0).squeeze(-1).cpu().numpy() - f0_np *= real_factor - time_org = (hop_size / sample_rate) * np.arange(len(f0_np)) / real_factor - time_frame = (self.enhancer_hop_size / self.enhancer_sample_rate) * np.arange(n_frames) - f0_res = np.interp(time_frame, time_org, f0_np, left=f0_np[0], right=f0_np[-1]) - f0_res = torch.from_numpy(f0_res).unsqueeze(0).float().to(self.device) # 1, n_frames - - # enhance - enhanced_audio, enhancer_sample_rate = self.enhancer(audio_res, f0_res) - - # resample the enhanced output - if adaptive_factor != 0: - key_str = str(adaptive_sample_rate) + str(enhancer_sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(adaptive_sample_rate, enhancer_sample_rate, lowpass_filter_width = 128).to(self.device) - enhanced_audio = self.resample_kernel[key_str](enhanced_audio) - - # pad the silence frames - if start_frame > 0: - enhanced_audio = F.pad(enhanced_audio, (int(np.round(enhancer_sample_rate * real_silence_front)), 0)) - - return enhanced_audio, enhancer_sample_rate - - -class NsfHifiGAN(torch.nn.Module): - def __init__(self, model_path, device=None): - super().__init__() - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - print('| Load HifiGAN: ', model_path) - self.model, self.h = load_model(model_path, device=self.device) - - def sample_rate(self): - return self.h.sampling_rate - - def hop_size(self): - return self.h.hop_size - - def forward(self, audio, f0): - stft = STFT( - self.h.sampling_rate, - self.h.num_mels, - self.h.n_fft, - self.h.win_size, - self.h.hop_size, - self.h.fmin, - self.h.fmax) - with torch.no_grad(): - mel = stft.get_mel(audio) - enhanced_audio = self.model(mel, f0[:,:mel.size(-1)]).view(-1) - return enhanced_audio, self.h.sampling_rate \ No newline at end of file diff --git a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/tools/preprocess_imagenet22k.py b/spaces/MattyWhite/ChatGPT-ImageCaptioner2/tools/preprocess_imagenet22k.py deleted file mode 100644 index 6dda56c222a30c7be23fafbdab4be3fe611597e2..0000000000000000000000000000000000000000 --- a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/tools/preprocess_imagenet22k.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. 
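The adaptive-key logic in `Enhancer.enhance` above converts semitones to a frequency ratio via 2 ** (-key / 12) and snaps the intermediate sample rate to a multiple of 100 Hz. A minimal standalone sketch of that arithmetic, with illustrative numbers that are not taken from the source:

import numpy as np

def adaptive_rates(enhancer_sr, adaptive_key):
    factor = 2 ** (-adaptive_key / 12)                             # semitones -> frequency ratio
    adaptive_sr = 100 * int(np.round(enhancer_sr / factor / 100))  # snap to a 100 Hz grid
    real_factor = enhancer_sr / adaptive_sr                        # ratio actually applied to f0
    return factor, adaptive_sr, real_factor

# Shifting a 44.1 kHz vocoder up by 2 semitones runs it near 49.5 kHz internally.
print(adaptive_rates(44100, 2))  # (~0.891, 49500, ~0.891)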
- -import os -import numpy as np -import sys - -sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/') -sys.path.insert(0, 'third_party/Deformable-DETR') -from detic.data.tar_dataset import _TarDataset, DiskTarDataset -import pickle -import io -import gzip -import time - - -class _RawTarDataset(object): - - def __init__(self, filename, indexname, preload=False): - self.filename = filename - self.names = [] - self.offsets = [] - - for l in open(indexname): - ll = l.split() - a, b, c = ll[:3] - offset = int(b[:-1]) - if l.endswith('** Block of NULs **\n'): - self.offsets.append(offset) - break - else: - if c.endswith('JPEG'): - self.names.append(c) - self.offsets.append(offset) - else: - # ignore directories - pass - if preload: - self.data = np.memmap(filename, mode='r', dtype='uint8') - else: - self.data = None - - def __len__(self): - return len(self.names) - - def __getitem__(self, idx): - if self.data is None: - self.data = np.memmap(self.filename, mode='r', dtype='uint8') - ofs = self.offsets[idx] * 512 - fsize = 512 * (self.offsets[idx + 1] - self.offsets[idx]) - data = self.data[ofs:ofs + fsize] - - if data[:13].tostring() == '././@LongLink': - data = data[3 * 512:] - else: - data = data[512:] - - # just to make it more fun a few JPEGs are GZIP compressed... - # catch this case - if tuple(data[:2]) == (0x1f, 0x8b): - s = io.StringIO(data.tostring()) - g = gzip.GzipFile(None, 'r', 0, s) - sdata = g.read() - else: - sdata = data.tostring() - return sdata - - - -def preprocess(): - # Follow https://github.com/Alibaba-MIIL/ImageNet21K/blob/main/dataset_preprocessing/processing_script.sh - # Expect 12358684 samples with 11221 classes - # ImageNet folder has 21841 classes (synsets) - - i22kdir = '/datasets01/imagenet-22k/062717/' - i22ktarlogs = '/checkpoint/imisra/datasets/imagenet-22k/tarindex' - class_names_file = '/checkpoint/imisra/datasets/imagenet-22k/words.txt' - - output_dir = '/checkpoint/zhouxy/Datasets/ImageNet/metadata-22k/' - i22knpytarlogs = '/checkpoint/zhouxy/Datasets/ImageNet/metadata-22k/tarindex_npy' - print('Listing dir') - log_files = os.listdir(i22ktarlogs) - log_files = [x for x in log_files if x.endswith(".tarlog")] - log_files.sort() - chunk_datasets = [] - dataset_lens = [] - min_count = 0 - create_npy_tarlogs = True - print('Creating folders') - if create_npy_tarlogs: - os.makedirs(i22knpytarlogs, exist_ok=True) - for log_file in log_files: - syn = log_file.replace(".tarlog", "") - dataset = _RawTarDataset(os.path.join(i22kdir, syn + ".tar"), - os.path.join(i22ktarlogs, syn + ".tarlog"), - preload=False) - names = np.array(dataset.names) - offsets = np.array(dataset.offsets, dtype=np.int64) - np.save(os.path.join(i22knpytarlogs, f"{syn}_names.npy"), names) - np.save(os.path.join(i22knpytarlogs, f"{syn}_offsets.npy"), offsets) - - os.makedirs(output_dir, exist_ok=True) - - start_time = time.time() - for log_file in log_files: - syn = log_file.replace(".tarlog", "") - dataset = _TarDataset(os.path.join(i22kdir, syn + ".tar"), i22knpytarlogs) - # dataset = _RawTarDataset(os.path.join(i22kdir, syn + ".tar"), - # os.path.join(i22ktarlogs, syn + ".tarlog"), - # preload=False) - dataset_lens.append(len(dataset)) - end_time = time.time() - print(f"Time {end_time - start_time}") - - - dataset_lens = np.array(dataset_lens) - dataset_valid = dataset_lens > min_count - - syn2class = {} - with open(class_names_file) as fh: - for line in fh: - line = line.strip().split("\t") - syn2class[line[0]] = line[1] - - tarlog_files = [] - class_names = [] - tar_files = [] - 
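- # Keep only synsets whose shard passed the min_count filter; for each survivor,
- # record its tarlog path, its tar path, and its human-readable name from syn2class.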
for k in range(len(dataset_valid)): - if not dataset_valid[k]: - continue - syn = log_files[k].replace(".tarlog", "") - tarlog_files.append(os.path.join(i22ktarlogs, syn + ".tarlog")) - tar_files.append(os.path.join(i22kdir, syn + ".tar")) - class_names.append(syn2class[syn]) - - tarlog_files = np.array(tarlog_files) - tar_files = np.array(tar_files) - class_names = np.array(class_names) - print(f"Have {len(class_names)} classes and {dataset_lens[dataset_valid].sum()} samples") - - np.save(os.path.join(output_dir, "tarlog_files.npy"), tarlog_files) - np.save(os.path.join(output_dir, "tar_files.npy"), tar_files) - np.save(os.path.join(output_dir, "class_names.npy"), class_names) - np.save(os.path.join(output_dir, "tar_files.npy"), tar_files) - - -if __name__ == "__main__": - preprocess() diff --git a/spaces/McLovin171/runwayml-stable-diffusion-v1-5/README.md b/spaces/McLovin171/runwayml-stable-diffusion-v1-5/README.md deleted file mode 100644 index 8b055e18c4fe2ef223c590e12db89ee51adbe883..0000000000000000000000000000000000000000 --- a/spaces/McLovin171/runwayml-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 -emoji: 🚀 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/utils/__init__.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/utils/__init__.py deleted file mode 100644 index f2678b321c295bcceaef945111ac3524be19d6e4..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .misc import add_prefix - -__all__ = ['add_prefix'] diff --git a/spaces/Mwebrania/clasmaLAB/README.md b/spaces/Mwebrania/clasmaLAB/README.md deleted file mode 100644 index e8dc69939f81504eb44e44b27f55c5a6d49c4d0f..0000000000000000000000000000000000000000 --- a/spaces/Mwebrania/clasmaLAB/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Classification Of Maize Diseases -emoji: 🌍 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -duplicated_from: Mwebrania/classification_of_maize_diseases ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nee001/bing0/src/components/chat-header.tsx b/spaces/Nee001/bing0/src/components/chat-header.tsx deleted file mode 100644 index c6664b8dee61179f844d45c5bd650518fc2cb4c2..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/components/chat-header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import LogoIcon from '@/assets/images/logo.svg' -import Image from 'next/image' - -export function ChatHeader() { - return ( -
- <Image alt="logo" src={LogoIcon} />
- <div>Welcome to the new Bing</div>
- <div>AI-powered Copilot for the web</div>
    - ) -} diff --git a/spaces/Niansuh/DALL-E/index.html b/spaces/Niansuh/DALL-E/index.html deleted file mode 100644 index 8a0d96d5f17d744cc71e5e0b1424de677cf7dbf0..0000000000000000000000000000000000000000 --- a/spaces/Niansuh/DALL-E/index.html +++ /dev/null @@ -1,295 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - -
    - - - \ No newline at end of file diff --git a/spaces/Not-Grim-Refer/Code-to-Detailed-English-Description/readme.md b/spaces/Not-Grim-Refer/Code-to-Detailed-English-Description/readme.md deleted file mode 100644 index 1f39d266ecf9f01607a0ec1aa757a86f7dac0e90..0000000000000000000000000000000000000000 --- a/spaces/Not-Grim-Refer/Code-to-Detailed-English-Description/readme.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Code-to-Detailed-English-Description -emoji: 🌍 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.30.3 -app_file: app.py -pinned: true -license: mit - ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-refer \ No newline at end of file diff --git a/spaces/OAOA/DifFace/basicsr/metrics/fid.py b/spaces/OAOA/DifFace/basicsr/metrics/fid.py deleted file mode 100644 index 1b0ba6df1de96d93a60c1cfd3dc1fcf4d3d31533..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/metrics/fid.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from scipy import linalg -from tqdm import tqdm - -from basicsr.archs.inception import InceptionV3 - - -def load_patched_inception_v3(device='cuda', resize_input=True, normalize_input=False): - # we may not resize the input, but in [rosinality/stylegan2-pytorch] it - # does resize the input. - inception = InceptionV3([3], resize_input=resize_input, normalize_input=normalize_input) - inception = nn.DataParallel(inception).eval().to(device) - return inception - - -@torch.no_grad() -def extract_inception_features(data_generator, inception, len_generator=None, device='cuda'): - """Extract inception features. - - Args: - data_generator (generator): A data generator. - inception (nn.Module): Inception model. - len_generator (int): Length of the data_generator to show the - progressbar. Default: None. - device (str): Device. Default: cuda. - - Returns: - Tensor: Extracted features. - """ - if len_generator is not None: - pbar = tqdm(total=len_generator, unit='batch', desc='Extract') - else: - pbar = None - features = [] - - for data in data_generator: - if pbar: - pbar.update(1) - data = data.to(device) - feature = inception(data)[0].view(data.shape[0], -1) - features.append(feature.to('cpu')) - if pbar: - pbar.close() - features = torch.cat(features, 0) - return features - - -def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6): - """Numpy implementation of the Frechet Distance. - - The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is: - d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). - Stable version by Dougal J. Sutherland. - - Args: - mu1 (np.array): The sample mean over activations. - sigma1 (np.array): The covariance matrix over activations for generated samples. - mu2 (np.array): The sample mean over activations, precalculated on an representative data set. - sigma2 (np.array): The covariance matrix over activations, precalculated on an representative data set. - - Returns: - float: The Frechet Distance. - """ - assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths' - assert sigma1.shape == sigma2.shape, ('Two covariances have different dimensions') - - cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False) - - # Product might be almost singular - if not np.isfinite(cov_sqrt).all(): - print('Product of cov matrices is singular. 
Adding {eps} to diagonal of cov estimates') - offset = np.eye(sigma1.shape[0]) * eps - cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset)) - - # Numerical error might give slight imaginary component - if np.iscomplexobj(cov_sqrt): - if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3): - m = np.max(np.abs(cov_sqrt.imag)) - raise ValueError(f'Imaginary component {m}') - cov_sqrt = cov_sqrt.real - - mean_diff = mu1 - mu2 - mean_norm = mean_diff @ mean_diff - trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt) - fid = mean_norm + trace - - return fid diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/semisupervised_translation.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/semisupervised_translation.py deleted file mode 100644 index b2f9bf9a733d94e50b588e4316b4a02e1c8bcf51..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/tasks/semisupervised_translation.py +++ /dev/null @@ -1,485 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os -from collections import OrderedDict - -from fairseq import utils -from fairseq.data import ( - BacktranslationDataset, - IndexedCachedDataset, - IndexedDataset, - IndexedRawTextDataset, - LanguagePairDataset, - NoisingDataset, - RoundRobinZipDatasets, - data_utils, - indexed_dataset, -) -from fairseq.models import FairseqMultiModel -from fairseq.sequence_generator import SequenceGenerator - -from . import register_task -from .multilingual_translation import MultilingualTranslationTask - - -logger = logging.getLogger(__name__) - - -def _get_bt_dataset_key(lang_pair): - return "bt:" + lang_pair - - -def _get_denoising_dataset_key(lang_pair): - return "denoising:" + lang_pair - - -# ported from UnsupervisedMT -def parse_lambda_config(x): - """ - Parse the configuration of lambda coefficient (for scheduling). - x = "3" # lambda will be a constant equal to x - x = "0:1,1000:0" # lambda will start from 1 and linearly decrease - # to 0 during the first 1000 iterations - x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 - # iterations, then will linearly increase to 1 until iteration 2000 - """ - split = x.split(",") - if len(split) == 1: - return float(x), None - else: - split = [s.split(os.pathsep) for s in split] - assert all(len(s) == 2 for s in split) - assert all(k.isdigit() for k, _ in split) - assert all( - int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1) - ) - return float(split[0][1]), [(int(k), float(v)) for k, v in split] - - -@register_task("semisupervised_translation") -class SemisupervisedTranslationTask(MultilingualTranslationTask): - """A task for training multiple translation models simultaneously. - - We iterate round-robin over batches from multiple language pairs, ordered - according to the `--lang-pairs` argument. - - The training loop is roughly: - - for i in range(len(epoch)): - for lang_pair in args.lang_pairs: - batch = next_batch_for_lang_pair(lang_pair) - loss = criterion(model_for_lang_pair(lang_pair), batch) - loss.backward() - optimizer.step() - - In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset - (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that - implements the `FairseqMultiModel` interface. 
- - During inference it is required to specify a single `--source-lang` and - `--target-lang`, instead of `--lang-pairs`. - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - MultilingualTranslationTask.add_args(parser) - parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG', - help='cross-entropy reconstruction coefficient (parallel data). ' - 'use fixed weight during training if set to floating point number. ' - 'use piecewise linear function over number of updates to schedule the ' - 'weight with the format: w0:step0,w1:step1,...') - parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG', - help='Cross-entropy reconstruction coefficient (denoising autoencoding)' - 'use fixed weight during training if set to floating point number. ' - 'use piecewise linear function over number of updates to schedule the ' - 'weight with the format: w0:step0,w1:step1,...') - parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG', - help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)' - 'use fixed weight during training if set to floating point number. ' - 'use piecewise linear function over number of updates to schedule the ' - 'weight with the format: w0:step0,w1:step1,...') - parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N', - help='generate back-translated sequences of maximum length ax + b, where x is the ' - 'source length') - parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N', - help='generate back-translated sequences of maximum length ax + b, where x is the ' - 'source length') - parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N', - help='beam size used in beam search of online back-translation') - parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', - help='maximum word shuffle distance for denoising autoencoding data generation') - parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', - help='word dropout probability for denoising autoencoding data generation') - parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', - help='word blanking probability for denoising autoencoding data generation') - # fmt: on - - def __init__(self, args, dicts, training): - super().__init__(args, dicts, training) - self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config( - args.lambda_parallel_config - ) - self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config( - args.lambda_otf_bt_config - ) - self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config( - args.lambda_denoising_config - ) - if self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None: - denoising_lang_pairs = [ - "%s-%s" % (tgt, tgt) - for tgt in {lang_pair.split("-")[1] for lang_pair in args.lang_pairs} - ] - self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs - self.backtranslate_datasets = {} - self.backtranslators = {} - - @classmethod - def setup_task(cls, args, **kwargs): - dicts, training = MultilingualTranslationTask.prepare(args, **kwargs) - return cls(args, dicts, training) - - def load_dataset(self, split, epoch=1, **kwargs): - """Load a dataset split.""" - paths = utils.split_paths(self.args.data) - assert len(paths) > 0 - data_path = paths[(epoch - 1) % len(paths)] - - def split_exists(split, src, tgt, 
lang): - if src is not None: - filename = os.path.join( - data_path, "{}.{}-{}.{}".format(split, src, tgt, lang) - ) - else: - filename = os.path.join( - data_path, "{}.{}-None.{}".format(split, src, tgt) - ) - return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl) - - def load_indexed_dataset(path, dictionary): - return data_utils.load_indexed_dataset( - path, dictionary, self.args.dataset_impl - ) - - # load parallel datasets - src_datasets, tgt_datasets = {}, {} - if ( - self.lambda_parallel > 0.0 - or self.lambda_parallel_steps is not None - or not split.startswith("train") - ): - for lang_pair in self.lang_pairs: - src, tgt = lang_pair.split("-") - if split_exists(split, src, tgt, src): - prefix = os.path.join( - data_path, "{}.{}-{}.".format(split, src, tgt) - ) - elif split_exists(split, tgt, src, src): - prefix = os.path.join( - data_path, "{}.{}-{}.".format(split, tgt, src) - ) - else: - continue - src_datasets[lang_pair] = load_indexed_dataset( - prefix + src, self.dicts[src] - ) - tgt_datasets[lang_pair] = load_indexed_dataset( - prefix + tgt, self.dicts[tgt] - ) - logger.info( - "parallel-{} {} {} examples".format( - data_path, split, len(src_datasets[lang_pair]) - ) - ) - if len(src_datasets) == 0: - raise FileNotFoundError( - "Dataset not found: {} ({})".format(split, data_path) - ) - - # back translation datasets - backtranslate_datasets = {} - if ( - self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None - ) and split.startswith("train"): - for lang_pair in self.lang_pairs: - src, tgt = lang_pair.split("-") - if not split_exists(split, tgt, None, tgt): - raise FileNotFoundError( - "Dataset not found: backtranslation {} ({})".format( - split, data_path - ) - ) - filename = os.path.join( - data_path, "{}.{}-None.{}".format(split, tgt, tgt) - ) - dataset = load_indexed_dataset(filename, self.dicts[tgt]) - lang_pair_dataset_tgt = LanguagePairDataset( - dataset, - dataset.sizes, - self.dicts[tgt], - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - ) - lang_pair_dataset = LanguagePairDataset( - dataset, - dataset.sizes, - src_dict=self.dicts[src], - tgt=dataset, - tgt_sizes=dataset.sizes, - tgt_dict=self.dicts[tgt], - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - ) - backtranslate_datasets[lang_pair] = BacktranslationDataset( - tgt_dataset=self.alter_dataset_langtok( - lang_pair_dataset_tgt, - src_eos=self.dicts[tgt].eos(), - src_lang=tgt, - tgt_lang=src, - ), - backtranslation_fn=self.backtranslators[lang_pair], - src_dict=self.dicts[src], - tgt_dict=self.dicts[tgt], - output_collater=self.alter_dataset_langtok( - lang_pair_dataset=lang_pair_dataset, - src_eos=self.dicts[src].eos(), - src_lang=src, - tgt_eos=self.dicts[tgt].eos(), - tgt_lang=tgt, - ).collater, - ) - logger.info( - "backtranslate-{}: {} {} {} examples".format( - tgt, - data_path, - split, - len(backtranslate_datasets[lang_pair]), - ) - ) - self.backtranslate_datasets[lang_pair] = backtranslate_datasets[ - lang_pair - ] - - # denoising autoencoder - noising_datasets = {} - if ( - self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None - ) and split.startswith("train"): - for lang_pair in self.lang_pairs: - _, tgt = lang_pair.split("-") - if not split_exists(split, tgt, None, tgt): - continue - filename = os.path.join( - data_path, "{}.{}-None.{}".format(split, tgt, tgt) - ) - tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt]) - tgt_dataset2 = load_indexed_dataset(filename, 
self.dicts[tgt]) - noising_dataset = NoisingDataset( - tgt_dataset1, - self.dicts[tgt], - seed=1, - max_word_shuffle_distance=self.args.max_word_shuffle_distance, - word_dropout_prob=self.args.word_dropout_prob, - word_blanking_prob=self.args.word_blanking_prob, - ) - noising_datasets[lang_pair] = self.alter_dataset_langtok( - LanguagePairDataset( - noising_dataset, - tgt_dataset1.sizes, - self.dicts[tgt], - tgt_dataset2, - tgt_dataset2.sizes, - self.dicts[tgt], - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - ), - src_eos=self.dicts[tgt].eos(), - src_lang=tgt, - tgt_eos=self.dicts[tgt].eos(), - tgt_lang=tgt, - ) - logger.info( - "denoising-{}: {} {} {} examples".format( - tgt, - data_path, - split, - len(noising_datasets[lang_pair]), - ) - ) - - def language_pair_dataset(lang_pair): - src, tgt = lang_pair.split("-") - src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair] - return self.alter_dataset_langtok( - LanguagePairDataset( - src_dataset, - src_dataset.sizes, - self.dicts[src], - tgt_dataset, - tgt_dataset.sizes, - self.dicts[tgt], - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - ), - self.dicts[src].eos(), - src, - self.dicts[tgt].eos(), - tgt, - ) - - self.datasets[split] = RoundRobinZipDatasets( - OrderedDict( - [ - (lang_pair, language_pair_dataset(lang_pair)) - for lang_pair in src_datasets.keys() - ] - + [ - (_get_bt_dataset_key(lang_pair), dataset) - for lang_pair, dataset in backtranslate_datasets.items() - ] - + [ - (_get_denoising_dataset_key(lang_pair), dataset) - for lang_pair, dataset in noising_datasets.items() - ] - ), - eval_key=None - if self.training - else "%s-%s" % (self.args.source_lang, self.args.target_lang), - ) - - def build_model(self, args): - from fairseq import models - - model = models.build_model(args, self) - if not isinstance(model, FairseqMultiModel): - raise ValueError( - "SemisupervisedTranslationTask requires a FairseqMultiModel architecture" - ) - - # create SequenceGenerator for each model that has backtranslation dependency on it - self.sequence_generators = {} - if ( - self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None - ) and self.training: - for lang_pair in self.lang_pairs: - src, tgt = lang_pair.split("-") - key = "{}-{}".format(tgt, src) - self.sequence_generators[key] = SequenceGenerator( - [model.models[key]], - tgt_dict=self.dicts[src], - beam_size=args.bt_beam_size, - max_len_a=args.bt_max_len_a, - max_len_b=args.bt_max_len_b, - ) - decoder_lang_tok_idx = self.get_decoder_langtok(src) - - def backtranslate_fn( - sample, - model=model.models[key], - bos_token=decoder_lang_tok_idx, - sequence_generator=self.sequence_generators[key], - ): - return sequence_generator.generate( - [model], - sample, - bos_token=bos_token, - ) - - self.backtranslators[lang_pair] = backtranslate_fn - - return model - - def train_step( - self, sample, model, criterion, optimizer, update_num, ignore_grad=False - ): - model.train() - - if update_num > 0: - self.update_step(update_num) - - agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, {} - - def forward_backward(model, samples, logging_output_key, weight): - nonlocal agg_loss, agg_sample_size, agg_logging_output - if samples is None or len(samples) == 0: - return - loss, sample_size, logging_output = criterion(model, samples) - if ignore_grad: - loss *= 0 - else: - loss *= weight - optimizer.backward(loss) - agg_loss += loss.detach().item() - # TODO make summing of the sample 
sizes configurable - agg_sample_size += sample_size - for k in logging_output: - agg_logging_output[k] += logging_output[k] - agg_logging_output[logging_output_key] += logging_output[k] - - if self.lambda_parallel > 0.0: - for lang_pair in self.lang_pairs: - forward_backward( - model.models[lang_pair], - sample[lang_pair], - lang_pair, - self.lambda_parallel, - ) - - if self.lambda_otf_bt > 0.0: - for lang_pair in self.lang_pairs: - sample_key = _get_bt_dataset_key(lang_pair) - forward_backward( - model.models[lang_pair], - sample[sample_key], - sample_key, - self.lambda_otf_bt, - ) - - if self.lambda_denoising > 0.0: - for lang_pair in self.lang_pairs: - _, tgt = lang_pair.split("-") - sample_key = _get_denoising_dataset_key(lang_pair) - forward_backward( - model.models["{0}-{0}".format(tgt)], - sample[sample_key], - sample_key, - self.lambda_denoising, - ) - - return agg_loss, agg_sample_size, agg_logging_output - - def update_step(self, num_updates): - def lambda_step_func(config, n_iter): - """ - Update a lambda value according to its schedule configuration. - """ - ranges = [ - i - for i in range(len(config) - 1) - if config[i][0] <= n_iter < config[i + 1][0] - ] - if len(ranges) == 0: - assert n_iter >= config[-1][0] - return config[-1][1] - assert len(ranges) == 1 - i = ranges[0] - x_a, y_a = config[i] - x_b, y_b = config[i + 1] - return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a) - - if self.lambda_parallel_steps is not None: - self.lambda_parallel = lambda_step_func( - self.lambda_parallel_steps, num_updates - ) - if self.lambda_denoising_steps is not None: - self.lambda_denoising = lambda_step_func( - self.lambda_denoising_steps, num_updates - ) - if self.lambda_otf_bt_steps is not None: - self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/huggingface/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/huggingface/__init__.py deleted file mode 100644 index f7911c2c8edf516855023a285b18935e5389ec02..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/huggingface/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import importlib -import os - - -# automatically import any Python files in the models/huggingface/ directory -models_dir = os.path.dirname(__file__) -for file in os.listdir(models_dir): - path = os.path.join(models_dir, file) - if ( - not file.startswith("_") - and not file.startswith(".") - and (file.endswith(".py") or os.path.isdir(path)) - ): - model_name = file[: file.find(".py")] if file.endswith(".py") else file - module = importlib.import_module("fairseq.models.huggingface." + model_name) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_transformer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_transformer.py deleted file mode 100644 index d114202d25fbd1dca66c7abebb0b0a8bffbe094d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/nonautoregressive_transformer.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
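The schedule machinery above (`parse_lambda_config` plus `lambda_step_func` inside `update_step`) is plain piecewise-linear interpolation over the update counter. A self-contained sketch of the same idea; the schedule knots below are illustrative, not values from the source:

def piecewise_linear(knots, n_iter):
    # knots: [(step, value), ...] sorted by step, as parse_lambda_config produces
    if n_iter >= knots[-1][0]:
        return knots[-1][1]  # hold the final value once past the last knot
    for (x_a, y_a), (x_b, y_b) in zip(knots, knots[1:]):
        if x_a <= n_iter < x_b:
            return y_a + (n_iter - x_a) * (y_b - y_a) / (x_b - x_a)
    return knots[0][1]  # before the first knot

# "0:1,1000:0" parses to [(0, 1.0), (1000, 0.0)]: decay from 1.0 to 0.0 over 1000 updates.
schedule = [(0, 1.0), (1000, 0.0)]
assert piecewise_linear(schedule, 500) == 0.5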
- -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.iterative_refinement_generator import DecoderOut -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder -from fairseq.models.transformer import Embedding -from fairseq.modules.transformer_sentence_encoder import init_bert_params - - -def _mean_pooling(enc_feats, src_masks): - # enc_feats: T x B x C - # src_masks: B x T or None - if src_masks is None: - enc_feats = enc_feats.mean(0) - else: - src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats) - enc_feats = ( - (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None] - ).sum(0) - return enc_feats - - -def _argmax(x, dim): - return (x == x.max(dim, keepdim=True)[0]).type_as(x) - - -def _uniform_assignment(src_lens, trg_lens): - max_trg_len = trg_lens.max() - steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size - # max_trg_len - index_t = utils.new_arange(trg_lens, max_trg_len).float() - index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len - index_t = torch.round(index_t).long().detach() - return index_t - - -@register_model("nonautoregressive_transformer") -class NATransformerModel(FairseqNATModel): - @property - def allow_length_beam(self): - return True - - @staticmethod - def add_args(parser): - FairseqNATModel.add_args(parser) - - # length prediction - parser.add_argument( - "--src-embedding-copy", - action="store_true", - help="copy encoder word embeddings as the initial input of the decoder", - ) - parser.add_argument( - "--pred-length-offset", - action="store_true", - help="predicting the length difference between the target and source sentences", - ) - parser.add_argument( - "--sg-length-pred", - action="store_true", - help="stop the gradients back-propagated from the length predictor", - ) - parser.add_argument( - "--length-loss-factor", - type=float, - help="weights on the length prediction loss", - ) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - decoder = NATransformerDecoder(args, tgt_dict, embed_tokens) - if getattr(args, "apply_bert_init", False): - decoder.apply(init_bert_params) - return decoder - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # length prediction - length_out = self.decoder.forward_length( - normalize=False, encoder_out=encoder_out - ) - length_tgt = self.decoder.forward_length_prediction( - length_out, encoder_out, tgt_tokens - ) - - # decoding - word_ins_out = self.decoder( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - - return { - "word_ins": { - "out": word_ins_out, - "tgt": tgt_tokens, - "mask": tgt_tokens.ne(self.pad), - "ls": self.args.label_smoothing, - "nll_loss": True, - }, - "length": { - "out": length_out, - "tgt": length_tgt, - "factor": self.decoder.length_loss_factor, - }, - } - - def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): - step = decoder_out.step - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # execute the decoder - output_masks = output_tokens.ne(self.pad) - _scores, _tokens = self.decoder( - normalize=True, - prev_output_tokens=output_tokens, - encoder_out=encoder_out, - step=step, - ).max(-1) - - 
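- # Write the argmax tokens and scores back in place, but only at non-padding
- # positions; padded slots keep their previous token and score.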
output_tokens.masked_scatter_(output_masks, _tokens[output_masks]) - output_scores.masked_scatter_(output_masks, _scores[output_masks]) - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - def initialize_output_tokens(self, encoder_out, src_tokens): - # length prediction - length_tgt = self.decoder.forward_length_prediction( - self.decoder.forward_length(normalize=True, encoder_out=encoder_out), - encoder_out=encoder_out, - ) - - max_length = length_tgt.clamp_(min=2).max() - idx_length = utils.new_arange(src_tokens, max_length) - - initial_output_tokens = src_tokens.new_zeros( - src_tokens.size(0), max_length - ).fill_(self.pad) - initial_output_tokens.masked_fill_( - idx_length[None, :] < length_tgt[:, None], self.unk - ) - initial_output_tokens[:, 0] = self.bos - initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) - - initial_output_scores = initial_output_tokens.new_zeros( - *initial_output_tokens.size() - ).type_as(encoder_out["encoder_out"][0]) - - return DecoderOut( - output_tokens=initial_output_tokens, - output_scores=initial_output_scores, - attn=None, - step=0, - max_step=0, - history=None, - ) - - def regenerate_length_beam(self, decoder_out, beam_size): - output_tokens = decoder_out.output_tokens - length_tgt = output_tokens.ne(self.pad).sum(1) - length_tgt = ( - length_tgt[:, None] - + utils.new_arange(length_tgt, 1, beam_size) - - beam_size // 2 - ) - length_tgt = length_tgt.view(-1).clamp_(min=2) - max_length = length_tgt.max() - idx_length = utils.new_arange(length_tgt, max_length) - - initial_output_tokens = output_tokens.new_zeros( - length_tgt.size(0), max_length - ).fill_(self.pad) - initial_output_tokens.masked_fill_( - idx_length[None, :] < length_tgt[:, None], self.unk - ) - initial_output_tokens[:, 0] = self.bos - initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos) - - initial_output_scores = initial_output_tokens.new_zeros( - *initial_output_tokens.size() - ).type_as(decoder_out.output_scores) - - return decoder_out._replace( - output_tokens=initial_output_tokens, output_scores=initial_output_scores - ) - - -class NATransformerDecoder(FairseqNATDecoder): - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - super().__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - self.dictionary = dictionary - self.bos = dictionary.bos() - self.unk = dictionary.unk() - self.eos = dictionary.eos() - - self.encoder_embed_dim = args.encoder_embed_dim - self.sg_length_pred = getattr(args, "sg_length_pred", False) - self.pred_length_offset = getattr(args, "pred_length_offset", False) - self.length_loss_factor = getattr(args, "length_loss_factor", 0.1) - self.src_embedding_copy = getattr(args, "src_embedding_copy", False) - self.embed_length = Embedding(256, self.encoder_embed_dim, None) - - @ensemble_decoder - def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused): - features, _ = self.extract_features( - prev_output_tokens, - encoder_out=encoder_out, - embedding_copy=(step == 0) & self.src_embedding_copy, - ) - decoder_out = self.output_layer(features) - return F.log_softmax(decoder_out, -1) if normalize else decoder_out - - @ensemble_decoder - def forward_length(self, normalize, encoder_out): - enc_feats = encoder_out["encoder_out"][0] # T x B x C - if len(encoder_out["encoder_padding_mask"]) > 0: - src_masks = 
encoder_out["encoder_padding_mask"][0] # B x T - else: - src_masks = None - enc_feats = _mean_pooling(enc_feats, src_masks) - if self.sg_length_pred: - enc_feats = enc_feats.detach() - length_out = F.linear(enc_feats, self.embed_length.weight) - return F.log_softmax(length_out, -1) if normalize else length_out - - def extract_features( - self, - prev_output_tokens, - encoder_out=None, - early_exit=None, - embedding_copy=False, - **unused - ): - """ - Similar to *forward* but only return features. - - Inputs: - prev_output_tokens: Tensor(B, T) - encoder_out: a dictionary of hidden states and masks - - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - the LevenshteinTransformer decoder has full-attention to all generated tokens - """ - # embedding - if embedding_copy: - src_embd = encoder_out["encoder_embedding"][0] - if len(encoder_out["encoder_padding_mask"]) > 0: - src_mask = encoder_out["encoder_padding_mask"][0] - else: - src_mask = None - src_mask = ( - ~src_mask - if src_mask is not None - else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool() - ) - - x, decoder_padding_mask = self.forward_embedding( - prev_output_tokens, - self.forward_copying_source( - src_embd, src_mask, prev_output_tokens.ne(self.padding_idx) - ), - ) - - else: - - x, decoder_padding_mask = self.forward_embedding(prev_output_tokens) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - attn = None - inner_states = [x] - - # decoder layers - for i, layer in enumerate(self.layers): - - # early exit from the decoder. - if (early_exit is not None) and (i >= early_exit): - break - - x, attn, _ = layer( - x, - encoder_out["encoder_out"][0] - if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) - else None, - encoder_out["encoder_padding_mask"][0] - if ( - encoder_out is not None - and len(encoder_out["encoder_padding_mask"]) > 0 - ) - else None, - self_attn_mask=None, - self_attn_padding_mask=decoder_padding_mask, - ) - inner_states.append(x) - - if self.layer_norm: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - if self.project_out_dim is not None: - x = self.project_out_dim(x) - - return x, {"attn": attn, "inner_states": inner_states} - - def forward_embedding(self, prev_output_tokens, states=None): - # embed positions - positions = ( - self.embed_positions(prev_output_tokens) - if self.embed_positions is not None - else None - ) - - # embed tokens and positions - if states is None: - x = self.embed_scale * self.embed_tokens(prev_output_tokens) - if self.project_in_dim is not None: - x = self.project_in_dim(x) - else: - x = states - - if positions is not None: - x += positions - x = self.dropout_module(x) - decoder_padding_mask = prev_output_tokens.eq(self.padding_idx) - return x, decoder_padding_mask - - def forward_copying_source(self, src_embeds, src_masks, tgt_masks): - length_sources = src_masks.sum(1) - length_targets = tgt_masks.sum(1) - mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill( - ~tgt_masks, 0 - ) - copied_embedding = torch.gather( - src_embeds, - 1, - mapped_inputs.unsqueeze(-1).expand( - *mapped_inputs.size(), src_embeds.size(-1) - ), - ) - return copied_embedding - - def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None): - enc_feats = encoder_out["encoder_out"][0] # T x B x C - if len(encoder_out["encoder_padding_mask"]) > 0: - src_masks = encoder_out["encoder_padding_mask"][0] # B x T - else: - src_masks 
= None - if self.pred_length_offset: - if src_masks is None: - src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_( - enc_feats.size(0) - ) - else: - src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0) - src_lengs = src_lengs.long() - - if tgt_tokens is not None: - # obtain the length target - tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long() - if self.pred_length_offset: - length_tgt = tgt_lengs - src_lengs + 128 - else: - length_tgt = tgt_lengs - length_tgt = length_tgt.clamp(min=0, max=255) - - else: - # predict the length target (greedy for now) - # TODO: implementing length-beam - pred_lengs = length_out.max(-1)[1] - if self.pred_length_offset: - length_tgt = pred_lengs - 128 + src_lengs - else: - length_tgt = pred_lengs - - return length_tgt - - -@register_model_architecture( - "nonautoregressive_transformer", "nonautoregressive_transformer" -) -def base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # --- special arguments --- - args.sg_length_pred = getattr(args, "sg_length_pred", False) - args.pred_length_offset = getattr(args, "pred_length_offset", False) - args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) - args.src_embedding_copy = getattr(args, "src_embedding_copy", False) - - -@register_model_architecture( - "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de" -) -def nonautoregressive_transformer_wmt_en_de(args): - base_architecture(args) diff --git 
a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py deleted file mode 100644 index b07e274d202414ce40d00aa64a27cf97bb49c1c3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/wav2vec_extract_features.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os -import os.path as osp -import tqdm -import torch -import torch.nn.functional as F -from shutil import copyfile - -from npy_append_array import NpyAppendArray - -import fairseq -import soundfile as sf - - -def get_parser(): - parser = argparse.ArgumentParser( - description="compute kmeans codebook from kaldi-computed feats" - ) - # fmt: off - parser.add_argument('data', help='location of tsv files') - parser.add_argument('--split', help='which split to read', required=True) - parser.add_argument('--save-dir', help='where to save the output', required=True) - parser.add_argument('--checkpoint', type=str, help='checkpoint for wav2vec ctc model', required=True) - parser.add_argument('--layer', type=int, default=14, help='which layer to use') - # fmt: on - - return parser - - -class Wav2VecFeatureReader(object): - def __init__(self, cp_file, layer): - model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task( - [cp_file] - ) - model = model[0] - model.eval() - model.cuda() - self.model = model - self.task = task - self.layer = layer - - def read_audio(self, fname): - """Load an audio file and return PCM along with the sample rate""" - wav, sr = sf.read(fname) - assert sr == 16e3 - - return wav - - def get_feats(self, loc): - x = self.read_audio(loc) - with torch.no_grad(): - source = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - assert source.dim() == 1, source.dim() - with torch.no_grad(): - source = F.layer_norm(source, source.shape) - source = source.view(1, -1) - - m_res = self.model(source=source, mask=False, features_only=True, layer=self.layer) - return m_res["x"].squeeze(0).cpu() - - -def get_iterator(args): - with open(osp.join(args.data, args.split) + ".tsv", "r") as fp: - lines = fp.read().split("\n") - root = lines.pop(0).strip() - files = [osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0] - - num = len(files) - reader = Wav2VecFeatureReader(args.checkpoint, args.layer) - - def iterate(): - for fname in files: - w2v_feats = reader.get_feats(fname) - yield w2v_feats - - return iterate, num - - -def main(): - parser = get_parser() - args = parser.parse_args() - - os.makedirs(args.save_dir, exist_ok=True) - - def create_files(dest): - copyfile(osp.join(args.data, args.split) + ".tsv", dest + ".tsv") - if osp.exists(osp.join(args.data, args.split) + ".wrd"): - copyfile(osp.join(args.data, args.split) + ".wrd", dest + ".wrd") - if osp.exists(osp.join(args.data, args.split) + ".phn"): - copyfile(osp.join(args.data, args.split) + ".phn", dest + ".phn") - - if osp.exists(dest + ".npy"): - os.remove(dest + ".npy") - npaa = NpyAppendArray(dest + ".npy") - return npaa - - save_path = osp.join(args.save_dir, args.split) - npaa = create_files(save_path) - - generator, num = get_iterator(args) - iterator = generator() - - with open(save_path + ".lengths", "w") as 
l_f: - for w2v_feats in tqdm.tqdm(iterator, total=num): - print(len(w2v_feats), file=l_f) - - if len(w2v_feats) > 0: - npaa.append(w2v_feats.numpy()) - - -if __name__ == "__main__": - main() diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py deleted file mode 100644 index 09a6c66cf6f4b21c38a7829b029f0ab5deda1f9e..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch -import torch.distributed as dist -from fvcore.nn.distributed import differentiable_all_reduce -from torch import nn -from torch.nn import functional as F - -from detectron2.utils import comm, env - -from .wrappers import BatchNorm2d - - -class FrozenBatchNorm2d(nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - It contains non-trainable buffers called - "weight" and "bias", "running_mean", "running_var", - initialized to perform identity transformation. - - The pre-trained backbone models from Caffe2 only contain "weight" and "bias", - which are computed from the original four parameters of BN. - The affine transform `x * weight + bias` will perform the equivalent - computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. - When loading a backbone model from Caffe2, "running_mean" and "running_var" - will be left unchanged as identity transformation. - - Other pre-trained backbone models may contain all 4 parameters. - - The forward is implemented by `F.batch_norm(..., training=False)`. - """ - - _version = 3 - - def __init__(self, num_features, eps=1e-5): - super().__init__() - self.num_features = num_features - self.eps = eps - self.register_buffer("weight", torch.ones(num_features)) - self.register_buffer("bias", torch.zeros(num_features)) - self.register_buffer("running_mean", torch.zeros(num_features)) - self.register_buffer("running_var", torch.ones(num_features) - eps) - - def forward(self, x): - if x.requires_grad: - # When gradients are needed, F.batch_norm will use extra memory - # because its backward op computes gradients for weight/bias as well. - scale = self.weight * (self.running_var + self.eps).rsqrt() - bias = self.bias - self.running_mean * scale - scale = scale.reshape(1, -1, 1, 1) - bias = bias.reshape(1, -1, 1, 1) - out_dtype = x.dtype # may be half - return x * scale.to(out_dtype) + bias.to(out_dtype) - else: - # When gradients are not needed, F.batch_norm is a single fused op - # and provide more optimization opportunities. 
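- # training=False forces normalization with the frozen running statistics
- # and guarantees the buffers are never updated in place.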
- return F.batch_norm( - x, - self.running_mean, - self.running_var, - self.weight, - self.bias, - training=False, - eps=self.eps, - ) - - def _load_from_state_dict( - self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ): - version = local_metadata.get("version", None) - - if version is None or version < 2: - # No running_mean/var in early versions - # This will silent the warnings - if prefix + "running_mean" not in state_dict: - state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) - if prefix + "running_var" not in state_dict: - state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) - - super()._load_from_state_dict( - state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ) - - def __repr__(self): - return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) - - @classmethod - def convert_frozen_batchnorm(cls, module): - """ - Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. - - Args: - module (torch.nn.Module): - - Returns: - If module is BatchNorm/SyncBatchNorm, returns a new module. - Otherwise, in-place convert module and return it. - - Similar to convert_sync_batchnorm in - https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py - """ - bn_module = nn.modules.batchnorm - bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) - res = module - if isinstance(module, bn_module): - res = cls(module.num_features) - if module.affine: - res.weight.data = module.weight.data.clone().detach() - res.bias.data = module.bias.data.clone().detach() - res.running_mean.data = module.running_mean.data - res.running_var.data = module.running_var.data - res.eps = module.eps - else: - for name, child in module.named_children(): - new_child = cls.convert_frozen_batchnorm(child) - if new_child is not child: - res.add_module(name, new_child) - return res - - -def get_norm(norm, out_channels): - """ - Args: - norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; - or a callable that takes a channel number and returns - the normalization layer as a nn.Module. - - Returns: - nn.Module or None: the normalization layer - """ - if norm is None: - return None - if isinstance(norm, str): - if len(norm) == 0: - return None - norm = { - "BN": BatchNorm2d, - # Fixed in https://github.com/pytorch/pytorch/pull/36382 - "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, - "FrozenBN": FrozenBatchNorm2d, - "GN": lambda channels: nn.GroupNorm(32, channels), - # for debugging: - "nnSyncBN": nn.SyncBatchNorm, - "naiveSyncBN": NaiveSyncBatchNorm, - # expose stats_mode N as an option to caller, required for zero-len inputs - "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"), - }[norm] - return norm(out_channels) - - -class NaiveSyncBatchNorm(BatchNorm2d): - """ - In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient - when the batch size on each worker is different. - (e.g., when scale augmentation is used, or when it is applied to mask head). - - This is a slower but correct alternative to `nn.SyncBatchNorm`. - - Note: - There isn't a single definition of Sync BatchNorm. - - When ``stats_mode==""``, this module computes overall statistics by using - statistics of each worker with equal weight. The result is true statistics - of all samples (as if they are all on one worker) only when all workers - have the same (N, H, W). 
This mode does not support inputs with zero batch size. - - When ``stats_mode=="N"``, this module computes overall statistics by weighting - the statistics of each worker by their ``N``. The result is true statistics - of all samples (as if they are all on one worker) only when all workers - have the same (H, W). It is slower than ``stats_mode==""``. - - Even though the result of this module may not be the true statistics of all samples, - it may still be reasonable because it might be preferable to assign equal weights - to all workers, regardless of their (H, W) dimension, instead of putting larger weight - on larger images. From preliminary experiments, little difference is found between such - a simplified implementation and an accurate computation of overall mean & variance. - """ - - def __init__(self, *args, stats_mode="", **kwargs): - super().__init__(*args, **kwargs) - assert stats_mode in ["", "N"] - self._stats_mode = stats_mode - - def forward(self, input): - if comm.get_world_size() == 1 or not self.training: - return super().forward(input) - - B, C = input.shape[0], input.shape[1] - - half_input = input.dtype == torch.float16 - if half_input: - # fp16 does not have good enough numerics for the reduction here - input = input.float() - mean = torch.mean(input, dim=[0, 2, 3]) - meansqr = torch.mean(input * input, dim=[0, 2, 3]) - - if self._stats_mode == "": - assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' - vec = torch.cat([mean, meansqr], dim=0) - vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) - mean, meansqr = torch.split(vec, C) - momentum = self.momentum - else: - if B == 0: - vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) - vec = vec + input.sum() # make sure there is gradient w.r.t input - else: - vec = torch.cat( - [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0 - ) - vec = differentiable_all_reduce(vec * B) - - total_batch = vec[-1].detach() - momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 - mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero - - var = meansqr - mean * mean - invstd = torch.rsqrt(var + self.eps) - scale = self.weight * invstd - bias = self.bias - mean * scale - scale = scale.reshape(1, -1, 1, 1) - bias = bias.reshape(1, -1, 1, 1) - - self.running_mean += momentum * (mean.detach() - self.running_mean) - self.running_var += momentum * (var.detach() - self.running_var) - ret = input * scale + bias - if half_input: - ret = ret.half() - return ret - - -class CycleBatchNormList(nn.ModuleList): - """ - Implement domain-specific BatchNorm by cycling. - - When a BatchNorm layer is used for multiple input domains or input - features, it might need to maintain separate test-time statistics - for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`. - - This module implements it by using N separate BN layers - and it cycles through them every time a forward() is called. - - NOTE: The caller of this module MUST guarantee to always call - this module by multiple of N times. Otherwise its test-time statistics - will be incorrect. - """ - - def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs): - """ - Args: - length: number of BatchNorm layers to cycle. - bn_class: the BatchNorm class to use - kwargs: arguments of the BatchNorm class, such as num_features.
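Example (illustrative): ``CycleBatchNormList(length=2, num_features=64)`` builds two ``nn.BatchNorm2d(64, affine=False)`` layers plus one shared affine transform, and alternates between the two BN layers on successive ``forward()`` calls.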
- """ - self._affine = kwargs.pop("affine", True) - super().__init__([bn_class(**kwargs, affine=False) for k in range(length)]) - if self._affine: - # shared affine, domain-specific BN - channels = self[0].num_features - self.weight = nn.Parameter(torch.ones(channels)) - self.bias = nn.Parameter(torch.zeros(channels)) - self._pos = 0 - - def forward(self, x): - ret = self[self._pos](x) - self._pos = (self._pos + 1) % len(self) - - if self._affine: - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - return ret * w + b - else: - return ret - - def extra_repr(self): - return f"affine={self._affine}" diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/sampler.py b/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/sampler.py deleted file mode 100644 index e4784d068f808a40a56c8e748d83175f7f4e6233..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/sampler.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Samplers, conforming to the glTF 2.0 standards as specified in -https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-sampler - -Author: Matthew Matl -""" -from .constants import GLTF - - -class Sampler(object): - """Texture sampler properties for filtering and wrapping modes. - - Parameters - ---------- - name : str, optional - The user-defined name of this object. - magFilter : int, optional - Magnification filter. Valid values: - - :attr:`.GLTF.NEAREST` - - :attr:`.GLTF.LINEAR` - minFilter : int, optional - Minification filter. Valid values: - - :attr:`.GLTF.NEAREST` - - :attr:`.GLTF.LINEAR` - - :attr:`.GLTF.NEAREST_MIPMAP_NEAREST` - - :attr:`.GLTF.LINEAR_MIPMAP_NEAREST` - - :attr:`.GLTF.NEAREST_MIPMAP_LINEAR` - - :attr:`.GLTF.LINEAR_MIPMAP_LINEAR` - wrapS : int, optional - S (U) wrapping mode. Valid values: - - :attr:`.GLTF.CLAMP_TO_EDGE` - - :attr:`.GLTF.MIRRORED_REPEAT` - - :attr:`.GLTF.REPEAT` - wrapT : int, optional - T (V) wrapping mode. Valid values: - - :attr:`.GLTF.CLAMP_TO_EDGE` - - :attr:`.GLTF.MIRRORED_REPEAT` - - :attr:`.GLTF.REPEAT` - """ - - def __init__(self, - name=None, - magFilter=None, - minFilter=None, - wrapS=GLTF.REPEAT, - wrapT=GLTF.REPEAT): - self.name = name - self.magFilter = magFilter - self.minFilter = minFilter - self.wrapS = wrapS - self.wrapT = wrapT - - @property - def name(self): - """str : The user-defined name of this object. - """ - return self._name - - @name.setter - def name(self, value): - if value is not None: - value = str(value) - self._name = value - - @property - def magFilter(self): - """int : Magnification filter type. - """ - return self._magFilter - - @magFilter.setter - def magFilter(self, value): - self._magFilter = value - - @property - def minFilter(self): - """int : Minification filter type. - """ - return self._minFilter - - @minFilter.setter - def minFilter(self, value): - self._minFilter = value - - @property - def wrapS(self): - """int : S (U) wrapping mode. - """ - return self._wrapS - - @wrapS.setter - def wrapS(self, value): - self._wrapS = value - - @property - def wrapT(self): - """int : T (V) wrapping mode. 
- """ - return self._wrapT - - @wrapT.setter - def wrapT(self, value): - self._wrapT = value diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/midas_net_custom.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/midas_net_custom.py deleted file mode 100644 index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/midas_net_custom.py +++ /dev/null @@ -1,128 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. -This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder - - -class MidasNet_small(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, - blocks={'expand': True}): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet_small, self).__init__() - - use_pretrained = False if path else True - - self.channels_last = channels_last - self.blocks = blocks - self.backbone = backbone - - self.groups = 1 - - features1=features - features2=features - features3=features - features4=features - self.expand = False - if "expand" in self.blocks and self.blocks['expand'] == True: - self.expand = True - features1=features - features2=features*2 - features3=features*4 - features4=features*8 - - self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) - - self.scratch.activation = nn.ReLU(False) - - self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) - - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), - self.scratch.activation, - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - nn.Identity(), - ) - - if path: - self.load(path) - - - def forward(self, x): - """Forward pass. 
- - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - if self.channels_last==True: - print("self.channels_last = ", self.channels_last) - x = x.contiguous(memory_format=torch.channels_last) - - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) - - - -def fuse_model(m): - prev_previous_type = nn.Identity() - prev_previous_name = '' - previous_type = nn.Identity() - previous_name = '' - for name, module in m.named_modules(): - if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: - # print("FUSED ", prev_previous_name, previous_name, name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) - elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: - # print("FUSED ", prev_previous_name, previous_name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) - # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: - # print("FUSED ", previous_name, name) - # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) - - prev_previous_type = previous_type - prev_previous_name = previous_name - previous_type = type(module) - previous_name = name \ No newline at end of file diff --git a/spaces/PascalNotin/Tranception_design/tranception/utils/scoring_utils.py b/spaces/PascalNotin/Tranception_design/tranception/utils/scoring_utils.py deleted file mode 100644 index 4ed9a6f249320eb1184942d3a410b900f6b17692..0000000000000000000000000000000000000000 --- a/spaces/PascalNotin/Tranception_design/tranception/utils/scoring_utils.py +++ /dev/null @@ -1,203 +0,0 @@ -import os -import tqdm -import re -import numpy as np -import pandas as pd - -import torch -from torch.nn import CrossEntropyLoss, NLLLoss -from torch.utils.data.sampler import Sampler, SequentialSampler - -from transformers import DataCollatorForLanguageModeling, PreTrainedTokenizerFast -from datasets import Dataset - -AA_vocab = "ACDEFGHIKLMNPQRSTVWY" - -def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab=AA_vocab): - """ - Helper function that mutates an input sequence (focus_seq) via an input mutation triplet (substitutions only). - Mutation triplets are typically based on 1-indexing: start_idx is used for switching to 0-indexing.
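Example (illustrative): get_mutated_sequence("MKT", "K2A") applies K2A at 0-indexed position 1 and returns "MAT"; multiple substitutions are separated by colons, e.g. "K2A:T3S".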
- """ - mutated_seq = list(focus_seq) - for mutation in mutant.split(":"): - try: - from_AA, position, to_AA = mutation[0], int(mutation[1:-1]), mutation[-1] - except: - print("Issue with mutant: "+str(mutation)) - relative_position = position - start_idx - assert (from_AA==focus_seq[relative_position]), "Invalid from_AA or mutant position: "+str(mutation)+" from_AA: "+str(from_AA) + " relative pos: "+str(relative_position) + " focus_seq: "+str(focus_seq) - assert (to_AA in AA_vocab) , "Mutant to_AA is invalid: "+str(mutation) - mutated_seq[relative_position] = to_AA - return "".join(mutated_seq) - -def nanmean(v, *args, inplace=False, **kwargs): - if not inplace: - v = v.clone() - is_nan = torch.isnan(v) - v[is_nan] = 0 - return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs) - -def nansum(v, *args, inplace=False, **kwargs): - if not inplace: - v = v.clone() - is_nan = torch.isnan(v) - v[is_nan] = 0 - return v.sum(*args, **kwargs) - -def get_optimal_window(mutation_position_relative, seq_len_wo_special, model_window): - """ - Helper function that selects an optimal sequence window that fits the maximum model context size. - If the sequence length is less than the maximum context size, the full sequence is returned. - """ - half_model_window = model_window // 2 - if seq_len_wo_special <= model_window: - return [0,seq_len_wo_special] - elif mutation_position_relative < half_model_window: - return [0,model_window] - elif mutation_position_relative >= seq_len_wo_special - half_model_window: - return [seq_len_wo_special - model_window, seq_len_wo_special] - else: - return [max(0,mutation_position_relative-half_model_window), min(seq_len_wo_special,mutation_position_relative+half_model_window)] - -def sequence_replace_single(sequence, char_to_replace, char_replacements): - char_replacements = list(char_replacements) - positions = [m.start() for m in re.finditer(char_to_replace, sequence)] - replacements = np.random.choice(a=char_replacements, size=len(positions), replace=True) - sequence=list(sequence) - for idx, position in enumerate(positions): - sequence[position]=replacements[idx] - return ''.join(sequence) - -def sequence_replace(sequences, char_to_replace, char_replacements): - """ - Helper function that replaces all Amino Acids passsed in via char_to_replace (as a string of AAs) with Amino Acids sampled from char_replacements (also a string of eligible AAs). - """ - return [sequence_replace_single(sequence, char_to_replace, char_replacements) for sequence in sequences] - -def get_tranception_scores_mutated_sequences(model, mutated_sequence_df, batch_size_inference, score_var_name, target_seq, num_workers=10, reverse=False, indel_mode=False): - """ - Helper function that takes as input a set of mutated sequences (in a pandas dataframe) and returns scores for each mutation. - If target_seq is not None, returns the delta log likelihood wrt that target sequence -- otherwise returns the log likelihood of the protein sequences. 
- """ - scores = {} - scores['mutated_sequence']=[] - scores['sliced_mutated_sequence']=[] - scores['window_start']=[] - scores['window_end']=[] - scores['score']=[] - with torch.no_grad(): - ds = Dataset.from_pandas(mutated_sequence_df) - ds.set_transform(model.encode_batch) - data_collator = DataCollatorForLanguageModeling( - tokenizer=model.config.tokenizer, - mlm=False) - sampler = SequentialSampler(ds) - ds_loader = torch.utils.data.DataLoader(ds, batch_size=batch_size_inference, sampler=sampler, collate_fn=data_collator, num_workers=num_workers, pin_memory=True, drop_last=False) - mutant_index=0 - for encoded_batch in tqdm.tqdm(ds_loader): - full_batch_length = len(encoded_batch['input_ids']) - mutated_sequence = np.array(mutated_sequence_df['mutated_sequence'][mutant_index:mutant_index+full_batch_length]) - scores['mutated_sequence'] += list(mutated_sequence) - sliced_mutated_sequence = np.array(mutated_sequence_df['sliced_mutated_sequence'][mutant_index:mutant_index+full_batch_length]) - scores['sliced_mutated_sequence'] += list(sliced_mutated_sequence) - window_start = np.array(mutated_sequence_df['window_start'][mutant_index:mutant_index+full_batch_length]) - scores['window_start'] += list(window_start) - window_end = np.array(mutated_sequence_df['window_end'][mutant_index:mutant_index+full_batch_length]) - scores['window_end'] += list(window_end) - for k, v in encoded_batch.items(): - if isinstance(v, torch.Tensor): - encoded_batch[k] = v.to(model.device) - shift_labels = encoded_batch['labels'][..., 1:].contiguous() - if (hasattr(model.config,"retrieval_aggregation_mode")) and (model.config.retrieval_aggregation_mode is not None): - if reverse: - encoded_batch['flip']=torch.tensor([1]*full_batch_length) - encoded_batch['start_slice']=window_start - encoded_batch['end_slice']=window_end - encoded_batch['mutated_sequence'] = mutated_sequence #only mutated_sequence is flipped if the scoring_mirror branch of score_mutants. 
No need to flip mutated_sequence for MSA re-aligning - fused_shift_log_probas=model(**encoded_batch,return_dict=True).fused_shift_log_probas - loss_fct = NLLLoss(reduction='none') - loss = - loss_fct(input=fused_shift_log_probas.view(-1, fused_shift_log_probas.size(-1)), target=shift_labels.view(-1)).view(fused_shift_log_probas.shape[0],fused_shift_log_probas.shape[1]) - else: - lm_logits=model(**encoded_batch,return_dict=True).logits - shift_logits = lm_logits[..., :-1, :].contiguous() - loss_fct = CrossEntropyLoss(reduction='none') - loss = - loss_fct(input=shift_logits.view(-1, shift_logits.size(-1)), target=shift_labels.view(-1)).view(shift_logits.shape[0],shift_logits.shape[1]) - mask = encoded_batch['attention_mask'][..., 1:].float() - mask[mask==0]=float('nan') - loss *= mask - loss = nansum(loss, dim=1) - scores_batch = list(loss.cpu().numpy()) - full_batch_length = len(encoded_batch['input_ids']) - scores['score'] += scores_batch - mutant_index+=full_batch_length - scores = pd.DataFrame(scores) - if model.config.scoring_window=="sliding": - scores = scores[['mutated_sequence','score']].groupby('mutated_sequence').sum().reset_index() #We need to aggregate scores when using sliding mode - scores['score'] = scores['score'] / scores['mutated_sequence'].map(lambda x: len(x)) - if target_seq is not None: - scores_mutated_seq = scores[scores.mutated_sequence != target_seq] - scores_wt = scores[scores.mutated_sequence == target_seq] - merge_delta = 'mutated_sequence' if model.config.scoring_window=="sliding" else 'window_start' - if model.config.scoring_window=="optimal": - delta_scores = pd.merge(scores_mutated_seq,scores_wt,how='left',on=[merge_delta],suffixes=('','_wt')) - delta_scores[score_var_name] = delta_scores['score'] - delta_scores['score_wt'] - elif model.config.scoring_window=="sliding": - delta_scores = scores_mutated_seq.copy() - delta_scores[score_var_name] = delta_scores['score'] - list(scores_wt['score'])[0] # In sliding mode there is a single reference window for the WT - return delta_scores[['mutated_sequence',score_var_name]] - else: - scores[score_var_name] = scores['score'] - return scores[['mutated_sequence',score_var_name]] - -def get_sequence_slices(df, target_seq, model_context_len, start_idx=1, scoring_window="optimal", indel_mode=False): - """ - Helper function that takes as input a (pandas) dataframe df that contains a list of mutant triplets (substitutions) or full mutated sequences (indels) for scoring. - It returns a processed DMS in which sequences have been sliced to satisfy the maximum context window of the model. - df: (dataframe) Input dataframe to be processed - target_seq: (string) Full reference sequence (wild type) that is mutated in the DMS assay. - model_context_len: (int) Maximum context size for the model. - start_idx: (int) Integer to move to 0-indexing of positions (mutation triplet are typically based on 1-indexing). - scoring_window: (string) Method to slice sequences longer than maximum context size: - - optimal selects a single window as large as possible via the get_optimal_window function (this is the default) - - sliding splits the full sequence in contiguous (non-overlapping) chunks that are of size equal to the max context (except the last chunk which may be shorter) - indel_mode: (bool) Flag to be used when scoring insertions and deletions. Otherwise assumes substitutions. - Note: when scoring indels for sequences that would be longer than the model max context length, it is preferable to use the "sliding" scoring_window. 
Use "optimal" otherwise. - """ - len_target_seq = len(target_seq) - num_mutants = len(df['mutated_sequence']) - df=df.reset_index(drop=True) - if scoring_window=="optimal": - df['mutation_barycenter'] = df['mutant'].apply(lambda x: int(np.array([int(mutation[1:-1]) - start_idx for mutation in x.split(':')]).mean())) if not indel_mode else df['mutated_sequence'].apply(lambda x: len(x)//2) - df['scoring_optimal_window'] = df['mutation_barycenter'].apply(lambda x: get_optimal_window(x, len_target_seq, model_context_len)) if not indel_mode else df['mutated_sequence'].apply(lambda x: (0,len(x))) - df['sliced_mutated_sequence'] = [df['mutated_sequence'][index][df['scoring_optimal_window'][index][0]:df['scoring_optimal_window'][index][1]] for index in range(num_mutants)] - df['window_start'] = df['scoring_optimal_window'].map(lambda x: x[0]) - df['window_end'] = df['scoring_optimal_window'].map(lambda x: x[1]) - del df['scoring_optimal_window'], df['mutation_barycenter'] - if 'mutant' in df: del df['mutant'] - df_wt=df.copy() - df_wt['mutated_sequence'] = [target_seq] * num_mutants - if indel_mode: # For indels, we set the wild type reference to be always the same (full length) sequence. We assume here that the length is lower than model context size (otherwise "Sliding" mode should be used) - df_wt['window_end'] = df_wt['mutated_sequence'].map(lambda x:len(x)) - df_wt['sliced_mutated_sequence'] = [target_seq[df_wt['window_start'][index]:df_wt['window_end'][index]] for index in range(num_mutants)] - df = pd.concat([df,df_wt], axis=0) - df = df.drop_duplicates() - elif scoring_window=="sliding": - num_windows = 1 + int( len_target_seq / model_context_len) - df_list=[] - start=0 - for window_index in range(1, num_windows+1): - df_sliced = df.copy() - df_sliced['sliced_mutated_sequence'] = df_sliced['mutated_sequence'].map(lambda x: x[start:start+model_context_len]) - df_sliced['window_start'] = [start] * num_mutants - df_sliced['window_end'] = df_sliced['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) - df_sliced_wt = df_sliced.copy() - df_sliced_wt['mutated_sequence'] = [target_seq] * num_mutants - df_sliced_wt['sliced_mutated_sequence'] = df_sliced_wt['mutated_sequence'].map(lambda x: x[start:start+model_context_len]) - df_sliced_wt['window_end'] = df_sliced_wt['mutated_sequence'].map(lambda x: min(len(x), start+model_context_len)) #Need to adjust end index if WT and sequence are not same full length - df_list.append(df_sliced) - df_list.append(df_sliced_wt) - start += model_context_len - df_final = pd.concat(df_list,axis=0) - if 'mutant' in df_final: del df_final['mutant'] - df = df_final.drop_duplicates() - return df.reset_index(drop=True) \ No newline at end of file diff --git a/spaces/PeepDaSlan9/Universal-NER-UniNER-7B-definition/app.py b/spaces/PeepDaSlan9/Universal-NER-UniNER-7B-definition/app.py deleted file mode 100644 index 2d8cc14f4f09b9ebc028c5b13f7ed39a6a523418..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Universal-NER-UniNER-7B-definition/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Universal-NER/UniNER-7B-definition").launch() \ No newline at end of file diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/inverted_residual.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/inverted_residual.py deleted file mode 100644 index 53b8fcd41f71d814738f1ac3f5acd3c3d701bf96..0000000000000000000000000000000000000000 --- 
a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/inverted_residual.py +++ /dev/null @@ -1,208 +0,0 @@ -from annotator.uniformer.mmcv.cnn import ConvModule -from torch import nn -from torch.utils import checkpoint as cp - -from .se_layer import SELayer - - -class InvertedResidual(nn.Module): - """InvertedResidual block for MobileNetV2. - - Args: - in_channels (int): The input channels of the InvertedResidual block. - out_channels (int): The output channels of the InvertedResidual block. - stride (int): Stride of the middle (first) 3x3 convolution. - expand_ratio (int): Adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - dilation (int): Dilation rate of depthwise conv. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - stride, - expand_ratio, - dilation=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - with_cp=False): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2], f'stride must be in [1, 2]. ' \ - f'But received {stride}.' - self.with_cp = with_cp - self.use_res_connect = self.stride == 1 and in_channels == out_channels - hidden_dim = int(round(in_channels * expand_ratio)) - - layers = [] - if expand_ratio != 1: - layers.append( - ConvModule( - in_channels=in_channels, - out_channels=hidden_dim, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - layers.extend([ - ConvModule( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - groups=hidden_dim, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - in_channels=hidden_dim, - out_channels=out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - - def _inner_forward(x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class InvertedResidualV3(nn.Module): - """Inverted Residual Block for MobileNetV3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not.
Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False): - super(InvertedResidualV3, self).__init__() - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2] - self.with_cp = with_cp - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=dict( - type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + out - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/spaces/Pie31415/control-animation/text_to_animation/models/controlnet_flax.py b/spaces/Pie31415/control-animation/text_to_animation/models/controlnet_flax.py deleted file mode 100644 index 8f7582434dea96d1ac084ec217ec8a7cd6bde274..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/text_to_animation/models/controlnet_flax.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
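As a usage sketch for the InvertedResidual block from the deleted inverted_residual.py above (the shapes and channel counts are illustrative; assumes torch and the annotator.uniformer modules are importable):

import torch

# stride 1 with in_channels == out_channels -> the residual shortcut is used
block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
# stride 2 halves the spatial size and disables the shortcut
down = InvertedResidual(in_channels=32, out_channels=64, stride=2, expand_ratio=6)
x = torch.randn(1, 32, 56, 56)
print(block(x).shape)  # torch.Size([1, 32, 56, 56])
print(down(x).shape)   # torch.Size([1, 64, 28, 28])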
-from typing import Tuple, Union - -import flax -import flax.linen as nn -import jax -import jax.numpy as jnp -from flax.core.frozen_dict import FrozenDict - -from diffusers.configuration_utils import ConfigMixin, flax_register_to_config -from diffusers.utils import BaseOutput -from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps -from diffusers.models.modeling_flax_utils import FlaxModelMixin -from .unet_2d_blocks_flax import ( - FlaxCrossAttnDownBlock2D, - FlaxDownBlock2D, - FlaxUNetCrossAttnMidBlock2D, -) - - -@flax.struct.dataclass -class FlaxControlNetOutput(BaseOutput): - down_block_res_samples: jnp.ndarray - mid_block_res_sample: jnp.ndarray - - -class FlaxControlNetConditioningEmbedding(nn.Module): - conditioning_embedding_channels: int - block_out_channels: Tuple[int] = (16, 32, 96, 256) - dtype: jnp.dtype = jnp.float32 - - def setup(self): - self.conv_in = nn.Conv( - self.block_out_channels[0], - kernel_size=(3, 3), - padding=((1, 1), (1, 1)), - dtype=self.dtype, - ) - - blocks = [] - for i in range(len(self.block_out_channels) - 1): - channel_in = self.block_out_channels[i] - channel_out = self.block_out_channels[i + 1] - conv1 = nn.Conv( - channel_in, - kernel_size=(3, 3), - padding=((1, 1), (1, 1)), - dtype=self.dtype, - ) - blocks.append(conv1) - conv2 = nn.Conv( - channel_out, - kernel_size=(3, 3), - strides=(2, 2), - padding=((1, 1), (1, 1)), - dtype=self.dtype, - ) - blocks.append(conv2) - self.blocks = blocks - - self.conv_out = nn.Conv( - self.conditioning_embedding_channels, - kernel_size=(3, 3), - padding=((1, 1), (1, 1)), - kernel_init=nn.initializers.zeros_init(), - bias_init=nn.initializers.zeros_init(), - dtype=self.dtype, - ) - - def __call__(self, conditioning): - embedding = self.conv_in(conditioning) - embedding = nn.silu(embedding) - - for block in self.blocks: - embedding = block(embedding) - embedding = nn.silu(embedding) - - embedding = self.conv_out(embedding) - - return embedding - - -@flax_register_to_config -class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin): - r""" - Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN - [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized - training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the - convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides - (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full - model) to encode image-space conditions ... into feature maps ..." - - This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library - implements for all the models (such as downloading or saving, etc.) - - Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) - subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to - general usage and behavior. 
- - Finally, this model supports inherent JAX features such as: - - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) - - Parameters: - sample_size (`int`, *optional*): - The size of the input sample. - in_channels (`int`, *optional*, defaults to 4): - The number of channels in the input sample. - down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): - The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D", - "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D" - block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): - The tuple of output channels for each block. - layers_per_block (`int`, *optional*, defaults to 2): - The number of layers per block. - attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): - The dimension of the attention heads. - cross_attention_dim (`int`, *optional*, defaults to 768): - The dimension of the cross attention features. - dropout (`float`, *optional*, defaults to 0): - Dropout probability for down, up and bottleneck blocks. - flip_sin_to_cos (`bool`, *optional*, defaults to `True`): - Whether to flip the sin to cos in the time embedding. - freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. - controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`): - The channel order of conditional image. 
Will convert it to `rgb` if it's `bgr` - conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`): - The tuple of output channel for each block in conditioning_embedding layer - - - """ - sample_size: int = 32 - in_channels: int = 4 - down_block_types: Tuple[str] = ( - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "DownBlock2D", - ) - only_cross_attention: Union[bool, Tuple[bool]] = False - block_out_channels: Tuple[int] = (320, 640, 1280, 1280) - layers_per_block: int = 2 - attention_head_dim: Union[int, Tuple[int]] = 8 - cross_attention_dim: int = 1280 - dropout: float = 0.0 - use_linear_projection: bool = False - dtype: jnp.dtype = jnp.float32 - flip_sin_to_cos: bool = True - freq_shift: int = 0 - controlnet_conditioning_channel_order: str = "rgb" - conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256) - - def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict: - # init input tensors - sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) - sample = jnp.zeros(sample_shape, dtype=jnp.float32) - timesteps = jnp.ones((1,), dtype=jnp.int32) - encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) - controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) - controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) - - params_rng, dropout_rng = jax.random.split(rng) - rngs = {"params": params_rng, "dropout": dropout_rng} - - return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] - - def setup(self): - block_out_channels = self.block_out_channels - time_embed_dim = block_out_channels[0] * 4 - - # input - self.conv_in = nn.Conv( - block_out_channels[0], - kernel_size=(3, 3), - strides=(1, 1), - padding=((1, 1), (1, 1)), - dtype=self.dtype, - ) - - # time - self.time_proj = FlaxTimesteps( - block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift - ) - self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) - - self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding( - conditioning_embedding_channels=block_out_channels[0], - block_out_channels=self.conditioning_embedding_out_channels, - ) - - only_cross_attention = self.only_cross_attention - if isinstance(only_cross_attention, bool): - only_cross_attention = (only_cross_attention,) * len(self.down_block_types) - - attention_head_dim = self.attention_head_dim - if isinstance(attention_head_dim, int): - attention_head_dim = (attention_head_dim,) * len(self.down_block_types) - - # down - down_blocks = [] - controlnet_down_blocks = [] - - output_channel = block_out_channels[0] - - controlnet_block = nn.Conv( - output_channel, - kernel_size=(1, 1), - padding="VALID", - kernel_init=nn.initializers.zeros_init(), - bias_init=nn.initializers.zeros_init(), - dtype=self.dtype, - ) - controlnet_down_blocks.append(controlnet_block) - - for i, down_block_type in enumerate(self.down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - - if down_block_type == "CrossAttnDownBlock2D": - down_block = FlaxCrossAttnDownBlock2D( - in_channels=input_channel, - out_channels=output_channel, - dropout=self.dropout, - num_layers=self.layers_per_block, - attn_num_head_channels=attention_head_dim[i], - add_downsample=not is_final_block, - use_linear_projection=self.use_linear_projection, - 
only_cross_attention=only_cross_attention[i], - dtype=self.dtype, - ) - else: - down_block = FlaxDownBlock2D( - in_channels=input_channel, - out_channels=output_channel, - dropout=self.dropout, - num_layers=self.layers_per_block, - add_downsample=not is_final_block, - dtype=self.dtype, - ) - - down_blocks.append(down_block) - - for _ in range(self.layers_per_block): - controlnet_block = nn.Conv( - output_channel, - kernel_size=(1, 1), - padding="VALID", - kernel_init=nn.initializers.zeros_init(), - bias_init=nn.initializers.zeros_init(), - dtype=self.dtype, - ) - controlnet_down_blocks.append(controlnet_block) - - if not is_final_block: - controlnet_block = nn.Conv( - output_channel, - kernel_size=(1, 1), - padding="VALID", - kernel_init=nn.initializers.zeros_init(), - bias_init=nn.initializers.zeros_init(), - dtype=self.dtype, - ) - controlnet_down_blocks.append(controlnet_block) - - self.down_blocks = down_blocks - self.controlnet_down_blocks = controlnet_down_blocks - - # mid - mid_block_channel = block_out_channels[-1] - self.mid_block = FlaxUNetCrossAttnMidBlock2D( - in_channels=mid_block_channel, - dropout=self.dropout, - attn_num_head_channels=attention_head_dim[-1], - use_linear_projection=self.use_linear_projection, - dtype=self.dtype, - ) - - self.controlnet_mid_block = nn.Conv( - mid_block_channel, - kernel_size=(1, 1), - padding="VALID", - kernel_init=nn.initializers.zeros_init(), - bias_init=nn.initializers.zeros_init(), - dtype=self.dtype, - ) - - def __call__( - self, - sample, - timesteps, - encoder_hidden_states, - controlnet_cond, - conditioning_scale: float = 1.0, - return_dict: bool = True, - train: bool = False, - ) -> Union[FlaxControlNetOutput, Tuple]: - r""" - Args: - sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor - timestep (`jnp.ndarray` or `float` or `int`): timesteps - encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states - controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor - conditioning_scale: (`float`) the scale factor for controlnet outputs - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a - plain tuple. - train (`bool`, *optional*, defaults to `False`): - Use deterministic functions and disable dropout when not training. - - Returns: - [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`: - [`~models.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. - When returning a tuple, the first element is the sample tensor. - """ - channel_order = self.controlnet_conditioning_channel_order - if channel_order == "bgr": - controlnet_cond = jnp.flip(controlnet_cond, axis=1) - - # 1. time - if not isinstance(timesteps, jnp.ndarray): - timesteps = jnp.array([timesteps], dtype=jnp.int32) - elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: - timesteps = timesteps.astype(dtype=jnp.float32) - timesteps = jnp.expand_dims(timesteps, 0) - - t_emb = self.time_proj(timesteps) - t_emb = self.time_embedding(t_emb) - - # 2. pre-process - sample = jnp.transpose(sample, (0, 2, 3, 1)) - sample = self.conv_in(sample) - - controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) - controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) - sample += controlnet_cond - - # 3. 
down - down_block_res_samples = (sample,) - for down_block in self.down_blocks: - if isinstance(down_block, FlaxCrossAttnDownBlock2D): - sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) - else: - sample, res_samples = down_block(sample, t_emb, deterministic=not train) - down_block_res_samples += res_samples - - # 4. mid - sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) - - # 5. controlnet blocks - controlnet_down_block_res_samples = () - for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): - down_block_res_sample = controlnet_block(down_block_res_sample) - controlnet_down_block_res_samples += (down_block_res_sample,) - - down_block_res_samples = controlnet_down_block_res_samples - - mid_block_res_sample = self.controlnet_mid_block(sample) - - # 6. scaling - down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] - mid_block_res_sample *= conditioning_scale - - if not return_dict: - return (down_block_res_samples, mid_block_res_sample) - - return FlaxControlNetOutput( - down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample - ) \ No newline at end of file diff --git a/spaces/Plurigrid/LifeSim/tailwind.config.js b/spaces/Plurigrid/LifeSim/tailwind.config.js deleted file mode 100644 index ae2ae4e6d45276ced9687f8299fc28df003eeb84..0000000000000000000000000000000000000000 --- a/spaces/Plurigrid/LifeSim/tailwind.config.js +++ /dev/null @@ -1,36 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - darkMode: ["class"], - content: [ - './pages/**/*.{ts,tsx}', - './components/**/*.{ts,tsx}', - './app/**/*.{ts,tsx}', - './src/**/*.{ts,tsx}', - ], - theme: { - container: { - center: true, - padding: "2rem", - screens: { - "2xl": "1400px", - }, - }, - extend: { - keyframes: { - "accordion-down": { - from: { height: 0 }, - to: { height: "var(--radix-accordion-content-height)" }, - }, - "accordion-up": { - from: { height: "var(--radix-accordion-content-height)" }, - to: { height: 0 }, - }, - }, - animation: { - "accordion-down": "accordion-down 0.2s ease-out", - "accordion-up": "accordion-up 0.2s ease-out", - }, - }, - }, - plugins: [require("tailwindcss-animate")], -} \ No newline at end of file diff --git a/spaces/PrabhuKiranKonda/Streamlit-PDF-Assistant-Docker/components/sidebar/Auth.py b/spaces/PrabhuKiranKonda/Streamlit-PDF-Assistant-Docker/components/sidebar/Auth.py deleted file mode 100644 index 20ff2a55d371d618617ebf759dc556717ae41c3b..0000000000000000000000000000000000000000 --- a/spaces/PrabhuKiranKonda/Streamlit-PDF-Assistant-Docker/components/sidebar/Auth.py +++ /dev/null @@ -1,203 +0,0 @@ -import pyrebase -import streamlit as st -import os -import re -from dotenv import load_dotenv -load_dotenv() - -config = { - "apiKey": os.getenv("FIREBASE_API") or st.secrets['FIREBASE_API'], - "authDomain": "pdf-assistant-streamlit.firebaseapp.com", - "projectId": "pdf-assistant-streamlit", - "storageBucket": "pdf-assistant-streamlit.appspot.com", - "databaseURL": "https://pdf-assistant-streamlit-default-rtdb.firebaseio.com/", - "messagingSenderId": "327866675115", - "appId": "1:327866675115:web:515fb66ac21218531bacee", - "measurementId": "G-408449KY82", -} - -firebase = pyrebase.initialize_app(config) -db = firebase.database() -auth = firebase.auth() - -def upload_data(uid, data, pdf_file): - pdf_files = db.child("users").child(uid).child("pdf_files").get().val() - - if pdf_files
is None or pdf_file not in pdf_files: - structure = { - pdf_file: { - "Current Prompt": 1, - "Prompts": { - "Prompt 1": data - } - } - } - db.child("users").child(uid).child("pdf_files").update(structure) - else: - current_prompt = db.child("users").child(uid).child("pdf_files").child(pdf_file).child("Current Prompt").get().val() - - current_prompt = int(current_prompt) + 1 if current_prompt is not None else 1 - - new_structure = { - f"Prompt {current_prompt}": data - } - - db.child("users").child(uid).child("pdf_files").child(pdf_file).child("Prompts").update(new_structure) - db.child("users").child(uid).child("pdf_files").child(pdf_file).child("Current Prompt").set(current_prompt) - - -def login_using_email_and_password(email, password): - try: - st.write("Logging in...") - user = auth.sign_in_with_email_and_password(email, password) - username = db.child("users").child(user["localId"]).child('user_data').get().val()['username'] - st.session_state['username'] = username - st.session_state['logged_in'] = True - st.session_state['uuid'] = user["localId"] - st.session_state['login_failed'] = False - return - except Exception as e: - st.session_state['login_failed'] = True - st.session_state['login_exception'] = e - return - - -def signup_using_email_and_password(email, password, username): - try: - user = auth.create_user_with_email_and_password(email, password) - - user = auth.sign_in_with_email_and_password(email, password) - - db.child("users").child(user["localId"]).child("user_data").set( - { - "username": username.upper(), - "email": email, - "password": password, - "uid": user["localId"], - } - ) - st.success(f"Successfully registered with username: {username.upper()}") - st.session_state['logged_in'] = True - st.session_state['username'] = username.upper() - st.session_state['email'] = email - st.session_state['uuid'] = user['localId'] - return - - except Exception as e: - st.session_state['login_failed'] = True - st.session_state['login_exception'] = e - return - - -def authentication_comp(): - st.write('#### 🔐 Accounts') - login_tab, signup_tab = st.tabs(["Login", "Signup"]) - with login_tab: - login_mail = st.text_input("Email", key="login_mail", placeholder="Enter your email") - - if st.session_state['login_btn_clicked'] == True and login_mail == "": - st.caption(":red[Please enter your email]") - - if st.session_state['login_btn_clicked'] == True and st.session_state['login_failed'] == True: - if "INVALID_EMAIL" in str(st.session_state['login_exception']): - st.caption(":red[Invalid email]") - st.session_state['login_failed'] = False - elif "EMAIL_NOT_FOUND" in str(st.session_state['login_exception']): - st.caption(":red[Email not found]") - st.session_state['login_failed'] = False - - login_pwd = st.text_input("Password", type="password", key="login_pwd", placeholder="Enter your password") - - - if st.session_state['login_btn_clicked'] == True and login_pwd == "": - st.caption(":red[Please enter your password]") - - if st.session_state['login_btn_clicked'] == True and st.session_state['login_failed'] == True: - if "INVALID_PASSWORD" in str(st.session_state['login_exception']): - st.caption(":red[Invalid password]") - st.session_state['login_failed'] = False - - login_btn = st.button("Login 🔓") - - if login_btn: - st.session_state['login_btn_clicked'] = True - - if login_mail == "" and login_pwd == "": # both empty - st.error("Please enter your email and password") - - elif login_mail == "": # only email empty - st.error("Please enter your email") - - elif login_pwd == "": # 
only password empty - st.error("Please enter your password") - - else: - login_using_email_and_password(login_mail, login_pwd) - st.experimental_rerun() - - with signup_tab: - signup_username = st.text_input("Username", key="signup_username", placeholder="Username") - if st.session_state.get('signup_btn_clicked') == True and signup_username == "": - st.caption(":red[Please enter your username]") - - signup_mail = st.text_input("Email", key="signup_mail", placeholder="Enter your email") - - if signup_mail != "" and not re.match(r"[^@]+@[^@]+\.[^@]+", signup_mail): # email validation - st.caption(":red[Invalid email address]") - - if st.session_state.get('signup_btn_clicked') == True and signup_mail == "": # if signup button is clicked and email is empty - st.caption(":red[Please enter your email]") - - signup_pwd = st.text_input("Password", type="password", key="signup_pwd", placeholder="Enter your password") # password input - - if len(signup_pwd) >= 1 and len(signup_pwd) < 6: # if password is less than 6 characters - st.caption(":red[Password must be at least 6 characters long.]") - - if st.session_state.get('signup_btn_clicked') == True and signup_pwd == "": # if signup button is clicked and password is empty - st.caption(":red[Please enter your password]") - - confirm_pwd = st.text_input("Confirm Password", type="password", key="confirm_pwd", placeholder="Confirm your password") # confirm password input - if st.session_state.get('signup_btn_clicked') == True and confirm_pwd == "": # if signup button is clicked and confirm password is empty - st.caption(":red[Please confirm your password]") - - if 'signup_btn_clicked' not in st.session_state: # if signup button is not clicked - if len(signup_pwd) >= 1 and len(signup_pwd) >= 6 and len(confirm_pwd) >= 1 and signup_pwd != confirm_pwd: # if password is not empty and confirm password is not empty and passwords do not match - st.caption(":red[Passwords do not match]") - - if st.session_state.get('signup_btn_clicked') == True and signup_pwd != confirm_pwd: # if signup button is clicked and passwords do not match - st.caption(":red[Passwords do not match]") - - signup_btn = st.button("Sign Up and Login 🔓", disabled=len(signup_pwd) >= 1 and len(signup_pwd) < 6) - if signup_btn: - st.session_state['signup_btn_clicked'] = True - - if signup_username == "" and signup_mail == "" and signup_pwd == "" and confirm_pwd == "": # if all fields are empty - st.error("Please enter your username, email and password") - - elif signup_username == "": # if username is empty - st.error("Please enter your username") - - elif signup_mail == "": # if email is empty - st.error("Please enter your email") - - elif signup_pwd == "": # if password is empty - st.error("Please enter your password") - - elif len(signup_pwd) < 6: # if password is less than 6 characters - st.error("Password must be at least 6 characters long") - - elif confirm_pwd == "": # if confirm password is empty - st.error("Please confirm your password") - - elif signup_pwd != confirm_pwd: # if passwords do not match - st.error("Passwords do not match") - - else: - with st.spinner("Signing up..."): - signup_using_email_and_password(signup_mail, signup_pwd, signup_username) - st.experimental_rerun() - - st.caption(":green[Login or Register to save your Prompts/Responses and to view your History]") -if __name__ == "__main__": - authentication_comp() \ No newline at end of file diff --git 
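For orientation, a sketch of the Realtime Database layout that upload_data in the deleted Auth.py above writes (keys and values are illustrative placeholders):

layout = {
    "users": {
        "<uid>": {
            "user_data": {"username": "ALICE", "email": "alice@example.com", "uid": "<uid>"},
            "pdf_files": {
                "<pdf_file>": {
                    "Current Prompt": 2,
                    "Prompts": {"Prompt 1": "<data>", "Prompt 2": "<data>"},
                },
            },
        },
    },
}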
a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/utils/profiler.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/utils/profiler.py deleted file mode 100644 index b45b6d15910b50305c7b212c089ffad3c25b324d..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/utils/profiler.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import typing as tp - -import dora -import torch - - -logger = logging.getLogger(__name__) - - -class Profiler: - """Context manager wrapper for xformers profiler. - """ - def __init__(self, module: torch.nn.Module, enabled: bool = False): - self.profiler: tp.Optional[tp.Any] = None - if enabled: - from xformers.profiler import profile - output_dir = dora.get_xp().folder / 'profiler_data' - logger.info("Profiling activated, results will be saved to %s", output_dir) - self.profiler = profile(output_dir=output_dir, module=module) - - def step(self): - if self.profiler is not None: - self.profiler.step() # type: ignore - - def __enter__(self): - if self.profiler is not None: - return self.profiler.__enter__() # type: ignore - - def __exit__(self, exc_type, exc_value, exc_tb): - if self.profiler is not None: - return self.profiler.__exit__(exc_type, exc_value, exc_tb) # type: ignore diff --git a/spaces/RMXK/RVC_HFF/configs/config.py b/spaces/RMXK/RVC_HFF/configs/config.py deleted file mode 100644 index e3b0205a1f0d62f674b9c3de2c5ab7ee90464945..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/configs/config.py +++ /dev/null @@ -1,265 +0,0 @@ -import argparse -import os -import sys -import json -from multiprocessing import cpu_count - -import torch - -try: - import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import - if torch.xpu.is_available(): - from infer.modules.ipex import ipex_init - ipex_init() -except Exception: - pass - -import logging - -logger = logging.getLogger(__name__) - - -version_config_list = [ - "v1/32k.json", - "v1/40k.json", - "v1/48k.json", - "v2/48k.json", - "v2/32k.json", -] - - -def singleton_variable(func): - def wrapper(*args, **kwargs): - if not wrapper.instance: - wrapper.instance = func(*args, **kwargs) - return wrapper.instance - - wrapper.instance = None - return wrapper - - -@singleton_variable -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.json_config = self.load_config_json() - self.gpu_mem = None - ( - self.python_cmd, - self.listen_port, - self.iscolab, - self.noparallel, - self.noautoopen, - self.paperspace, - self.is_cli, - self.grtheme, - self.dml, - ) = self.arg_parse() - self.instead = "" - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def load_config_json() -> dict: - d = {} - for config_file in version_config_list: - with open(f"configs/{config_file}", "r") as f: - d[config_file] = json.load(f) - return d - - @staticmethod - def arg_parse() -> tuple: - exe = sys.executable or "python" - parser = argparse.ArgumentParser() - parser.add_argument("--port", type=int, default=7865, help="Listen port") - parser.add_argument("--pycmd", type=str, default=exe, help="Python command") - parser.add_argument("--colab", action="store_true", help="Launch in colab") - 
-@singleton_variable -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.json_config = self.load_config_json() - self.gpu_mem = None - ( - self.python_cmd, - self.listen_port, - self.iscolab, - self.noparallel, - self.noautoopen, - self.paperspace, - self.is_cli, - self.grtheme, - self.dml, - ) = self.arg_parse() - self.instead = "" - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def load_config_json() -> dict: - d = {} - for config_file in version_config_list: - with open(f"configs/{config_file}", "r") as f: - d[config_file] = json.load(f) - return d - - @staticmethod - def arg_parse() -> tuple: - exe = sys.executable or "python" - parser = argparse.ArgumentParser() - parser.add_argument("--port", type=int, default=7865, help="Listen port") - parser.add_argument("--pycmd", type=str, default=exe, help="Python command") - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" - ) - parser.add_argument( - "--noautoopen", - action="store_true", - help="Do not open in browser automatically", - ) - parser.add_argument( - "--paperspace", - action="store_true", - help="Note that this argument just shares a gradio link for the web UI. Thus it can be used on other non-local CLI systems.", - ) - parser.add_argument( - "--is_cli", - action="store_true", - help="Use the CLI instead of setting up a gradio UI. This flag will launch an RVC text interface where you can execute functions from infer-web.py!", - ) - - parser.add_argument( - "-t", - "--theme", - help="Theme for Gradio. Format - `JohnSmith9982/small_and_pretty` (no backticks)", - default="JohnSmith9982/small_and_pretty", - type=str - ) - - parser.add_argument( - "--dml", - action="store_true", - help="Use DirectML backend instead of CUDA." - ) - - cmd_opts = parser.parse_args() - - cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865 - - return ( - cmd_opts.pycmd, - cmd_opts.port, - cmd_opts.colab, - cmd_opts.noparallel, - cmd_opts.noautoopen, - cmd_opts.paperspace, - cmd_opts.is_cli, - cmd_opts.theme, - cmd_opts.dml, - ) - - # has_mps is only available in nightly pytorch (for now) and macOS 12.3+. - # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - @staticmethod - def has_xpu() -> bool: - if hasattr(torch, "xpu") and torch.xpu.is_available(): - return True - else: - return False - - def use_fp32_config(self): - for config_file in version_config_list: - self.json_config[config_file]["train"]["fp16_run"] = False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - if self.has_xpu(): - self.device = self.instead = "xpu:0" - self.is_half = True - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "P10" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - logger.info("Found GPU %s, force to fp32", self.gpu_name) - self.is_half = False - self.use_fp32_config() - else: - logger.info("Found GPU %s", self.gpu_name) - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - if self.gpu_mem <= 4: - with open("infer/modules/train/preprocess.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open("infer/modules/train/preprocess.py", "w") as f: - f.write(strr) - elif self.has_mps(): - logger.info("No supported Nvidia GPU found") - self.device = self.instead = "mps" - self.is_half = False - self.use_fp32_config() - else: - logger.info("No supported Nvidia GPU found") - self.device = self.instead = "cpu" - self.is_half = False - self.use_fp32_config() - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # settings for roughly 6 GB of VRAM - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # settings for roughly 5 GB of VRAM - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem is not None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - if self.dml: - logger.info("Use DirectML instead") - if not os.path.exists( - r"runtime\Lib\site-packages\onnxruntime\capi\DirectML.dll" - ): - try: - os.rename( - r"runtime\Lib\site-packages\onnxruntime", - r"runtime\Lib\site-packages\onnxruntime-cuda", - ) - except OSError: - pass - try: - os.rename( - r"runtime\Lib\site-packages\onnxruntime-dml", - r"runtime\Lib\site-packages\onnxruntime", - ) - except OSError: - pass - # if self.device != "cpu": - import torch_directml - - self.device = torch_directml.device(torch_directml.default_device()) - self.is_half = False - else: - if self.instead: - logger.info(f"Use {self.instead} instead") - if not os.path.exists( - r"runtime\Lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll" - ): - try: - os.rename( - r"runtime\Lib\site-packages\onnxruntime", - r"runtime\Lib\site-packages\onnxruntime-dml", - ) - except OSError: - pass - try: - os.rename( - r"runtime\Lib\site-packages\onnxruntime-cuda", - r"runtime\Lib\site-packages\onnxruntime", - ) - except OSError: - pass - return x_pad, x_query, x_center, x_max
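`device_config` above encodes a precision policy: fp16 stays on by default but is forced off for GPU families with weak half-precision support, and the chunking windows (`x_pad`, `x_query`, `x_center`, `x_max`) shrink with available VRAM. The name check reduces to a small pure function; this sketch mirrors only that heuristic (not the VRAM, MPS/XPU, or DirectML branches):

```python
def fp16_is_safe(gpu_name: str) -> bool:
    """Mirror the fp32-fallback heuristic above: 16-series and
    Pascal-era cards (P40/P10, GTX 10x0) are forced to fp32."""
    name = gpu_name.upper()
    if "16" in name and "V100" not in name:
        return False  # GTX 16xx: crippled fp16 throughput
    return not any(tag in name for tag in ("P40", "P10", "1060", "1070", "1080"))

# A 1080 Ti falls back to fp32, while an RTX 3090 keeps fp16.
assert not fp16_is_safe("NVIDIA GeForce GTX 1080 Ti")
assert fp16_is_safe("NVIDIA GeForce RTX 3090")
```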
- """ - - def __init__(self, req: InstallRequirement) -> None: - super().__init__() - self.req = req - - @abc.abstractmethod - def get_metadata_distribution(self) -> BaseDistribution: - raise NotImplementedError() - - @abc.abstractmethod - def prepare_distribution_metadata( - self, - finder: PackageFinder, - build_isolation: bool, - check_build_deps: bool, - ) -> None: - raise NotImplementedError() diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/gl3d/io.py b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/gl3d/io.py deleted file mode 100644 index 9b48a2be61ba799d567b7df45c9b9b011cbef4be..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/gl3d/io.py +++ /dev/null @@ -1,75 +0,0 @@ -import os -import re -import cv2 -import numpy as np - -from ..utils.common import Notify - - -def read_list(list_path): - """Read list.""" - if list_path is None or not os.path.exists(list_path): - print(Notify.FAIL, "Not exist", list_path, Notify.ENDC) - exit(-1) - content = open(list_path).read().splitlines() - return content - - -def load_pfm(pfm_path): - with open(pfm_path, "rb") as fin: - color = None - width = None - height = None - scale = None - data_type = None - header = str(fin.readline().decode("UTF-8")).rstrip() - - if header == "PF": - color = True - elif header == "Pf": - color = False - else: - raise Exception("Not a PFM file.") - - dim_match = re.match(r"^(\d+)\s(\d+)\s$", fin.readline().decode("UTF-8")) - if dim_match: - width, height = map(int, dim_match.groups()) - else: - raise Exception("Malformed PFM header.") - scale = float((fin.readline().decode("UTF-8")).rstrip()) - if scale < 0: # little-endian - data_type = " 0: - img = cv2.resize(img, (config["resize"], config["resize"])) - return img - - -def _parse_depth(depth_paths, idx, config): - depth = load_pfm(depth_paths[idx]) - - if config["resize"] > 0: - target_size = config["resize"] - if config["input_type"] == "raw": - depth = cv2.resize(depth, (int(target_size / 2), int(target_size / 2))) - else: - depth = cv2.resize(depth, (target_size, target_size)) - return depth - - -def _parse_kpts(kpts_paths, idx, config): - kpts = np.load(kpts_paths[idx])["pts"] - # output: [N, 2] (W first H last) - return kpts diff --git a/spaces/Redgon/bingo/tailwind.config.js b/spaces/Redgon/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - 
slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/exp/upernet_global_small/config.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/exp/upernet_global_small/config.py deleted file mode 100644 index 01db96bf9b0be531aa0eaf62fee51543712f8670..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/exp/upernet_global_small/config.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = [ - '../../configs/_base_/models/upernet_uniformer.py', - '../../configs/_base_/datasets/ade20k.py', - '../../configs/_base_/default_runtime.py', - '../../configs/_base_/schedules/schedule_160k.py' -] -model = dict( - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - drop_path_rate=0.25, - windows=False, - hybrid=False - ), - decode_head=dict( - in_channels=[64, 128, 320, 512], - num_classes=150 - ), - auxiliary_head=dict( - in_channels=320, - num_classes=150 - )) - -# AdamW optimizer, no weight decay for position embedding & layer norm in backbone -optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/pipelines/formating.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/pipelines/formating.py deleted file mode 100644 index 5781341bd48766a740f23ebba7a85cf8993642d7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/pipelines/formating.py +++ /dev/null @@ -1,364 +0,0 @@ -from collections.abc import Sequence - -import mmcv -import numpy as np -import torch -from mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. 
- """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor(object): - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. - """ - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor(object): - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img.transpose(2, 0, 1)) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose(object): - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to transpose the channel order of data in results. - - Args: - results (dict): Result dict contains the data to transpose. - - Returns: - dict: The result dict contains the data transposed to \ - ``self.order``. - """ - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer(object): - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))``. 
- """ - - def __init__(self, - fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to \ - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img", - "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". - These fields are formatted as follows. - - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - proposals: (1)to tensor, (2)to DataContainer - - gt_bboxes: (1)to tensor, (2)to DataContainer - - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - - gt_labels: (1)to tensor, (2)to DataContainer - - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with \ - default bundle. - """ - - if 'img' in results: - img = results['img'] - # add default meta keys - results = self._add_default_meta_keys(results) - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC(to_tensor(img), stack=True) - for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: - if key not in results: - continue - results[key] = DC(to_tensor(results[key])) - if 'gt_masks' in results: - results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) - if 'gt_semantic_seg' in results: - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) - return results - - def _add_default_meta_keys(self, results): - """Add default meta keys. - - We set default meta keys including `pad_shape`, `scale_factor` and - `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and - `Pad` are implemented during the whole pipeline. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - results (dict): Updated result dict contains the data to convert. - """ - img = results['img'] - results.setdefault('pad_shape', img.shape) - results.setdefault('scale_factor', 1.0) - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results.setdefault( - 'img_norm_cfg', - dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False)) - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class Collect(object): - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "proposals", "gt_bboxes", - "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". - - The "img_meta" item is always populated. 
The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple \ - (h, w, c). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. - - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' - - -@PIPELINES.register_module() -class WrapFieldsToLists(object): - """Wrap fields of the data dictionary into lists for evaluation. - - This class can be used as a last step of a test or validation - pipeline for single image evaluation or inference. - - Example: - >>> test_pipeline = [ - >>> dict(type='LoadImageFromFile'), - >>> dict(type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - >>> dict(type='Pad', size_divisor=32), - >>> dict(type='ImageToTensor', keys=['img']), - >>> dict(type='Collect', keys=['img']), - >>> dict(type='WrapFieldsToLists') - >>> ] - """ - - def __call__(self, results): - """Call function to wrap fields into lists. - - Args: - results (dict): Result dict contains the data to wrap. - - Returns: - dict: The result dict where value of ``self.keys`` are wrapped \ - into list. 
- """ - - # Wrap dict fields into lists - for key, val in results.items(): - results[key] = [val] - return results - - def __repr__(self): - return f'{self.__class__.__name__}()' diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py deleted file mode 100644 index dbcf79d1ac20ddc32cb1605e06d253803250c855..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py +++ /dev/null @@ -1,150 +0,0 @@ -import copy -import warnings - -import numpy as np -import torch -from mmcv import ConfigDict -from mmcv.ops import nms - -from ..bbox import bbox_mapping_back - - -def merge_aug_proposals(aug_proposals, img_metas, cfg): - """Merge augmented proposals (multiscale, flip, etc.) - - Args: - aug_proposals (list[Tensor]): proposals from different testing - schemes, shape (n, 5). Note that they are not rescaled to the - original image size. - - img_metas (list[dict]): list of image info dict where each dict has: - 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - cfg (dict): rpn test config. - - Returns: - Tensor: shape (n, 4), proposals corresponding to original image scale. - """ - - cfg = copy.deepcopy(cfg) - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - f'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ - f'iou_threshold in nms and ' \ - f'nms_thr at the same time, but get ' \ - f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - recovered_proposals = [] - for proposals, img_info in zip(aug_proposals, img_metas): - img_shape = img_info['img_shape'] - scale_factor = img_info['scale_factor'] - flip = img_info['flip'] - flip_direction = img_info['flip_direction'] - _proposals = proposals.clone() - _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, - scale_factor, flip, - flip_direction) - recovered_proposals.append(_proposals) - aug_proposals = torch.cat(recovered_proposals, dim=0) - merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), - aug_proposals[:, -1].contiguous(), - cfg.nms.iou_threshold) - scores = merged_proposals[:, 4] - _, order = scores.sort(0, descending=True) - num = min(cfg.max_per_img, merged_proposals.shape[0]) - order = order[:num] - merged_proposals = merged_proposals[order, :] - return merged_proposals - - -def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): - """Merge augmented detection bboxes and scores. 
-def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): - """Merge augmented detection bboxes and scores. - - Args: - aug_bboxes (list[Tensor]): shape (n, 4*#class) - aug_scores (list[Tensor] or None): shape (n, #class) - img_metas (list[list[dict]]): image meta info; each dict holds - 'img_shape', 'scale_factor', 'flip' and 'flip_direction'. - rcnn_test_cfg (dict): rcnn test config. - - Returns: - tuple: (bboxes, scores) - """ - recovered_bboxes = [] - for bboxes, img_info in zip(aug_bboxes, img_metas): - img_shape = img_info[0]['img_shape'] - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, - flip_direction) - recovered_bboxes.append(bboxes) - bboxes = torch.stack(recovered_bboxes).mean(dim=0) - if aug_scores is None: - return bboxes - else: - scores = torch.stack(aug_scores).mean(dim=0) - return bboxes, scores - - -def merge_aug_scores(aug_scores): - """Merge augmented bbox scores.""" - if isinstance(aug_scores[0], torch.Tensor): - return torch.mean(torch.stack(aug_scores), dim=0) - else: - return np.mean(aug_scores, axis=0) - - -def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): - """Merge augmented mask prediction. - - Args: - aug_masks (list[ndarray]): shape (n, #class, h, w) - img_metas (list[list[dict]]): image meta info; each dict holds - 'flip' and 'flip_direction'. - rcnn_test_cfg (dict): rcnn test config. - - Returns: - ndarray: merged masks, shape (n, #class, h, w). - """ - recovered_masks = [] - for mask, img_info in zip(aug_masks, img_metas): - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - if flip: - if flip_direction == 'horizontal': - mask = mask[:, :, :, ::-1] - elif flip_direction == 'vertical': - mask = mask[:, :, ::-1, :] - else: - raise ValueError( - f"Invalid flipping direction '{flip_direction}'") - recovered_masks.append(mask) - - if weights is None: - merged_masks = np.mean(recovered_masks, axis=0) - else: - merged_masks = np.average( - np.array(recovered_masks), axis=0, weights=np.array(weights)) - return merged_masks
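`merge_aug_masks` above flips each mask prediction back to the original orientation and then averages across augmentations (optionally weighted). The reduction itself is tiny; a self-contained sketch of the same idea for horizontally flipped 2-D score maps:

```python
import numpy as np

def merge_flipped_masks(masks, flipped, weights=None):
    """Average per-augmentation mask scores after undoing horizontal flips."""
    restored = [m[..., ::-1] if f else m for m, f in zip(masks, flipped)]
    return np.average(np.stack(restored), axis=0, weights=weights)

m0 = np.array([[0.9, 0.1]])
m1 = np.array([[0.3, 0.7]])  # produced on a horizontally flipped input
print(merge_flipped_masks([m0, m1], [False, True]))  # -> [[0.8 0.2]]
```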
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py deleted file mode 100644 index f0fd80d41407162df71ba5349fc659d4713cdb6e..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py +++ /dev/null @@ -1,66 +0,0 @@ -from ..builder import DETECTORS -from .faster_rcnn import FasterRCNN - - -@DETECTORS.register_module() -class TridentFasterRCNN(FasterRCNN): - """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_""" - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None): - - super(TridentFasterRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) - assert self.backbone.num_branch == self.roi_head.num_branch - assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx - self.num_branch = self.backbone.num_branch - self.test_branch_idx = self.backbone.test_branch_idx - - def simple_test(self, img, img_metas, proposals=None, rescale=False): - """Test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - x = self.extract_feat(img) - num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) - trident_img_metas = img_metas * num_branch - if proposals is None: - proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas) - else: - proposal_list = proposals - - return self.roi_head.simple_test( - x, proposal_list, trident_img_metas, rescale=rescale) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - x = self.extract_feats(imgs) - num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) - trident_img_metas = [meta * num_branch for meta in img_metas] - proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas) - return self.roi_head.aug_test( - x, proposal_list, img_metas, rescale=rescale) - - def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs): - """make copies of img and gts to fit multi-branch.""" - trident_gt_bboxes = tuple(gt_bboxes * self.num_branch) - trident_gt_labels = tuple(gt_labels * self.num_branch) - trident_img_metas = tuple(img_metas * self.num_branch) - - return super(TridentFasterRCNN, - self).forward_train(img, trident_img_metas, - trident_gt_bboxes, trident_gt_labels) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv_custom/checkpoint.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv_custom/checkpoint.py deleted file mode 100644 index c01ddcae760dfaae20c876fff22b8c2af8c0ce52..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv_custom/checkpoint.py +++ /dev/null @@ -1,512 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -# Copyright (c) Open-MMLab. All rights reserved. -import io -import os -import os.path as osp -import pkgutil -import time -import warnings -from collections import OrderedDict -from importlib import import_module -from tempfile import TemporaryDirectory - -import torch -import torchvision -from torch.optim import Optimizer -from torch.utils import model_zoo -from torch.nn import functional as F - -import annotator.uniformer.mmcv as mmcv -from annotator.uniformer.mmcv.fileio import FileClient -from annotator.uniformer.mmcv.fileio import load as load_file -from annotator.uniformer.mmcv.parallel import is_module_wrapper -from annotator.uniformer.mmcv.utils import mkdir_or_exist -from annotator.uniformer.mmcv.runner import get_dist_info - -ENV_MMCV_HOME = 'MMCV_HOME' -ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' -DEFAULT_CACHE_DIR = '~/.cache' - - -def _get_mmcv_home(): - mmcv_home = os.path.expanduser( - os.getenv( - ENV_MMCV_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) - - mkdir_or_exist(mmcv_home) - return mmcv_home - - -def load_state_dict(module, state_dict, strict=False, logger=None): - """Load state_dict to a module. - - This method is modified from :meth:`torch.nn.Module.load_state_dict`.
- Default value for ``strict`` is set to ``False`` and the message for - param mismatch will be shown even if strict is False. - - Args: - module (Module): Module that receives the state_dict. - state_dict (OrderedDict): Weights. - strict (bool): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. - logger (:obj:`logging.Logger`, optional): Logger to log the error - message. If not specified, print function will be used. - """ - unexpected_keys = [] - all_missing_keys = [] - err_msg = [] - - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - # use _load_from_state_dict to enable checkpoint version control - def load(module, prefix=''): - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - local_metadata = {} if metadata is None else metadata.get( - prefix[:-1], {}) - module._load_from_state_dict(state_dict, prefix, local_metadata, True, - all_missing_keys, unexpected_keys, - err_msg) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - - load(module) - load = None # break load->load reference cycle - - # ignore "num_batches_tracked" of BN layers - missing_keys = [ - key for key in all_missing_keys if 'num_batches_tracked' not in key - ] - - if unexpected_keys: - err_msg.append('unexpected key in source ' - f'state_dict: {", ".join(unexpected_keys)}\n') - if missing_keys: - err_msg.append( - f'missing keys in source state_dict: {", ".join(missing_keys)}\n') - - rank, _ = get_dist_info() - if len(err_msg) > 0 and rank == 0: - err_msg.insert( - 0, 'The model and loaded state dict do not match exactly\n') - err_msg = '\n'.join(err_msg) - if strict: - raise RuntimeError(err_msg) - elif logger is not None: - logger.warning(err_msg) - else: - print(err_msg) - - -def load_url_dist(url, model_dir=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - checkpoint = model_zoo.load_url(url, model_dir=model_dir) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - checkpoint = model_zoo.load_url(url, model_dir=model_dir) - return checkpoint - - -def load_pavimodel_dist(model_path, map_location=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - try: - from pavi import modelcloud - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load(downloaded_file, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load( - downloaded_file, map_location=map_location) - return checkpoint - - -def load_fileclient_dist(filename, backend, map_location): - """In 
distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - allowed_backends = ['ceph'] - if backend not in allowed_backends: - raise ValueError(f'Load from Backend {backend} is not supported.') - if rank == 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - return checkpoint - - -def get_torchvision_models(): - model_urls = dict() - for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): - if ispkg: - continue - _zoo = import_module(f'torchvision.models.{name}') - if hasattr(_zoo, 'model_urls'): - _urls = getattr(_zoo, 'model_urls') - model_urls.update(_urls) - return model_urls - - -def get_external_models(): - mmcv_home = _get_mmcv_home() - default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') - default_urls = load_file(default_json_path) - assert isinstance(default_urls, dict) - external_json_path = osp.join(mmcv_home, 'open_mmlab.json') - if osp.exists(external_json_path): - external_urls = load_file(external_json_path) - assert isinstance(external_urls, dict) - default_urls.update(external_urls) - - return default_urls - - -def get_mmcls_models(): - mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') - mmcls_urls = load_file(mmcls_json_path) - - return mmcls_urls - - -def get_deprecated_model_names(): - deprecate_json_path = osp.join(mmcv.__path__[0], - 'model_zoo/deprecated.json') - deprecate_urls = load_file(deprecate_json_path) - assert isinstance(deprecate_urls, dict) - - return deprecate_urls - - -def _process_mmcls_checkpoint(checkpoint): - state_dict = checkpoint['state_dict'] - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k.startswith('backbone.'): - new_state_dict[k[9:]] = v - new_checkpoint = dict(state_dict=new_state_dict) - - return new_checkpoint - - -def _load_checkpoint(filename, map_location=None): - """Load checkpoint from somewhere (modelzoo, file, url). - - Args: - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str | None): Same as :func:`torch.load`. Default: None. - - Returns: - dict | OrderedDict: The loaded checkpoint. It can be either an - OrderedDict storing model weights or a dict containing other - information, which depends on the checkpoint. 
- """ - if filename.startswith('modelzoo://'): - warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' - 'use "torchvision://" instead') - model_urls = get_torchvision_models() - model_name = filename[11:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('torchvision://'): - model_urls = get_torchvision_models() - model_name = filename[14:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('open-mmlab://'): - model_urls = get_external_models() - model_name = filename[13:] - deprecated_urls = get_deprecated_model_names() - if model_name in deprecated_urls: - warnings.warn(f'open-mmlab://{model_name} is deprecated in favor ' - f'of open-mmlab://{deprecated_urls[model_name]}') - model_name = deprecated_urls[model_name] - model_url = model_urls[model_name] - # check if is url - if model_url.startswith(('http://', 'https://')): - checkpoint = load_url_dist(model_url) - else: - filename = osp.join(_get_mmcv_home(), model_url) - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - elif filename.startswith('mmcls://'): - model_urls = get_mmcls_models() - model_name = filename[8:] - checkpoint = load_url_dist(model_urls[model_name]) - checkpoint = _process_mmcls_checkpoint(checkpoint) - elif filename.startswith(('http://', 'https://')): - checkpoint = load_url_dist(filename) - elif filename.startswith('pavi://'): - model_path = filename[7:] - checkpoint = load_pavimodel_dist(model_path, map_location=map_location) - elif filename.startswith('s3://'): - checkpoint = load_fileclient_dist( - filename, backend='ceph', map_location=map_location) - else: - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - return checkpoint - - -def load_checkpoint(model, - filename, - map_location='cpu', - strict=False, - logger=None): - """Load checkpoint from a file or URI. - - Args: - model (Module): Module to load checkpoint. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str): Same as :func:`torch.load`. - strict (bool): Whether to allow different params for the model and - checkpoint. - logger (:mod:`logging.Logger` or None): The logger for error message. - - Returns: - dict or OrderedDict: The loaded checkpoint. 
- """ - checkpoint = _load_checkpoint(filename, map_location) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict = checkpoint['model'] - else: - state_dict = checkpoint - # strip prefix of state_dict - if list(state_dict.keys())[0].startswith('module.'): - state_dict = {k[7:]: v for k, v in state_dict.items()} - - # for MoBY, load model of online branch - if sorted(list(state_dict.keys()))[0].startswith('encoder'): - state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')} - - # reshape absolute position embedding - if state_dict.get('absolute_pos_embed') is not None: - absolute_pos_embed = state_dict['absolute_pos_embed'] - N1, L, C1 = absolute_pos_embed.size() - N2, C2, H, W = model.absolute_pos_embed.size() - if N1 != N2 or C1 != C2 or L != H*W: - logger.warning("Error in loading absolute_pos_embed, pass") - else: - state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) - - # interpolate position bias table if needed - relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] - for table_key in relative_position_bias_table_keys: - table_pretrained = state_dict[table_key] - table_current = model.state_dict()[table_key] - L1, nH1 = table_pretrained.size() - L2, nH2 = table_current.size() - if nH1 != nH2: - logger.warning(f"Error in loading {table_key}, pass") - else: - if L1 != L2: - S1 = int(L1 ** 0.5) - S2 = int(L2 ** 0.5) - table_pretrained_resized = F.interpolate( - table_pretrained.permute(1, 0).view(1, nH1, S1, S1), - size=(S2, S2), mode='bicubic') - state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) - - # load state_dict - load_state_dict(model, state_dict, strict, logger) - return checkpoint - - -def weights_to_cpu(state_dict): - """Copy a model state_dict to cpu. - - Args: - state_dict (OrderedDict): Model weights on GPU. - - Returns: - OrderedDict: Model weights on GPU. - """ - state_dict_cpu = OrderedDict() - for key, val in state_dict.items(): - state_dict_cpu[key] = val.cpu() - return state_dict_cpu - - -def _save_to_state_dict(module, destination, prefix, keep_vars): - """Saves module state to `destination` dictionary. - - This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. - - Args: - module (nn.Module): The module to generate state_dict. - destination (dict): A dict where state will be stored. - prefix (str): The prefix for parameters and buffers used in this - module. - """ - for name, param in module._parameters.items(): - if param is not None: - destination[prefix + name] = param if keep_vars else param.detach() - for name, buf in module._buffers.items(): - # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d - if buf is not None: - destination[prefix + name] = buf if keep_vars else buf.detach() - - -def get_state_dict(module, destination=None, prefix='', keep_vars=False): - """Returns a dictionary containing a whole state of the module. - - Both parameters and persistent buffers (e.g. running averages) are - included. Keys are corresponding parameter and buffer names. 
- - This method is modified from :meth:`torch.nn.Module.state_dict` to - recursively check parallel module in case that the model has a complicated - structure, e.g., nn.Module(nn.Module(DDP)). - - Args: - module (nn.Module): The module to generate state_dict. - destination (OrderedDict): Returned dict for the state of the - module. - prefix (str): Prefix of the key. - keep_vars (bool): Whether to keep the variable property of the - parameters. Default: False. - - Returns: - dict: A dictionary containing a whole state of the module. - """ - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - - # below is the same as torch.nn.Module.state_dict() - if destination is None: - destination = OrderedDict() - destination._metadata = OrderedDict() - destination._metadata[prefix[:-1]] = local_metadata = dict( - version=module._version) - _save_to_state_dict(module, destination, prefix, keep_vars) - for name, child in module._modules.items(): - if child is not None: - get_state_dict( - child, destination, prefix + name + '.', keep_vars=keep_vars) - for hook in module._state_dict_hooks.values(): - hook_result = hook(module, destination, prefix, local_metadata) - if hook_result is not None: - destination = hook_result - return destination - - -def save_checkpoint(model, filename, optimizer=None, meta=None): - """Save checkpoint to file. - - The checkpoint will have 3 fields: ``meta``, ``state_dict`` and - ``optimizer``. By default ``meta`` will contain version and time info. - - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. 
- """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - if filename.startswith('pavi://'): - try: - from pavi import modelcloud - from pavi.exception import NodeNotFoundError - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - mmcv.mkdir_or_exist(osp.dirname(filename)) - # immediately flush buffer - with open(filename, 'wb') as f: - torch.save(checkpoint, f) - f.flush() \ No newline at end of file diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/data/pose_attr_dataset.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/data/pose_attr_dataset.py deleted file mode 100644 index 7245846cb321db91c7935edbae83f7c451110725..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/Text2Human/Text2Human/data/pose_attr_dataset.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -import os.path -import random - -import numpy as np -import torch -import torch.utils.data as data -from PIL import Image - - -class DeepFashionAttrPoseDataset(data.Dataset): - - def __init__(self, - pose_dir, - texture_ann_dir, - shape_ann_path, - downsample_factor=2, - xflip=False): - self._densepose_path = pose_dir - self._image_fnames_target = [] - self._image_fnames = [] - self.upper_fused_attrs = [] - self.lower_fused_attrs = [] - self.outer_fused_attrs = [] - self.shape_attrs = [] - - self.downsample_factor = downsample_factor - self.xflip = xflip - - # load attributes - assert os.path.exists(f'{texture_ann_dir}/upper_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{texture_ann_dir}/upper_fused.txt'), 'r')): - annotations = row.split() - self._image_fnames_target.append(annotations[0]) - self._image_fnames.append(f'{annotations[0].split(".")[0]}.png') - self.upper_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames_target) == len(self.upper_fused_attrs) - - assert os.path.exists(f'{texture_ann_dir}/lower_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{texture_ann_dir}/lower_fused.txt'), 'r')): - annotations = row.split() - assert self._image_fnames_target[idx] == annotations[0] - self.lower_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames_target) == len(self.lower_fused_attrs) - - assert os.path.exists(f'{texture_ann_dir}/outer_fused.txt') - for idx, row in enumerate( - 
open(os.path.join(f'{texture_ann_dir}/outer_fused.txt'), 'r')): - annotations = row.split() - assert self._image_fnames_target[idx] == annotations[0] - self.outer_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames_target) == len(self.outer_fused_attrs) - - assert os.path.exists(shape_ann_path) - for idx, row in enumerate(open(os.path.join(shape_ann_path), 'r')): - annotations = row.split() - assert self._image_fnames_target[idx] == annotations[0] - self.shape_attrs.append([int(i) for i in annotations[1:]]) - - def _open_file(self, path_prefix, fname): - return open(os.path.join(path_prefix, fname), 'rb') - - def _load_densepose(self, raw_idx): - fname = self._image_fnames[raw_idx] - fname = f'{fname[:-4]}_densepose.png' - with self._open_file(self._densepose_path, fname) as f: - densepose = Image.open(f) - if self.downsample_factor != 1: - width, height = densepose.size - width = width // self.downsample_factor - height = height // self.downsample_factor - densepose = densepose.resize( - size=(width, height), resample=Image.NEAREST) - # channel-wise IUV order, [3, H, W] - densepose = np.array(densepose)[:, :, 2:].transpose(2, 0, 1) - return densepose.astype(np.float32) - - def __getitem__(self, index): - pose = self._load_densepose(index) - shape_attr = self.shape_attrs[index] - shape_attr = torch.LongTensor(shape_attr) - - if self.xflip and random.random() > 0.5: - pose = pose[:, :, ::-1].copy() - - upper_fused_attr = self.upper_fused_attrs[index] - lower_fused_attr = self.lower_fused_attrs[index] - outer_fused_attr = self.outer_fused_attrs[index] - - pose = pose / 12. - 1 - - return_dict = { - 'densepose': pose, - 'img_name': self._image_fnames_target[index], - 'shape_attr': shape_attr, - 'upper_fused_attr': upper_fused_attr, - 'lower_fused_attr': lower_fused_attr, - 'outer_fused_attr': outer_fused_attr, - } - - return return_dict - - def __len__(self): - return len(self._image_fnames) diff --git a/spaces/SarthakSidhant/Go-Cattle/diseases/ketosis.md b/spaces/SarthakSidhant/Go-Cattle/diseases/ketosis.md deleted file mode 100644 index 5c286bc0b53f08d4595f535d134f5dfcccf5d15c..0000000000000000000000000000000000000000 --- a/spaces/SarthakSidhant/Go-Cattle/diseases/ketosis.md +++ /dev/null @@ -1,43 +0,0 @@ -## Ketosis - -**Information:** Ketosis is a metabolic disorder that affects cattle. It is caused by a buildup of ketones in the blood. Ketones are produced when the body breaks down fat for energy. - -**Symptoms:** - -* Loss of appetite -* Weight loss -* Depression -* Drooling -* Increased thirst -* Increased urination -* Weakness -* Difficulty breathing -* Pale mucous membranes -* Accumulation of ketones in the urine - -**Remedies:** - -* Ketosis is a medical emergency and requires immediate treatment. -* Treatment usually involves fluids and electrolytes to prevent dehydration. -* The cow may also need to be given an injection of glucose or dextrose to provide energy. -* In severe cases, the cow may need to be hospitalized. - -**Causes:** - -* Ketosis is caused by a buildup of ketones in the blood. Ketones are produced when the body breaks down fat for energy. -* This can happen for a number of reasons, including: - * **Diet:** A diet that is too low in carbohydrates can lead to ketosis. - * **Stress:** Stress can also lead to ketosis. - * **Lactation:** Lactating cows are more likely to develop ketosis because they need more energy to produce milk. 
- * **Pregnancy:** Pregnant cows are also more likely to develop ketosis because they need more energy to support the growth of the fetus. - -**Prevention:** - -* The best way to prevent ketosis is to feed cattle a diet that is high in carbohydrates. -* Cattle should also be provided with plenty of fresh water. -* Stress should be minimized. -* Lactating cows should be fed a diet that is high in energy. -* Pregnant cows should be fed a diet that is high in energy and protein. -* Animals should be monitored for signs of ketosis, such as loss of appetite, weight loss, and depression. -* If an animal is suspected of having ketosis, it should be taken to a veterinarian immediately for diagnosis and treatment. - diff --git a/spaces/SeaLLMs/SeaLLM-Chat-13b/README.md b/spaces/SeaLLMs/SeaLLM-Chat-13b/README.md deleted file mode 100644 index ba284e6777e8215a0c569931dc6f502d17e02115..0000000000000000000000000000000000000000 --- a/spaces/SeaLLMs/SeaLLM-Chat-13b/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SeaLLM-Chat-13b -emoji: 📚 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false -license: llama2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sonnt/Fracture_Webapp/mLogsFunctions/rmOutliers.py b/spaces/Sonnt/Fracture_Webapp/mLogsFunctions/rmOutliers.py deleted file mode 100644 index f25932b9eb0c33a74009f5d52b83b43886c1c957..0000000000000000000000000000000000000000 --- a/spaces/Sonnt/Fracture_Webapp/mLogsFunctions/rmOutliers.py +++ /dev/null @@ -1,135 +0,0 @@ -import numpy as np -import pandas as pd -import streamlit as st -import altair as alt -from streamlit_vega_lite import altair_component - -from .fx import * -from mLogsFunctions import * - -def rmOutliers(df): - _o1, _o2 = st.columns([1,8]) - with _o1: - st.session_state = selection_info(df,"method", "option_w", "option_x", "option_y", "option_c") - - #Crossplot and bar plot----------------------------------------------------------------------- - with _o2: - def rm_outliers(data): - interval = interval_define() - col21, col22 = st.columns(2) - with col21: - selected_points = altair_component(make_selection(data, - interval, - st.session_state.option_x, - st.session_state.option_y, - st.session_state.option_c, - ) - ) - if len(selected_points) > 0: - del[selected_points['name']] - - with col22: - selected_df = None - if len(selected_points) != 0: - query = ' & '.join( - f'{crange[0]} <= `{col}` <= {crange[1]}' - for col, crange in selected_points.items()) - selected_df = data.query(query) - st.write(f"Total selected points: {len(selected_df)}") - st.dataframe(selected_df, width=800, height=260,use_container_width=False) - else: - st.write("No Selection") - - if selected_df is not None: - st.write("Histogram of selected data:") - histogram_x = bar_plot(selected_df, st.session_state.option_x) - histogram_y = bar_plot(selected_df, st.session_state.option_y) - st.write(alt.hconcat(histogram_x,histogram_y)) - else: - st.write("Histogram of entire data:") - histogram_x = bar_plot(data, st.session_state.option_x) - histogram_y = bar_plot(data, st.session_state.option_y) - st.write(alt.hconcat(histogram_x,histogram_y)) - - #Outlier Removal----------------------------------------------------------------------- - st.write('---') - df_nomarlized = data.copy() - curve_editting = st.selectbox("Select curve to edit:", - key="selected_curve", - options=columns_list(data, no_depth=True, no_well=True), - ) - n_value = 
int(st.text_input("Number of rows for Mean calculation ", "5")) - - def normalize_outlier(df_nomarlized, selected_df, curve, n_value): - n=n_value//2 - for i in selected_df.index: - df_nomarlized.loc[[i],curve.upper()] = df_nomarlized.loc[i-n:i+n,curve.upper()].mean() - return df_nomarlized - def remove_data_point(df_nomarlized, selected_df, curve): - for i in selected_df.index: - df_nomarlized[i, curve] = 0 #ERROR ALARM!!!! - # df_nomarlized = df_nomarlized.drop(index=i) #ERROR ALARM!!!! - return df_nomarlized - - if st.button("Outliers Processing"): - st.session_state.fdata = normalize_outlier(df_nomarlized, selected_df, curve_editting, n_value) - _well = "".join((st.session_state.fdata.WELL.unique()).tolist()) - st.session_state.loc_data = pd.concat([df[(df["WELL"] != _well)],st.session_state.fdata], axis=0) - selected_df = None - if st.button("Remove"): - st.session_state.fdata = remove_data_point(df_nomarlized, selected_df, curve_editting) - _well = "".join((st.session_state.fdata.WELL.unique()).tolist()) - st.write(_well) - st.write(type(_well)) - st.session_state.loc_data = pd.concat([df[(df["WELL"] != _well)],st.session_state.fdata], axis=0) - selected_df = None - - #Curve View----------------------------------------------------------------------- - def plt_curs(data, option_w): - data_plt = data[data["WELL"] == option_w] - if plotting_curves != []: - for i, c in enumerate(plotting_curves): - charts_dict[i] = curve_plot(data=data_plt,filted_data=selected_df, x_column=c) - - # with col2: - charts_dict={} - plotting_curves = st.multiselect("Select curves to plot:", key="curvs_plt", options=columns_list(data, no_depth=True, no_well=True)) - - if st.session_state.option_w is not None: - if 'loc_data' not in st.session_state: - plt_curs(df_nomarlized, st.session_state.option_w) - else: - plt_curs(st.session_state.loc_data, st.session_state.option_w) - - #Show Curve----------------------------------------------------------------------- - st.write(alt.concat(*charts_dict.values()).configure(autosize='fit'))#.configure_concat(spacing=0)) - - #------------------------ - def check_method(df): - if st.session_state.method == "Single Well": - data = df[df.WELL == st.session_state.option_w] - data = data.sort_values(by=['DEPTH']) - data = data.reset_index().drop(["index"], axis=1) - else: - data = df - return data - #------------------------ - - if 'loc_data' not in st.session_state: - data = check_method(df) - else: - data = check_method(st.session_state.loc_data) - - rm_outliers(data) - - # # Download -------------------------------------------------------------- - st.write('---') - st.write("Download final result to csv file") - if "loc_data" not in st.session_state: - saving_df = df - else: - saving_df = st.session_state.loc_data - st.download_button(label='Download', - data = saving_df.to_csv(), - file_name='Query_data.csv', - mime='text/csv') diff --git a/spaces/Sonnt/Fracture_Webapp/pages/2_Exploratory_Data_Analysis.py b/spaces/Sonnt/Fracture_Webapp/pages/2_Exploratory_Data_Analysis.py deleted file mode 100644 index 31023939e3eee3179d7ee03f55c6f21d9d36b82b..0000000000000000000000000000000000000000 --- a/spaces/Sonnt/Fracture_Webapp/pages/2_Exploratory_Data_Analysis.py +++ /dev/null @@ -1,92 +0,0 @@ -import numpy as np -import streamlit as st -import pandas as pd -import os -from ui import * -from mLogsFunctions import * - -#Streamlit Dashboard------------------------------------------------------------------------------------------ -pagetile = """
    <h2 style="text-align: center;">
    EXPLORATORY DATA ANALYSIS
    </h2>
    """ -set_page_config(page='custom') -hide_menu_button() -condense_layout() - -logo_site, info_site = st.columns([1.5, 8.5]) -with logo_site: - st.image("https://i.ibb.co/Yd42K98/LogoVPI.png", use_column_width='auto') -with info_site: - # st.set_option('deprecation.showfileUploaderEncoding', False) - # st.set_option('maxUploadSize', 200*1024) # 200 MB - st.markdown(pagetile, unsafe_allow_html=True) - # Option 1: CSV File Loading - st.write('You can load your csv file using the file upload or selection from LAS Exploration option below.') - st.subheader("1. CSV File Loading") - df = csv_uploader() - df = tweak_data(df,resample=False, reindex=True) - - # Option 2: CSV from LAS Exploration - st.subheader("2. CSV from LAS Exploration") - dir_path = '/work/2022_VPIMLogs_WebApp/data/merged/' - csv_files = [filename for filename in os.listdir(dir_path) if filename.endswith('.csv')] - selected_csv_file= st.multiselect('Select a CSV file', csv_files, key = 'st.session_state.selected_well_multi') - - # # Đọc file csv được chọn vào DataFrame - if selected_csv_file: # Nếu người dùng đã chọn file CSV - # Đọc file csv được chọn vào DataFrame - file_path = '/work/2022_VPIMLogs_WebApp/data/merged/' - merged_data = pd.concat([pd.read_csv(file_path + f) for f in selected_csv_file]) - df = tweak_data(merged_data, resample=False, reindex=True) - else: # Nếu người dùng không chọn file CSV - merged_data = df - df = tweak_data(merged_data, resample=False, reindex=True) -#|CHECK DATA EXISTENCE----------------------------------------------------------------------------------------- -if df is not None: - curves = columns_list(df, no_depth=True, no_well=True) - well_names = np.sort(df.WELL.unique()) -#|TABS-ESTABLISHING----------------------------------------------------------------------------------------- - tab1, tab2, tab3, tab4, tab5 = st.tabs(['DataFrame', - 'DataStatistics', - '3D Scatter Points', - 'CurvesView', - 'OutliersRemoval' - ]) - #|TABS-1----------------------------------------------------------------------------------------- - st.write('---') - with tab1: - st.dataframe(df, width=1400, height=500) - - #|TABS-2----------------------------------------------------------------------------------------- - with tab2: - st.radio('DataVisualizationMethod', - key='displayTab2', - options=['DataStatistics', - 'Missing Statistic', - 'Curve Distribution', - 'Histogram Overlay', - 'Cross-Plot', - 'PairPlot'], - horizontal=True) - if st.session_state.displayTab2 == 'DataStatistics': - subtab21(df, well_names) - elif st.session_state.displayTab2 == 'Missing Statistic': - subtab22(df) - elif st.session_state.displayTab2 == 'Curve Distribution': - subtab23(df, curves) - elif st.session_state.displayTab2 == 'Histogram Overlay': - subtab24(df, curves) - elif st.session_state.displayTab2 == 'Cross-Plot': - subtab25(df, curves) - elif st.session_state.displayTab2 == 'PairPlot': - subtab26(df, curves) - else: - subtab21(df, well_names) - - #|TABS-3----------------------------------------------------------------------------------------- - with tab3: - scatterPoint3D(df) - #|TABS-4----------------------------------------------------------------------------------------- - with tab4: - stViewCurves(df) - #|TABS-5----------------------------------------------------------------------------------------- - with tab5: - rmOutliers(df) \ No newline at end of file diff --git a/spaces/Sparkles-AI/design-look-a-likes/Dockerfile b/spaces/Sparkles-AI/design-look-a-likes/Dockerfile deleted file mode 100644 index 
f195ebdc798af0231825fd7427f848638b23ddad..0000000000000000000000000000000000000000 --- a/spaces/Sparkles-AI/design-look-a-likes/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM tensorflow/tensorflow:2.12.0-gpu - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -CMD ["uvicorn", "server:app", "--reload", "--host", "0.0.0.0", "--port", "7860"] - diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/__init__.py b/spaces/SuYuanS/AudioCraft_Plus/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/Sudhanshu976/NLP_FULL_APP/pages/5_SENTIMENT-ANALYZER.py b/spaces/Sudhanshu976/NLP_FULL_APP/pages/5_SENTIMENT-ANALYZER.py deleted file mode 100644 index 3e67b32b8939ebc61844e345994a0f0a2fab90ff..0000000000000000000000000000000000000000 --- a/spaces/Sudhanshu976/NLP_FULL_APP/pages/5_SENTIMENT-ANALYZER.py +++ /dev/null @@ -1,40 +0,0 @@ -import streamlit as st -from transformers import AutoTokenizer , AutoModelForSequenceClassification -import torch - - -tokenizer = AutoTokenizer.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') -model = AutoModelForSequenceClassification.from_pretrained('nlptown/bert-base-multilingual-uncased-sentiment') - -st.set_page_config( - page_title="NLP WEB APP" -) - - -st.title("SENTIMENT ANALYZER") -st.sidebar.success("Select a page above") - - -message= st.text_input("ENTER THE MESSAGE") - -if st.button("PREDICT"): - tokens = tokenizer.encode(message , return_tensors='pt') - output = model(tokens) - result = int(torch.argmax(output.logits))+1 - - - if result==1: - st.header("TOO MUCH NEGATIVE STATEMENT") - st.header("RATING : ⭐ ") - elif result==2: - st.header("NEGATIVE STATEMENT") - st.header("RATING : ⭐⭐") - elif result==3: - st.header("NEUTRAL STATEMENT") - st.header("RATING : ⭐⭐⭐") - elif result==4: - st.header("POSITIVE STATEMENT") - st.header("RATING : ⭐⭐⭐⭐ ") - elif result==5: - st.header("TOO MUCH POSITIVE STATEMENT") - st.header("RATING : ⭐⭐⭐⭐⭐ ") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/locks.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/locks.py deleted file mode 100644 index de2dc83d09dd950fc1ed8d7edaeb20e7697c94ba..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/locks.py +++ /dev/null @@ -1,41 +0,0 @@ -import asyncio -import collections -from typing import Any, Deque, Optional - - -class EventResultOrError: - """Event asyncio lock helper class. - - Wraps the Event asyncio lock allowing either to awake the - locked Tasks without any error or raising an exception. - - thanks to @vorpalsmith for the simple design. 
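-
-    Call ``set()`` to wake every task blocked in ``wait()`` normally, or
-    ``set(exc)`` to make each pending ``wait()`` raise ``exc`` instead,
-    e.g. ``event.set(ConnectionResetError())`` to propagate a connection
-    error to all waiters.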
- """ - - def __init__(self, loop: asyncio.AbstractEventLoop) -> None: - self._loop = loop - self._exc: Optional[BaseException] = None - self._event = asyncio.Event() - self._waiters: Deque[asyncio.Future[Any]] = collections.deque() - - def set(self, exc: Optional[BaseException] = None) -> None: - self._exc = exc - self._event.set() - - async def wait(self) -> Any: - waiter = self._loop.create_task(self._event.wait()) - self._waiters.append(waiter) - try: - val = await waiter - finally: - self._waiters.remove(waiter) - - if self._exc is not None: - raise self._exc - - return val - - def cancel(self) -> None: - """Cancel all waiters""" - for waiter in self._waiters: - waiter.cancel() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/to_thread.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/to_thread.py deleted file mode 100644 index 9315d1ecf16eee45cd129ce17e48041a7f82348a..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/anyio/to_thread.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations - -from typing import Callable, TypeVar -from warnings import warn - -from ._core._eventloop import get_asynclib -from .abc import CapacityLimiter - -T_Retval = TypeVar("T_Retval") - - -async def run_sync( - func: Callable[..., T_Retval], - *args: object, - cancellable: bool = False, - limiter: CapacityLimiter | None = None, -) -> T_Retval: - """ - Call the given function with the given arguments in a worker thread. - - If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, - the thread will still run its course but its return value (or any raised exception) will be - ignored. - - :param func: a callable - :param args: positional arguments for the callable - :param cancellable: ``True`` to allow cancellation of the operation - :param limiter: capacity limiter to use to limit the total amount of threads running - (if omitted, the default limiter is used) - :return: an awaitable that yields the return value of the function. - - """ - return await get_asynclib().run_sync_in_worker_thread( - func, *args, cancellable=cancellable, limiter=limiter - ) - - -async def run_sync_in_worker_thread( - func: Callable[..., T_Retval], - *args: object, - cancellable: bool = False, - limiter: CapacityLimiter | None = None, -) -> T_Retval: - warn( - "run_sync_in_worker_thread() has been deprecated, use anyio.to_thread.run_sync() instead", - DeprecationWarning, - ) - return await run_sync(func, *args, cancellable=cancellable, limiter=limiter) - - -def current_default_thread_limiter() -> CapacityLimiter: - """ - Return the capacity limiter that is used by default to limit the number of concurrent threads. 
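-
-    Calls to ``run_sync`` that omit the ``limiter`` argument all share this
-    limiter, so it bounds the implicit worker-thread pool (40 concurrent
-    threads by default on the asyncio backend).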
- - :return: a capacity limiter object - - """ - return get_asynclib().current_default_thread_limiter() - - -def current_default_worker_thread_limiter() -> CapacityLimiter: - warn( - "current_default_worker_thread_limiter() has been deprecated, " - "use anyio.to_thread.current_default_thread_limiter() instead", - DeprecationWarning, - ) - return current_default_thread_limiter() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/image.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/image.py deleted file mode 100644 index e0072b622ab53b478e7d66709f2164085be7734f..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/documents/image.py +++ /dev/null @@ -1,116 +0,0 @@ -from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union - -import numpy as np - -from docarray.base_doc import BaseDoc -from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl -from docarray.typing.tensor.abstract_tensor import AbstractTensor -from docarray.typing.tensor.image.image_tensor import ImageTensor -from docarray.utils._internal.misc import import_library - -if TYPE_CHECKING: - import tensorflow as tf # type: ignore - import torch -else: - tf = import_library('tensorflow', raise_error=False) - torch = import_library('torch', raise_error=False) - -T = TypeVar('T', bound='ImageDoc') - - -class ImageDoc(BaseDoc): - """ - Document for handling images. - - It can contain: - - - an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`) - - an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`) - - an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`) - - an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`) - - You can use this Document directly: - - ```python - from docarray.documents import ImageDoc - - # use it directly - image = ImageDoc( - url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true' - ) - image.tensor = image.url.load() - # model = MyEmbeddingModel() - # image.embedding = model(image.tensor) - ``` - - You can extend this Document: - - ```python - from docarray.documents import ImageDoc - from docarray.typing import AnyEmbedding - from typing import Optional - - - # extend it - class MyImage(ImageDoc): - second_embedding: Optional[AnyEmbedding] - - - image = MyImage( - url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true' - ) - image.tensor = image.url.load() - # model = MyEmbeddingModel() - # image.embedding = model(image.tensor) - # image.second_embedding = model(image.tensor) - ``` - - You can use this Document for composition: - - ```python - from docarray import BaseDoc - from docarray.documents import ImageDoc, TextDoc - - - # compose it - class MultiModalDoc(BaseDoc): - image: ImageDoc - text: TextDoc - - - mmdoc = MultiModalDoc( - image=ImageDoc( - url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true' - ), - text=TextDoc(text='hello world, how are you doing?'), - ) - mmdoc.image.tensor = mmdoc.image.url.load() - - # or - mmdoc.image.bytes_ = mmdoc.image.url.load_bytes() - mmdoc.image.tensor = mmdoc.image.bytes_.load() - ``` - """ - - url: Optional[ImageUrl] - tensor: Optional[ImageTensor] - embedding: Optional[AnyEmbedding] - bytes_: Optional[ImageBytes] - - @classmethod - def validate( - cls: Type[T], - value: Union[str, AbstractTensor, Any], - 
) -> T: - if isinstance(value, str): - value = cls(url=value) - elif ( - isinstance(value, (AbstractTensor, np.ndarray)) - or (torch is not None and isinstance(value, torch.Tensor)) - or (tf is not None and isinstance(value, tf.Tensor)) - ): - value = cls(tensor=value) - elif isinstance(value, bytes): - value = cls(byte=value) - - return super().validate(value) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/image_list.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/image_list.py deleted file mode 100644 index 86c8b9512a5fd8abda7fdf058a63b19f809e46f6..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/image_list.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from __future__ import division -from typing import Any, Dict, List, Optional, Tuple -import torch -from torch import device -from torch.nn import functional as F - -from annotator.oneformer.detectron2.layers.wrappers import move_device_like, shapes_to_tensor - - -class ImageList(object): - """ - Structure that holds a list of images (of possibly - varying sizes) as a single tensor. - This works by padding the images to the same size. - The original sizes of each image is stored in `image_sizes`. - - Attributes: - image_sizes (list[tuple[int, int]]): each tuple is (h, w). - During tracing, it becomes list[Tensor] instead. - """ - - def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): - """ - Arguments: - tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 - image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can - be smaller than (H, W) due to padding. - """ - self.tensor = tensor - self.image_sizes = image_sizes - - def __len__(self) -> int: - return len(self.image_sizes) - - def __getitem__(self, idx) -> torch.Tensor: - """ - Access the individual image in its original size. - - Args: - idx: int or slice - - Returns: - Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 - """ - size = self.image_sizes[idx] - return self.tensor[idx, ..., : size[0], : size[1]] - - @torch.jit.unused - def to(self, *args: Any, **kwargs: Any) -> "ImageList": - cast_tensor = self.tensor.to(*args, **kwargs) - return ImageList(cast_tensor, self.image_sizes) - - @property - def device(self) -> device: - return self.tensor.device - - @staticmethod - def from_tensors( - tensors: List[torch.Tensor], - size_divisibility: int = 0, - pad_value: float = 0.0, - padding_constraints: Optional[Dict[str, int]] = None, - ) -> "ImageList": - """ - Args: - tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or - (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded - to the same shape with `pad_value`. - size_divisibility (int): If `size_divisibility > 0`, add padding to ensure - the common height and width is divisible by `size_divisibility`. - This depends on the model and many models need a divisibility of 32. - pad_value (float): value to pad. - padding_constraints (optional[Dict]): If given, it would follow the format as - {"size_divisibility": int, "square_size": int}, where `size_divisibility` will - overwrite the above one if presented and `square_size` indicates the - square padding size if `square_size` > 0. - Returns: - an `ImageList`. 
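-
-        For example, tensors of shape (3, 224, 192) and (3, 160, 224) batched
-        with `size_divisibility=32` produce one padded tensor of shape
-        (2, 3, 224, 224), with `image_sizes == [(224, 192), (160, 224)]`.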
- """ - assert len(tensors) > 0 - assert isinstance(tensors, (tuple, list)) - for t in tensors: - assert isinstance(t, torch.Tensor), type(t) - assert t.shape[:-2] == tensors[0].shape[:-2], t.shape - - image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] - image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] - max_size = torch.stack(image_sizes_tensor).max(0).values - - if padding_constraints is not None: - square_size = padding_constraints.get("square_size", 0) - if square_size > 0: - # pad to square. - max_size[0] = max_size[1] = square_size - if "size_divisibility" in padding_constraints: - size_divisibility = padding_constraints["size_divisibility"] - if size_divisibility > 1: - stride = size_divisibility - # the last two dims are H,W, both subject to divisibility requirement - max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride - - # handle weirdness of scripting and tracing ... - if torch.jit.is_scripting(): - max_size: List[int] = max_size.to(dtype=torch.long).tolist() - else: - if torch.jit.is_tracing(): - image_sizes = image_sizes_tensor - - if len(tensors) == 1: - # This seems slightly (2%) faster. - # TODO: check whether it's faster for multiple images as well - image_size = image_sizes[0] - padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] - batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) - else: - # max_size can be a tensor in tracing mode, therefore convert to list - batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) - device = ( - None if torch.jit.is_scripting() else ("cpu" if torch.jit.is_tracing() else None) - ) - batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device) - batched_imgs = move_device_like(batched_imgs, tensors[0]) - for i, img in enumerate(tensors): - # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)` - # Tracing mode cannot capture `copy_()` of temporary locals - batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img) - - return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/spaces/Swth/Hi/app.py b/spaces/Swth/Hi/app.py deleted file mode 100644 index 0e8880283600cdaa4b9803fd03ecf422ff6c6279..0000000000000000000000000000000000000000 --- a/spaces/Swth/Hi/app.py +++ /dev/null @@ -1,32 +0,0 @@ - -from sagemaker.huggingface import HuggingFaceModel -import sagemaker - -role = sagemaker.get_execution_role() -# Hub Model configuration. https://huggingface.co/models -hub = { - 'HF_MODEL_ID':'dalle-mini/dalle-mega', - 'HF_TASK':'text2text-generation' -} - -# create Hugging Face Model Class -huggingface_model = HuggingFaceModel( - transformers_version='4.17.0', - pytorch_version='1.10.2', - py_version='py38', - env=hub, - role=role, -) - -# deploy model to SageMaker Inference -predictor = huggingface_model.deploy( - initial_instance_count=1, # number of instances - instance_type='ml.m5.xlarge' # ec2 instance type -) - -predictor.predict({ - 'inputs': No input example has been defined for this model task. 
-}) -git add app.py -git commit -m "Add application file" -git push \ No newline at end of file diff --git a/spaces/TEnngal/bingo/src/components/ui/sheet.tsx b/spaces/TEnngal/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/controller.py deleted file mode 100644 index 7f23529f1155cd3bbfde335ccdb7fc483b9d2d19..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/controller.py +++ /dev/null @@ -1,439 +0,0 @@ -# SPDX-FileCopyrightText: 2015 Eric Larson -# -# SPDX-License-Identifier: Apache-2.0 - -""" -The httplib2 algorithms ported for use with requests. -""" -import logging -import re -import calendar -import time -from email.utils import parsedate_tz - -from pip._vendor.requests.structures import CaseInsensitiveDict - -from .cache import DictCache, SeparateBodyBaseCache -from .serialize import Serializer - - -logger = logging.getLogger(__name__) - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - -PERMANENT_REDIRECT_STATUSES = (301, 308) - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -class CacheController(object): - """An interface to see if request should cached or not.""" - - def __init__( - self, cache=None, cache_etags=True, serializer=None, status_codes=None - ): - self.cache = DictCache() if cache is None else cache - self.cache_etags = cache_etags - self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) - - @classmethod - def _urlnorm(cls, uri): - """Normalize the URL to create a safe key for the cache""" - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise Exception("Only absolute URIs are allowed. uri = %s" % uri) - - scheme = scheme.lower() - authority = authority.lower() - - if not path: - path = "/" - - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. 
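-        # (As implemented, normalization just lowercases the scheme and
-        # authority and defaults an empty path to "/".)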
- request_uri = query and "?".join([path, query]) or path - defrag_uri = scheme + "://" + authority + request_uri - - return defrag_uri - - @classmethod - def cache_url(cls, uri): - return cls._urlnorm(uri) - - def parse_cache_control(self, headers): - known_directives = { - # https://tools.ietf.org/html/rfc7234#section-5.2 - "max-age": (int, True), - "max-stale": (int, False), - "min-fresh": (int, True), - "no-cache": (None, False), - "no-store": (None, False), - "no-transform": (None, False), - "only-if-cached": (None, False), - "must-revalidate": (None, False), - "public": (None, False), - "private": (None, False), - "proxy-revalidate": (None, False), - "s-maxage": (int, True), - } - - cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) - - retval = {} - - for cc_directive in cc_headers.split(","): - if not cc_directive.strip(): - continue - - parts = cc_directive.split("=", 1) - directive = parts[0].strip() - - try: - typ, required = known_directives[directive] - except KeyError: - logger.debug("Ignoring unknown cache-control directive: %s", directive) - continue - - if not typ or not required: - retval[directive] = None - if typ: - try: - retval[directive] = typ(parts[1].strip()) - except IndexError: - if required: - logger.debug( - "Missing value for cache-control " "directive: %s", - directive, - ) - except ValueError: - logger.debug( - "Invalid value for cache-control directive " "%s, must be %s", - directive, - typ.__name__, - ) - - return retval - - def cached_request(self, request): - """ - Return a cached response if it exists in the cache, otherwise - return False. - """ - cache_url = self.cache_url(request.url) - logger.debug('Looking up "%s" in the cache', cache_url) - cc = self.parse_cache_control(request.headers) - - # Bail out if the request insists on fresh data - if "no-cache" in cc: - logger.debug('Request header has "no-cache", cache bypassed') - return False - - if "max-age" in cc and cc["max-age"] == 0: - logger.debug('Request header has "max_age" as 0, cache bypassed') - return False - - # Request allows serving from the cache, let's see if we find something - cache_data = self.cache.get(cache_url) - if cache_data is None: - logger.debug("No cache entry available") - return False - - if isinstance(self.cache, SeparateBodyBaseCache): - body_file = self.cache.get_body(cache_url) - else: - body_file = None - - # Check whether it can be deserialized - resp = self.serializer.loads(request, cache_data, body_file) - if not resp: - logger.warning("Cache entry deserialization failed, entry ignored") - return False - - # If we have a cached permanent redirect, return it immediately. We - # don't need to test our response for other headers b/c it is - # intrinsically "cacheable" as it is Permanent. - # - # See: - # https://tools.ietf.org/html/rfc7231#section-6.4.2 - # - # Client can try to refresh the value by repeating the request - # with cache busting headers as usual (ie no-cache). - if int(resp.status) in PERMANENT_REDIRECT_STATUSES: - msg = ( - "Returning cached permanent redirect response " - "(ignoring date and etag information)" - ) - logger.debug(msg) - return resp - - headers = CaseInsensitiveDict(resp.headers) - if not headers or "date" not in headers: - if "etag" not in headers: - # Without date or etag, the cached response can never be used - # and should be deleted. 
- logger.debug("Purging cached response: no date or etag") - self.cache.delete(cache_url) - logger.debug("Ignoring cached response: no date") - return False - - now = time.time() - date = calendar.timegm(parsedate_tz(headers["date"])) - current_age = max(0, now - date) - logger.debug("Current age based on date: %i", current_age) - - # TODO: There is an assumption that the result will be a - # urllib3 response object. This may not be best since we - # could probably avoid instantiating or constructing the - # response until we know we need it. - resp_cc = self.parse_cache_control(headers) - - # determine freshness - freshness_lifetime = 0 - - # Check the max-age pragma in the cache control header - if "max-age" in resp_cc: - freshness_lifetime = resp_cc["max-age"] - logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) - - # If there isn't a max-age, check for an expires header - elif "expires" in headers: - expires = parsedate_tz(headers["expires"]) - if expires is not None: - expire_time = calendar.timegm(expires) - date - freshness_lifetime = max(0, expire_time) - logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) - - # Determine if we are setting freshness limit in the - # request. Note, this overrides what was in the response. - if "max-age" in cc: - freshness_lifetime = cc["max-age"] - logger.debug( - "Freshness lifetime from request max-age: %i", freshness_lifetime - ) - - if "min-fresh" in cc: - min_fresh = cc["min-fresh"] - # adjust our current age by our min fresh - current_age += min_fresh - logger.debug("Adjusted current age from min-fresh: %i", current_age) - - # Return entry if it is fresh enough - if freshness_lifetime > current_age: - logger.debug('The response is "fresh", returning cached response') - logger.debug("%i > %i", freshness_lifetime, current_age) - return resp - - # we're not fresh. If we don't have an Etag, clear it out - if "etag" not in headers: - logger.debug('The cached response is "stale" with no etag, purging') - self.cache.delete(cache_url) - - # return the original handler - return False - - def conditional_headers(self, request): - cache_url = self.cache_url(request.url) - resp = self.serializer.loads(request, self.cache.get(cache_url)) - new_headers = {} - - if resp: - headers = CaseInsensitiveDict(resp.headers) - - if "etag" in headers: - new_headers["If-None-Match"] = headers["ETag"] - - if "last-modified" in headers: - new_headers["If-Modified-Since"] = headers["Last-Modified"] - - return new_headers - - def _cache_set(self, cache_url, request, response, body=None, expires_time=None): - """ - Store the data in the cache. - """ - if isinstance(self.cache, SeparateBodyBaseCache): - # We pass in the body separately; just put a placeholder empty - # string in the metadata. - self.cache.set( - cache_url, - self.serializer.dumps(request, response, b""), - expires=expires_time, - ) - self.cache.set_body(cache_url, body) - else: - self.cache.set( - cache_url, - self.serializer.dumps(request, response, body), - expires=expires_time, - ) - - def cache_response(self, request, response, body=None, status_codes=None): - """ - Algorithm for caching requests. - - This assumes a requests Response object. 
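-
-        Only responses with a cacheable status code (by default 200, 203,
-        300, 301 and 308) are stored, and a "no-store" directive on either
-        the request or the response skips caching entirely.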
- """ - # From httplib2: Don't cache 206's since we aren't going to - # handle byte range requests - cacheable_status_codes = status_codes or self.cacheable_status_codes - if response.status not in cacheable_status_codes: - logger.debug( - "Status code %s not in %s", response.status, cacheable_status_codes - ) - return - - response_headers = CaseInsensitiveDict(response.headers) - - if "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - else: - date = 0 - - # If we've been given a body, our response has a Content-Length, that - # Content-Length is valid then we can check to see if the body we've - # been given matches the expected size, and if it doesn't we'll just - # skip trying to cache it. - if ( - body is not None - and "content-length" in response_headers - and response_headers["content-length"].isdigit() - and int(response_headers["content-length"]) != len(body) - ): - return - - cc_req = self.parse_cache_control(request.headers) - cc = self.parse_cache_control(response_headers) - - cache_url = self.cache_url(request.url) - logger.debug('Updating cache with response from "%s"', cache_url) - - # Delete it from the cache if we happen to have it stored there - no_store = False - if "no-store" in cc: - no_store = True - logger.debug('Response header has "no-store"') - if "no-store" in cc_req: - no_store = True - logger.debug('Request header has "no-store"') - if no_store and self.cache.get(cache_url): - logger.debug('Purging existing cache entry to honor "no-store"') - self.cache.delete(cache_url) - if no_store: - return - - # https://tools.ietf.org/html/rfc7234#section-4.1: - # A Vary header field-value of "*" always fails to match. - # Storing such a response leads to a deserialization warning - # during cache lookup and is not allowed to ever be served, - # so storing it can be avoided. - if "*" in response_headers.get("vary", ""): - logger.debug('Response header has "Vary: *"') - return - - # If we've been given an etag, then keep the response - if self.cache_etags and "etag" in response_headers: - expires_time = 0 - if response_headers.get("expires"): - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - - expires_time = max(expires_time, 14 * 86400) - - logger.debug("etag object cached for {0} seconds".format(expires_time)) - logger.debug("Caching due to etag") - self._cache_set(cache_url, request, response, body, expires_time) - - # Add to the cache any permanent redirects. We do this before looking - # that the Date headers. - elif int(response.status) in PERMANENT_REDIRECT_STATUSES: - logger.debug("Caching permanent redirect") - self._cache_set(cache_url, request, response, b"") - - # Add to the cache if the response headers demand it. If there - # is no date header then we can't do anything about expiring - # the cache. - elif "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - # cache when there is a max-age > 0 - if "max-age" in cc and cc["max-age"] > 0: - logger.debug("Caching b/c date exists and max-age > 0") - expires_time = cc["max-age"] - self._cache_set( - cache_url, - request, - response, - body, - expires_time, - ) - - # If the request can expire, it means we should cache it - # in the meantime. 
- elif "expires" in response_headers: - if response_headers["expires"]: - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - else: - expires_time = None - - logger.debug( - "Caching b/c of expires header. expires in {0} seconds".format( - expires_time - ) - ) - self._cache_set( - cache_url, - request, - response, - body, - expires_time, - ) - - def update_cached_response(self, request, response): - """On a 304 we will get a new set of headers that we want to - update our cached value with, assuming we have one. - - This should only ever be called when we've sent an ETag and - gotten a 304 as the response. - """ - cache_url = self.cache_url(request.url) - - cached_response = self.serializer.loads(request, self.cache.get(cache_url)) - - if not cached_response: - # we didn't have a cached response - return response - - # Lets update our headers with the headers from the new request: - # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 - # - # The server isn't supposed to send headers that would make - # the cached body invalid. But... just in case, we'll be sure - # to strip out ones we know that might be problmatic due to - # typical assumptions. - excluded_headers = ["content-length"] - - cached_response.headers.update( - dict( - (k, v) - for k, v in response.headers.items() - if k.lower() not in excluded_headers - ) - ) - - # we want a 200 b/c we have content via the cache - cached_response.status = 200 - - # update our cache - self._cache_set(cache_url, request, cached_response) - - return cached_response diff --git a/spaces/Tefa90/ehartford-dolphin-2.1-mistral-7b/README.md b/spaces/Tefa90/ehartford-dolphin-2.1-mistral-7b/README.md deleted file mode 100644 index 9fd0442091499bf509f72e514801afa0c332d805..0000000000000000000000000000000000000000 --- a/spaces/Tefa90/ehartford-dolphin-2.1-mistral-7b/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ehartford Dolphin 2.1 Mistral 7b -emoji: 🐠 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/fcos.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/fcos.py deleted file mode 100644 index 55cdb76e836214a2b5a7a4a5a5c47e3382dee86d..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/fcos.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -from typing import List, Optional, Tuple -import torch -from fvcore.nn import sigmoid_focal_loss_jit -from torch import Tensor, nn -from torch.nn import functional as F - -from detectron2.layers import ShapeSpec, batched_nms -from detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance -from detectron2.utils.events import get_event_storage - -from ..anchor_generator import DefaultAnchorGenerator -from ..backbone import Backbone -from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss -from .dense_detector import DenseDetector -from .retinanet import RetinaNetHead - -__all__ = ["FCOS"] - - -logger = logging.getLogger(__name__) - - -class FCOS(DenseDetector): - """ - Implement FCOS in :paper:`fcos`. 
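-
-    FCOS is anchor-free: each feature-map location directly predicts a class
-    score, a 4-d box offset and a centerness score, instead of regressing
-    offsets from anchor boxes (each location is represented here as a single
-    degenerate anchor whose size equals the feature stride).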
- """ - - def __init__( - self, - *, - backbone: Backbone, - head: nn.Module, - head_in_features: Optional[List[str]] = None, - box2box_transform=None, - num_classes, - center_sampling_radius: float = 1.5, - focal_loss_alpha=0.25, - focal_loss_gamma=2.0, - test_score_thresh=0.2, - test_topk_candidates=1000, - test_nms_thresh=0.6, - max_detections_per_image=100, - pixel_mean, - pixel_std, - ): - """ - Args: - center_sampling_radius: radius of the "center" of a groundtruth box, - within which all anchor points are labeled positive. - Other arguments mean the same as in :class:`RetinaNet`. - """ - super().__init__( - backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std - ) - - self.num_classes = num_classes - - # FCOS uses one anchor point per location. - # We represent the anchor point by a box whose size equals the anchor stride. - feature_shapes = backbone.output_shape() - fpn_strides = [feature_shapes[k].stride for k in self.head_in_features] - self.anchor_generator = DefaultAnchorGenerator( - sizes=[[k] for k in fpn_strides], aspect_ratios=[1.0], strides=fpn_strides - ) - - # FCOS parameterizes box regression by a linear transform, - # where predictions are normalized by anchor stride (equal to anchor size). - if box2box_transform is None: - box2box_transform = Box2BoxTransformLinear(normalize_by_size=True) - self.box2box_transform = box2box_transform - - self.center_sampling_radius = float(center_sampling_radius) - - # Loss parameters: - self.focal_loss_alpha = focal_loss_alpha - self.focal_loss_gamma = focal_loss_gamma - - # Inference parameters: - self.test_score_thresh = test_score_thresh - self.test_topk_candidates = test_topk_candidates - self.test_nms_thresh = test_nms_thresh - self.max_detections_per_image = max_detections_per_image - - def forward_training(self, images, features, predictions, gt_instances): - # Transpose the Hi*Wi*A dimension to the middle: - pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions( - predictions, [self.num_classes, 4, 1] - ) - anchors = self.anchor_generator(features) - gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) - return self.losses( - anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness - ) - - @torch.no_grad() - def match_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]): - """ - Match anchors with ground truth boxes. - - Args: - anchors: #level boxes, from the highest resolution to lower resolution - gt_instances: ground truth instances per image - - Returns: - List[Tensor]: - #image tensors, each is a vector of matched gt - indices (or -1 for unmatched anchors) for all anchors. - """ - num_anchors_per_level = [len(x) for x in anchors] - anchors = Boxes.cat(anchors) # Rx4 - anchor_centers = anchors.get_centers() # Rx2 - anchor_sizes = anchors.tensor[:, 2] - anchors.tensor[:, 0] # R - - lower_bound = anchor_sizes * 4 - lower_bound[: num_anchors_per_level[0]] = 0 - upper_bound = anchor_sizes * 8 - upper_bound[-num_anchors_per_level[-1] :] = float("inf") - - matched_indices = [] - for gt_per_image in gt_instances: - gt_centers = gt_per_image.gt_boxes.get_centers() # Nx2 - # FCOS with center sampling: anchor point must be close enough to gt center. 
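-            # i.e. the anchor point must lie within `center_sampling_radius`
-            # strides of the gt box center (anchor_sizes equals the stride)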
- pairwise_match = (anchor_centers[:, None, :] - gt_centers[None, :, :]).abs_().max( - dim=2 - ).values < self.center_sampling_radius * anchor_sizes[:, None] - pairwise_dist = pairwise_point_box_distance(anchor_centers, gt_per_image.gt_boxes) - - # The original FCOS anchor matching rule: anchor point must be inside gt - pairwise_match &= pairwise_dist.min(dim=2).values > 0 - - # Multilevel anchor matching in FCOS: each anchor is only responsible - # for certain scale range. - pairwise_dist = pairwise_dist.max(dim=2).values - pairwise_match &= (pairwise_dist > lower_bound[:, None]) & ( - pairwise_dist < upper_bound[:, None] - ) - - # Match the GT box with minimum area, if there are multiple GT matches - gt_areas = gt_per_image.gt_boxes.area() # N - pairwise_match = pairwise_match.to(torch.float32) * (1e8 - gt_areas[None, :]) - min_values, matched_idx = pairwise_match.max(dim=1) # R, per-anchor match - matched_idx[min_values < 1e-5] = -1 # Unmatched anchors are assigned -1 - - matched_indices.append(matched_idx) - return matched_indices - - @torch.no_grad() - def label_anchors(self, anchors, gt_instances): - """ - Same interface as :meth:`RetinaNet.label_anchors`, but implemented with FCOS - anchor matching rule. - - Unlike RetinaNet, there are no ignored anchors. - """ - matched_indices = self.match_anchors(anchors, gt_instances) - - matched_labels, matched_boxes = [], [] - for gt_index, gt_per_image in zip(matched_indices, gt_instances): - label = gt_per_image.gt_classes[gt_index.clip(min=0)] - label[gt_index < 0] = self.num_classes # background - - matched_gt_boxes = gt_per_image.gt_boxes[gt_index.clip(min=0)] - - matched_labels.append(label) - matched_boxes.append(matched_gt_boxes) - return matched_labels, matched_boxes - - def losses( - self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness - ): - """ - This method is almost identical to :meth:`RetinaNet.losses`, with an extra - "loss_centerness" in the returned dict. 
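-
-        The centerness target of a positive anchor is
-        sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))), computed from
-        its box-regression targets and supervised with binary cross-entropy.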
- """ - num_images = len(gt_labels) - gt_labels = torch.stack(gt_labels) # (N, R) - - pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) - num_pos_anchors = pos_mask.sum().item() - get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) - normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 300) - - # classification and regression loss - gt_labels_target = F.one_hot(gt_labels, num_classes=self.num_classes + 1)[ - :, :, :-1 - ] # no loss for the last (background) class - loss_cls = sigmoid_focal_loss_jit( - torch.cat(pred_logits, dim=1), - gt_labels_target.to(pred_logits[0].dtype), - alpha=self.focal_loss_alpha, - gamma=self.focal_loss_gamma, - reduction="sum", - ) - - loss_box_reg = _dense_box_regression_loss( - anchors, - self.box2box_transform, - pred_anchor_deltas, - [x.tensor for x in gt_boxes], - pos_mask, - box_reg_loss_type="giou", - ) - - ctrness_targets = self.compute_ctrness_targets(anchors, gt_boxes) # NxR - pred_centerness = torch.cat(pred_centerness, dim=1).squeeze(dim=2) # NxR - ctrness_loss = F.binary_cross_entropy_with_logits( - pred_centerness[pos_mask], ctrness_targets[pos_mask], reduction="sum" - ) - return { - "loss_fcos_cls": loss_cls / normalizer, - "loss_fcos_loc": loss_box_reg / normalizer, - "loss_fcos_ctr": ctrness_loss / normalizer, - } - - def compute_ctrness_targets(self, anchors, gt_boxes): # NxR - anchors = Boxes.cat(anchors).tensor # Rx4 - reg_targets = [self.box2box_transform.get_deltas(anchors, m.tensor) for m in gt_boxes] - reg_targets = torch.stack(reg_targets, dim=0) # NxRx4 - if len(reg_targets) == 0: - return reg_targets.new_zeros(len(reg_targets)) - left_right = reg_targets[:, :, [0, 2]] - top_bottom = reg_targets[:, :, [1, 3]] - ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( - top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0] - ) - return torch.sqrt(ctrness) - - def forward_inference( - self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] - ): - pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions( - predictions, [self.num_classes, 4, 1] - ) - anchors = self.anchor_generator(features) - - results: List[Instances] = [] - for img_idx, image_size in enumerate(images.image_sizes): - scores_per_image = [ - # Multiply and sqrt centerness & classification scores - # (See eqn. 4 in https://arxiv.org/abs/2006.09214) - torch.sqrt(x[img_idx].sigmoid_() * y[img_idx].sigmoid_()) - for x, y in zip(pred_logits, pred_centerness) - ] - deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] - results_per_image = self.inference_single_image( - anchors, scores_per_image, deltas_per_image, image_size - ) - results.append(results_per_image) - return results - - def inference_single_image( - self, - anchors: List[Boxes], - box_cls: List[Tensor], - box_delta: List[Tensor], - image_size: Tuple[int, int], - ): - """ - Identical to :meth:`RetinaNet.inference_single_image. - """ - pred = self._decode_multi_level_predictions( - anchors, - box_cls, - box_delta, - self.test_score_thresh, - self.test_topk_candidates, - image_size, - ) - keep = batched_nms( - pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh - ) - return pred[keep[: self.max_detections_per_image]] - - -class FCOSHead(RetinaNetHead): - """ - The head used in :paper:`fcos`. It adds an additional centerness - prediction branch on top of :class:`RetinaNetHead`. 
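-
-    The centerness branch is a single 3x3 conv applied to the shared
-    box-regression features, producing one centerness logit per location.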
- """ - - def __init__(self, *, input_shape: List[ShapeSpec], conv_dims: List[int], **kwargs): - super().__init__(input_shape=input_shape, conv_dims=conv_dims, num_anchors=1, **kwargs) - # Unlike original FCOS, we do not add an additional learnable scale layer - # because it's found to have no benefits after normalizing regression targets by stride. - self._num_features = len(input_shape) - self.ctrness = nn.Conv2d(conv_dims[-1], 1, kernel_size=3, stride=1, padding=1) - torch.nn.init.normal_(self.ctrness.weight, std=0.01) - torch.nn.init.constant_(self.ctrness.bias, 0) - - def forward(self, features): - assert len(features) == self._num_features - logits = [] - bbox_reg = [] - ctrness = [] - for feature in features: - logits.append(self.cls_score(self.cls_subnet(feature))) - bbox_feature = self.bbox_subnet(feature) - bbox_reg.append(self.bbox_pred(bbox_feature)) - ctrness.append(self.ctrness(bbox_feature)) - return logits, bbox_reg, ctrness diff --git a/spaces/ThirdEyeData/Component_Repair_Time_Prediction/README.md b/spaces/ThirdEyeData/Component_Repair_Time_Prediction/README.md deleted file mode 100644 index c9436975c93a22744e38a0bc8272da0848b6887f..0000000000000000000000000000000000000000 --- a/spaces/ThirdEyeData/Component_Repair_Time_Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Component Repair Time Prediction -emoji: 🐢 -colorFrom: purple -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/__init__.py b/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Tj/LangChain-ChatGPT-plugins/README.md b/spaces/Tj/LangChain-ChatGPT-plugins/README.md deleted file mode 100644 index b07d19d0581f51dc5a3859fddc30d0993af07f79..0000000000000000000000000000000000000000 --- a/spaces/Tj/LangChain-ChatGPT-plugins/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LangChain + ChatGPT Plugins -emoji: 🦜🧩 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.28.2 -app_file: app.py -pinned: false -duplicated_from: dragonSwing/LangChain-ChatGPT-plugins ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/Homepage.py b/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/Homepage.py deleted file mode 100644 index 68d3eac3dc354d0656cef7f94637adc19b2796bf..0000000000000000000000000000000000000000 --- a/spaces/Tuyet3005/Sentiment_Analysis_using_BERT/Homepage.py +++ /dev/null @@ -1,46 +0,0 @@ -import streamlit as st -from st_pages import Page, show_pages - -st.set_page_config(page_title="Sentiment Analysis", page_icon="🏠") - -show_pages( - [ - Page("streamlit_app.py/Homepage.py", "Home", "🏠"), - Page( - "streamlit_app.py/pages/Sentiment_Analysis.py", "Sentiment Analysis", "📝" - ), - ] -) - -st.title("Final Project in Machine Learning Course - Sentiment Analysis") -st.markdown( - """ - **Team members:** - | Student ID | Full Name | - | ---------- | ------------------------ | - | 19120600 | Bùi Nguyên Nghĩa | - | 20120089 | Lê Xuân Hoàng | - | 20120422 | Nguyễn Thị Ánh Tuyết | - | 20120460 | Lê Nguyễn Hải Dương | - | 20120494 | Lê Xuân Huy | - """ -) - -st.header("The Need for Sentiment Analysis") -st.markdown( - """ - 
Sentiment analysis algorithms are used to detect sentiment in a comment or a review. - It is said that around 90% of consumers read online reviews before visiting a business or buying a product. - These reviews can be positive or negative or neutral, and it is important to know what the customers are saying about your business. - """ -) - -st.header("Technology used") -st.markdown( - """ - In this demo, we used BERT as the model for sentiment analysis. BERT is a transformer-based model that was proposed in 2018 by Google. - It is a pre-trained model that can be used for various NLP tasks such as sentiment analysis, question answering, etc. - """ -) - - diff --git a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h b/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h deleted file mode 100644 index c7408eba007b424194618baa63726657e36875e3..0000000000000000000000000000000000000000 --- a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h +++ /dev/null @@ -1,64 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once - -#include "ms_deform_attn_cpu.h" - -#ifdef WITH_CUDA -#include "ms_deform_attn_cuda.h" -#endif - -namespace groundingdino { - -at::Tensor -ms_deform_attn_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_forward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::vector -ms_deform_attn_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_backward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/WZUN666/vits-uma-genshin-honkai/commons.py b/spaces/WZUN666/vits-uma-genshin-honkai/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/WZUN666/vits-uma-genshin-honkai/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - 
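-# Monkey-patch torch.jit with the no-op stubs above so that any
-# @torch.jit.script-decorated code in this module runs as plain eager Python.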
-torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length 
= length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/WayneLinn/Singapore_Air_Quality_Prediction/README.md b/spaces/WayneLinn/Singapore_Air_Quality_Prediction/README.md deleted file mode 100644 index 0bed8676e646cbcacf1c1f2c9b41ce99119b4c8c..0000000000000000000000000000000000000000 --- a/spaces/WayneLinn/Singapore_Air_Quality_Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Singapore Air Quality Prediction -emoji: 🌖 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Widium/Style-Recreation/functions/vgg.py b/spaces/Widium/Style-Recreation/functions/vgg.py deleted file mode 100644 index 9ecf1d9d7ac0b195c356663a22aed870ea523d35..0000000000000000000000000000000000000000 --- a/spaces/Widium/Style-Recreation/functions/vgg.py +++ /dev/null @@ -1,74 +0,0 @@ -# *************************************************************************** # -# # -# vgg.py # -# # -# By: Widium # -# Github : https://github.com/widium # -# # -# Created: 2022/11/15 15:25:02 by ebennace # -# Updated: 2023/05/03 16:05:48 by Widium # -# # -# **************************************************************************** ## =============== Import =================== # -import tensorflow as tf -import numpy as np - -from tensorflow.keras.applications import VGG19 -from keras import Model - -# ===================================================== # - -def create_list_of_vgg_layer(): - """ - Create a list of VGG19 layer names that are important for style transfer. - - Returns: - list: A list of VGG19 layer names used for style transfer. - """ - style_layer_names = [ - 'block1_conv1', - 'block2_conv1', - 'block3_conv1', - 'block4_conv1', - 'block5_conv1' - ] - - return (style_layer_names) - -# ===================================================== # - -def load_vgg19()-> Model: - """ - Load the pre-trained VGG19 model from Keras with ImageNet weights. - - Returns: - Model: The VGG19 model without the top classification layers. - """ - vgg = VGG19(include_top=False, weights='imagenet') - return vgg - -# ===================================================== # - -def create_multi_output_model(style_layers : list)-> Model: - """ - Create a multi-output model using VGG19 for style transfer. 
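    Each requested layer's activations become one output of the returned model, so a
    single forward pass yields all style features at once.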
- - Args: - style_layers (list): A list of style layer names from VGG19 model. - - Returns: - Model: A model with multiple outputs for the specified style layers. - """ - vgg19 = load_vgg19() - - layers_name = style_layers - layers_output = list() - - for name in layers_name: - layer = vgg19.get_layer(name) - output = layer.output - layers_output.append(output) - - multi_output_model = Model([vgg19.input], layers_output) - multi_output_model.trainable = False - - return (multi_output_model) \ No newline at end of file diff --git a/spaces/Winnie-Kay/Distbert-Sentiments/README.md b/spaces/Winnie-Kay/Distbert-Sentiments/README.md deleted file mode 100644 index 1092282b552a16c4c3947af0a6793f16d2af1219..0000000000000000000000000000000000000000 --- a/spaces/Winnie-Kay/Distbert-Sentiments/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Distbert Sentiments -emoji: 📚 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Xikless/instructpix2pix/edit_app.py b/spaces/Xikless/instructpix2pix/edit_app.py deleted file mode 100644 index 0359e815ad51b1a2291dd8943555568e452981ad..0000000000000000000000000000000000000000 --- a/spaces/Xikless/instructpix2pix/edit_app.py +++ /dev/null @@ -1,192 +0,0 @@ -from __future__ import annotations - -import math -import random - -import gradio as gr -import torch -from PIL import Image, ImageOps -from diffusers import StableDiffusionInstructPix2PixPipeline - - -help_text = """ -If you're not getting what you want, there may be a few reasons: -1. Is the image not changing enough? Your Image CFG weight may be too high. This value dictates how similar the output should be to the input. It's possible your edit requires larger changes from the original image, and your Image CFG weight isn't allowing that. Alternatively, your Text CFG weight may be too low. This value dictates how much to listen to the text instruction. The default Image CFG of 1.5 and Text CFG of 7.5 are a good starting point, but aren't necessarily optimal for each edit. Try: - * Decreasing the Image CFG weight, or - * Increasing the Text CFG weight, or -2. Conversely, is the image changing too much, such that the details in the original image aren't preserved? Try: - * Increasing the Image CFG weight, or - * Decreasing the Text CFG weight -3. Try generating results with different random seeds by setting "Randomize Seed" and running generation multiple times. You can also try setting "Randomize CFG" to sample new Text CFG and Image CFG values each time. -4. Rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog"). -5. Increasing the number of steps sometimes improves results. -6. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try: - * Cropping the image so the face takes up a larger portion of the frame. 
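7. As a concrete starting point: if the image barely changes, try Image CFG near 1.2 or Text CFG near 8.5; if it changes too much, try Image CFG near 1.8 or Text CFG near 6.5 ("Randomize CFG" samples from roughly these ranges).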
-""" - - -example_instructions = [ - "Make it a picasso painting", - "as if it were by modigliani", - "convert to a bronze statue", - "Turn it into an anime.", - "have it look like a graphic novel", - "make him gain weight", - "what would he look like bald?", - "Have him smile", - "Put him in a cocktail party.", - "move him at the beach.", - "add dramatic lighting", - "Convert to black and white", - "What if it were snowing?", - "Give him a leather jacket", - "Turn him into a cyborg!", - "make him wear a beanie", -] - -model_id = "timbrooks/instruct-pix2pix" - -def main(): - pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None).to("cuda") - example_image = Image.open("imgs/example.jpg").convert("RGB") - - def load_example( - steps: int, - randomize_seed: bool, - seed: int, - randomize_cfg: bool, - text_cfg_scale: float, - image_cfg_scale: float, - ): - example_instruction = random.choice(example_instructions) - return [example_image, example_instruction] + generate( - example_image, - example_instruction, - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ) - - def generate( - input_image: Image.Image, - instruction: str, - steps: int, - randomize_seed: bool, - seed: int, - randomize_cfg: bool, - text_cfg_scale: float, - image_cfg_scale: float, - ): - seed = random.randint(0, 100000) if randomize_seed else seed - text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale - image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale - - width, height = input_image.size - factor = 512 / max(width, height) - factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height) - width = int((width * factor) // 64) * 64 - height = int((height * factor) // 64) * 64 - input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS) - - if instruction == "": - return [input_image, seed] - - generator = torch.manual_seed(seed) - edited_image = pipe( - instruction, image=input_image, - guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale, - num_inference_steps=steps, generator=generator, - ).images[0] - return [seed, text_cfg_scale, image_cfg_scale, edited_image] - - def reset(): - return [0, "Randomize Seed", 1371, "Fix CFG", 7.5, 1.5, None] - - with gr.Blocks() as demo: - gr.HTML("""

    InstructPix2Pix: Learning to Follow Image Editing Instructions
    For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
    Duplicate Space

    """) - with gr.Row(): - with gr.Column(scale=1, min_width=100): - generate_button = gr.Button("Generate") - with gr.Column(scale=1, min_width=100): - load_button = gr.Button("Load Example") - with gr.Column(scale=1, min_width=100): - reset_button = gr.Button("Reset") - with gr.Column(scale=3): - instruction = gr.Textbox(lines=1, label="Edit Instruction", interactive=True) - - with gr.Row(): - input_image = gr.Image(label="Input Image", type="pil", interactive=True) - edited_image = gr.Image(label=f"Edited Image", type="pil", interactive=False) - input_image.style(height=512, width=512) - edited_image.style(height=512, width=512) - - with gr.Row(): - steps = gr.Number(value=50, precision=0, label="Steps", interactive=True) - randomize_seed = gr.Radio( - ["Fix Seed", "Randomize Seed"], - value="Randomize Seed", - type="index", - show_label=False, - interactive=True, - ) - seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True) - randomize_cfg = gr.Radio( - ["Fix CFG", "Randomize CFG"], - value="Fix CFG", - type="index", - show_label=False, - interactive=True, - ) - text_cfg_scale = gr.Number(value=7.5, label=f"Text CFG", interactive=True) - image_cfg_scale = gr.Number(value=1.5, label=f"Image CFG", interactive=True) - - gr.Markdown(help_text) - - load_button.click( - fn=load_example, - inputs=[ - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ], - outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image], - ) - generate_button.click( - fn=generate, - inputs=[ - input_image, - instruction, - steps, - randomize_seed, - seed, - randomize_cfg, - text_cfg_scale, - image_cfg_scale, - ], - outputs=[seed, text_cfg_scale, image_cfg_scale, edited_image], - ) - reset_button.click( - fn=reset, - inputs=[], - outputs=[steps, randomize_seed, seed, randomize_cfg, text_cfg_scale, image_cfg_scale, edited_image], - ) - - demo.queue(concurrency_count=1) - demo.launch(share=False) - - -if __name__ == "__main__": - main() diff --git a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py b/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py deleted file mode 100644 index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000 --- a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import torch -from .monotonic_align.core import maximum_path_c - - -def maximum_path(neg_cent, mask): - """ Cython optimized version. 
- neg_cent: [b, t_t, t_s]
- mask: [b, t_t, t_s]
- """
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
- path = np.zeros(neg_cent.shape, dtype=np.int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
- return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/XuZhang999/ProArticles/app.py b/spaces/XuZhang999/ProArticles/app.py deleted file mode 100644 index f72828e5152d2ea364fa5e205c2619c7607c857d..0000000000000000000000000000000000000000 --- a/spaces/XuZhang999/ProArticles/app.py +++ /dev/null @@ -1,64 +0,0 @@ -import os
-import openai
-import gradio as gr
-import zipfile
-
-import logging
-import datetime
-
-# Create the log directory
-log_dir = "/home/xzhang/softwares/logs"
-if not os.path.exists(log_dir):
-    os.makedirs(log_dir)
-current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
-log_file = os.path.join(log_dir, f"{current_time}.log")
-logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
-
-def chat(keyword):
-    prompt = f"write an article about {keyword} with 1000 to 1500 words"
-    completion = openai.ChatCompletion.create(
-        model = "gpt-3.5-turbo",
-        messages = [
-            {"role": "user", "content": prompt}
-        ])
-
-    return completion.choices[0].message.content
-
-def write_articles(openai_key, keywords, article_per_keyword):
-    openai.api_key = openai_key
-    keyword_list = keywords.split(",")
-    result_files = []
-    print(keyword_list)
-
-    for keyword in keyword_list:
-        keyword = keyword.strip()  # Remove leading and trailing spaces
-        zip_filename = f'{keyword}.zip'
-        with zipfile.ZipFile(zip_filename, 'w') as myzip:
-            for i in range(int(article_per_keyword)):
-                article = chat(keyword)
-                filename = f"{keyword}_{i + 1}.txt"
-                print('===========filename:', filename)
-                with open(filename, "w") as f:
-                    f.write(article)
-                myzip.write(filename)
-                os.remove(filename)  # Remove the file after adding it to the zip
-        result_files.append(zip_filename)
-    print('===========result_files:', result_files)
-
-    return result_files
-
-iface = gr.Interface(
-    fn=write_articles,
-    inputs=[gr.inputs.Textbox(lines=2, placeholder="Enter your OpenAI API key..."),
-            gr.inputs.Textbox(lines=2, placeholder="Enter keywords separated by commas..."),
-            gr.inputs.Slider(minimum=1, maximum=500, default=1, step=1)],
-    outputs=gr.outputs.File(label="Download Zip Files"),
-)
-iface.queue()
-
-try:
-    if __name__ == "__main__":
-        iface.launch()
-except Exception as e:
-    logging.debug(f"An exception occurred: {e}")
-    print(f"An exception occurred: {e}") diff --git a/spaces/XzJosh/Bekki-Bert-VITS2/modules.py b/spaces/XzJosh/Bekki-Bert-VITS2/modules.py deleted file mode 100644 index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Bekki-Bert-VITS2/modules.py +++ /dev/null @@ -1,452 +0,0 @@ -import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-from attentions import Encoder
-
-LRELU_SLOPE = 0.1
-
-class LayerNorm(nn.Module):
-    def __init__(self, channels, eps=1e-5):
-        super().__init__()
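        # Channel-wise LayerNorm for [b, c, t] tensors: forward() transposes so the
        # channel axis is last, applies F.layer_norm, then transposes back.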
-        self.channels = channels
-        self.eps = eps
-
-        self.gamma = nn.Parameter(torch.ones(channels))
-        self.beta = nn.Parameter(torch.zeros(channels))
-
-    def forward(self, x):
-        x = x.transpose(1, -1)
-        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
-        return x.transpose(1, -1)
-
-class ConvReluNorm(nn.Module):
-    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
-        super().__init__()
-        self.in_channels = in_channels
-        self.hidden_channels = hidden_channels
-        self.out_channels = out_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
-        self.conv_layers = nn.ModuleList()
-        self.norm_layers = nn.ModuleList()
-        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-        self.norm_layers.append(LayerNorm(hidden_channels))
-        self.relu_drop = nn.Sequential(
-            nn.ReLU(),
-            nn.Dropout(p_dropout))
-        for _ in range(n_layers-1):
-            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
-            self.norm_layers.append(LayerNorm(hidden_channels))
-        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask):
-        x_org = x
-        for i in range(self.n_layers):
-            x = self.conv_layers[i](x * x_mask)
-            x = self.norm_layers[i](x)
-            x = self.relu_drop(x)
-        x = x_org + self.proj(x)
-        return x * x_mask
-
-
-class DDSConv(nn.Module):
-    """
-    Dilated and Depth-Separable Convolution
-    """
-    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
-        super().__init__()
-        self.channels = channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-
-        self.drop = nn.Dropout(p_dropout)
-        self.convs_sep = nn.ModuleList()
-        self.convs_1x1 = nn.ModuleList()
-        self.norms_1 = nn.ModuleList()
-        self.norms_2 = nn.ModuleList()
-        for i in range(n_layers):
-            dilation = kernel_size ** i
-            padding = (kernel_size * dilation - dilation) // 2
-            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
-                groups=channels, dilation=dilation, padding=padding
-            ))
-            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-            self.norms_1.append(LayerNorm(channels))
-            self.norms_2.append(LayerNorm(channels))
-
-    def forward(self, x, x_mask, g=None):
-        if g is not None:
-            x = x + g
-        for i in range(self.n_layers):
-            y = self.convs_sep[i](x * x_mask)
-            y = self.norms_1[i](y)
-            y = F.gelu(y)
-            y = self.convs_1x1[i](y)
-            y = self.norms_2[i](y)
-            y = F.gelu(y)
-            y = self.drop(y)
-            x = x + y
-        return x * x_mask
-
-
-class WN(torch.nn.Module):
-    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
-        super(WN, self).__init__()
-        assert(kernel_size % 2 == 1)
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.dilation_rate = dilation_rate
-        self.n_layers = n_layers
-        self.gin_channels = gin_channels
-        self.p_dropout = p_dropout
-
-        self.in_layers = torch.nn.ModuleList()
-        self.res_skip_layers = torch.nn.ModuleList()
-        self.drop = nn.Dropout(p_dropout)
-
-        if gin_channels != 0:
-            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
-            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
-        for i in range(n_layers):
-            dilation = dilation_rate ** i
-            padding = int((kernel_size * dilation - dilation) / 2)
-            in_layer = 
torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - 
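        # ResBlock2 is the lighter HiFi-GAN residual block: a single dilated conv per
        # step instead of ResBlock1's dilated+plain conv pairs; init_weights (commons)
        # starts the conv weights from N(0, 0.01).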
self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) 
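        # The projection below emits, per half-channel and time step, the unnormalized
        # spline parameters: num_bins widths + num_bins heights + (num_bins - 1)
        # interior derivatives = 3 * num_bins - 1 values.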
-        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
-        self.proj.weight.data.zero_()
-        self.proj.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-        h = self.pre(x0)
-        h = self.convs(h, x_mask, g=g)
-        h = self.proj(h) * x_mask
-
-        b, c, t = x0.shape
-        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
-        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
-        unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
-        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
-            unnormalized_widths,
-            unnormalized_heights,
-            unnormalized_derivatives,
-            inverse=reverse,
-            tails='linear',
-            tail_bound=self.tail_bound
-        )
-
-        x = torch.cat([x0, x1], 1) * x_mask
-        logdet = torch.sum(logabsdet * x_mask, [1,2])
-        if not reverse:
-            return x, logdet
-        else:
-            return x
-
-class TransformerCouplingLayer(nn.Module):
-    def __init__(self,
-        channels,
-        hidden_channels,
-        kernel_size,
-        n_layers,
-        n_heads,
-        p_dropout=0,
-        filter_channels=0,
-        mean_only=False,
-        wn_sharing_parameter=None,
-        gin_channels = 0
-        ):
-        assert channels % 2 == 0, "channels should be divisible by 2"
-        super().__init__()
-        self.channels = channels
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.half_channels = channels // 2
-        self.mean_only = mean_only
-
-        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-        self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter
-        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-        self.post.weight.data.zero_()
-        self.post.bias.data.zero_()
-
-    def forward(self, x, x_mask, g=None, reverse=False):
-        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
-        h = self.pre(x0) * x_mask
-        h = self.enc(h, x_mask, g=g)
-        stats = self.post(h) * x_mask
-        if not self.mean_only:
-            m, logs = torch.split(stats, [self.half_channels]*2, 1)
-        else:
-            m = stats
-            logs = torch.zeros_like(m)
-
-        if not reverse:
-            x1 = m + x1 * torch.exp(logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            logdet = torch.sum(logs, [1,2])
-            return x, logdet
-        else:
-            x1 = (x1 - m) * torch.exp(-logs) * x_mask
-            x = torch.cat([x0, x1], 1)
-            return x diff --git a/spaces/XzJosh/Jiaran-Bert-VITS2/attentions.py b/spaces/XzJosh/Jiaran-Bert-VITS2/attentions.py deleted file mode 100644 index 1192dd7268c20c11010e73a6017ed09549695afe..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Jiaran-Bert-VITS2/attentions.py +++ /dev/null @@ -1,344 +0,0 @@ -import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import logging
-
-logger = logging.getLogger(__name__)
-
-class LayerNorm(nn.Module):
-    def __init__(self, channels, eps=1e-5):
-        super().__init__()
-        self.channels = channels
-        self.eps = eps
-
-        self.gamma = nn.Parameter(torch.ones(channels))
-
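        # gamma/beta are the learnable scale and shift handed to F.layer_norm in forward().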
self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - #if isflow: - # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - # self.cond_layer = weight_norm(cond_layer, name='weight') - # self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - logging.debug(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - 
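            # Each decoder block stacks masked self-attention, encoder-decoder
            # cross-attention, and a causal FFN; forward() applies dropout plus a
            # residual connection and LayerNorm after each sublayer.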
self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
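            # Relative-position attention in the style of Shaw et al. (2018): slice the
            # learned per-offset key embeddings for this length, score them against the
            # queries, and convert the relative-indexed logits to absolute positions
            # before adding them to the content scores.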
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
-        x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
-        return x_final
-
-    def _absolute_position_to_relative_position(self, x):
-        """
-        x: [b, h, l, l]
-        ret: [b, h, l, 2*l-1]
-        """
-        batch, heads, length, _ = x.size()
-        # pad along column
-        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
-        x_flat = x.view([batch, heads, length**2 + length*(length -1)])
-        # add 0's in the beginning that will skew the elements after reshape
-        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
-        x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
-        return x_final
-
-    def _attention_bias_proximal(self, length):
-        """Bias for self-attention to encourage attention to close positions.
-        Args:
-            length: an integer scalar.
-        Returns:
-            a Tensor with shape [1, 1, length, length]
-        """
-        r = torch.arange(length, dtype=torch.float32)
-        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
-        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
-    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
-        super().__init__()
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.filter_channels = filter_channels
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.activation = activation
-        self.causal = causal
-
-        if causal:
-            self.padding = self._causal_padding
-        else:
-            self.padding = self._same_padding
-
-        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
-        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
-        self.drop = nn.Dropout(p_dropout)
-
-    def forward(self, x, x_mask):
-        x = self.conv_1(self.padding(x * x_mask))
-        if self.activation == "gelu":
-            x = x * torch.sigmoid(1.702 * x)
-        else:
-            x = torch.relu(x)
-        x = self.drop(x)
-        x = self.conv_2(self.padding(x * x_mask))
-        return x * x_mask
-
-    def _causal_padding(self, x):
-        if self.kernel_size == 1:
-            return x
-        pad_l = self.kernel_size - 1
-        pad_r = 0
-        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-        x = F.pad(x, commons.convert_pad_shape(padding))
-        return x
-
-    def _same_padding(self, x):
-        if self.kernel_size == 1:
-            return x
-        pad_l = (self.kernel_size - 1) // 2
-        pad_r = self.kernel_size // 2
-        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
-        x = F.pad(x, commons.convert_pad_shape(padding))
-        return x diff --git a/spaces/Y-T-G/Blur-Anything/export_onnx_model.py b/spaces/Y-T-G/Blur-Anything/export_onnx_model.py deleted file mode 100644 index 00e7a458fb975cf800c2c0e92a8afa00589dcef9..0000000000000000000000000000000000000000 --- a/spaces/Y-T-G/Blur-Anything/export_onnx_model.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-from mobile_sam import sam_model_registry
-from mobile_sam.utils.onnx import SamOnnxModel
-
-import argparse
-import warnings
-
-try:
-    import onnxruntime  # type: ignore
-
-    onnxruntime_exists = True
-except ImportError:
-    onnxruntime_exists = False
-
-parser = argparse.ArgumentParser(
-    description="Export the SAM prompt encoder and mask decoder to an ONNX model."
-)
-
-parser.add_argument(
-    "--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint."
-) - -parser.add_argument( - "--output", type=str, required=True, help="The filename to save the ONNX model to." -) - -parser.add_argument( - "--model-type", - type=str, - required=True, - help="In ['default', 'vit_h', 'vit_l', 'vit_b']. Which type of SAM model to export.", -) - -parser.add_argument( - "--return-single-mask", - action="store_true", - help=( - "If true, the exported ONNX model will only return the best mask, " - "instead of returning multiple masks. For high resolution images " - "this can improve runtime when upscaling masks is expensive." - ), -) - -parser.add_argument( - "--opset", - type=int, - default=16, - help="The ONNX opset version to use. Must be >=11", -) - -parser.add_argument( - "--quantize-out", - type=str, - default=None, - help=( - "If set, will quantize the model and save it with this name. " - "Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize." - ), -) - -parser.add_argument( - "--gelu-approximate", - action="store_true", - help=( - "Replace GELU operations with approximations using tanh. Useful " - "for some runtimes that have slow or unimplemented erf ops, used in GELU." - ), -) - -parser.add_argument( - "--use-stability-score", - action="store_true", - help=( - "Replaces the model's predicted mask quality score with the stability " - "score calculated on the low resolution masks using an offset of 1.0. " - ), -) - -parser.add_argument( - "--return-extra-metrics", - action="store_true", - help=( - "The model will return five results: (masks, scores, stability_scores, " - "areas, low_res_logits) instead of the usual three. This can be " - "significantly slower for high resolution outputs." - ), -) - - -def run_export( - model_type: str, - checkpoint: str, - output: str, - opset: int, - return_single_mask: bool, - gelu_approximate: bool = False, - use_stability_score: bool = False, - return_extra_metrics=False, -): - print("Loading model...") - sam = sam_model_registry[model_type](checkpoint=checkpoint) - - onnx_model = SamOnnxModel( - model=sam, - return_single_mask=return_single_mask, - use_stability_score=use_stability_score, - return_extra_metrics=return_extra_metrics, - ) - - if gelu_approximate: - for n, m in onnx_model.named_modules(): - if isinstance(m, torch.nn.GELU): - m.approximate = "tanh" - - dynamic_axes = { - "point_coords": {1: "num_points"}, - "point_labels": {1: "num_points"}, - } - - embed_dim = sam.prompt_encoder.embed_dim - embed_size = sam.prompt_encoder.image_embedding_size - mask_input_size = [4 * x for x in embed_size] - dummy_inputs = { - "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float), - "point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float), - "point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float), - "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float), - "has_mask_input": torch.tensor([1], dtype=torch.float), - "orig_im_size": torch.tensor([1500, 2250], dtype=torch.float), - } - - _ = onnx_model(**dummy_inputs) - - output_names = ["masks", "iou_predictions", "low_res_masks"] - - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) - warnings.filterwarnings("ignore", category=UserWarning) - with open(output, "wb") as f: - print(f"Exporting onnx model to {output}...") - torch.onnx.export( - onnx_model, - tuple(dummy_inputs.values()), - f, - export_params=True, - verbose=False, - opset_version=opset, - do_constant_folding=True, - 
input_names=list(dummy_inputs.keys()), - output_names=output_names, - dynamic_axes=dynamic_axes, - ) - - if onnxruntime_exists: - ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()} - # set cpu provider default - providers = ["CPUExecutionProvider"] - ort_session = onnxruntime.InferenceSession(output, providers=providers) - _ = ort_session.run(None, ort_inputs) - print("Model has successfully been run with ONNXRuntime.") - - -def to_numpy(tensor): - return tensor.cpu().numpy() - - -if __name__ == "__main__": - args = parser.parse_args() - run_export( - model_type=args.model_type, - checkpoint=args.checkpoint, - output=args.output, - opset=args.opset, - return_single_mask=args.return_single_mask, - gelu_approximate=args.gelu_approximate, - use_stability_score=args.use_stability_score, - return_extra_metrics=args.return_extra_metrics, - ) - - if args.quantize_out is not None: - assert onnxruntime_exists, "onnxruntime is required to quantize the model." - from onnxruntime.quantization import QuantType # type: ignore - from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore - - print(f"Quantizing model and writing to {args.quantize_out}...") - quantize_dynamic( - model_input=args.output, - model_output=args.quantize_out, - optimize_model=True, - per_channel=False, - reduce_range=False, - weight_type=QuantType.QUInt8, - ) - print("Done!") diff --git a/spaces/Yan233th/so-vits-svc-models/modules/commons.py b/spaces/Yan233th/so-vits-svc-models/modules/commons.py deleted file mode 100644 index 074888006392e956ce204d8368362dbb2cd4e304..0000000000000000000000000000000000000000 --- a/spaces/Yan233th/so-vits-svc-models/modules/commons.py +++ /dev/null @@ -1,188 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -def slice_pitch_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - -def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size) - return ret, ret_pitch, ids_str - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def rand_spec_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = 
sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/rl/value_guided_sampling.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/rl/value_guided_sampling.py deleted file mode 100644 index 4dd935f54d608f45c8ae69eda5a571f1bf65084b..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/experimental/rl/value_guided_sampling.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
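# Value-guided trajectory sampling: a UNet1D diffusion model denoises state-action
# trajectories while gradients from a separately trained value function nudge each
# denoising step toward plans with higher predicted return (cf. Janner et al., 2022,
# "Planning with Diffusion for Flexible Behavior Synthesis").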
- -import numpy as np -import torch - -import tqdm - -from ...models.unet_1d import UNet1DModel -from ...pipeline_utils import DiffusionPipeline -from ...utils.dummy_pt_objects import DDPMScheduler - - -class ValueGuidedRLPipeline(DiffusionPipeline): - def __init__( - self, - value_function: UNet1DModel, - unet: UNet1DModel, - scheduler: DDPMScheduler, - env, - ): - super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env - self.data = env.get_dataset() - self.means = dict() - for key in self.data.keys(): - try: - self.means[key] = self.data[key].mean() - except: - pass - self.stds = dict() - for key in self.data.keys(): - try: - self.stds[key] = self.data[key].std() - except: - pass - self.state_dim = env.observation_space.shape[0] - self.action_dim = env.action_space.shape[0] - - def normalize(self, x_in, key): - return (x_in - self.means[key]) / self.stds[key] - - def de_normalize(self, x_in, key): - return x_in * self.stds[key] + self.means[key] - - def to_torch(self, x_in): - if type(x_in) is dict: - return {k: self.to_torch(v) for k, v in x_in.items()} - elif torch.is_tensor(x_in): - return x_in.to(self.unet.device) - return torch.tensor(x_in, device=self.unet.device) - - def reset_x0(self, x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - def run_diffusion(self, x, conditions, n_guide_steps, scale): - batch_size = x.shape[0] - y = None - for i in tqdm.tqdm(self.scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) - for _ in range(n_guide_steps): - with torch.enable_grad(): - x.requires_grad_() - y = self.value_function(x.permute(0, 2, 1), timesteps).sample - grad = torch.autograd.grad([y.sum()], [x])[0] - - posterior_variance = self.scheduler._get_variance(i) - model_std = torch.exp(0.5 * posterior_variance) - grad = model_std * grad - grad[timesteps < 2] = 0 - x = x.detach() - x = x + scale * grad - x = self.reset_x0(x, conditions, self.action_dim) - prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) - # TODO: set prediction_type when instantiating the model - x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # apply conditions to the trajectory - x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_torch(x) - return x, y - - def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): - # normalize the observations and create batch dimension - obs = self.normalize(obs, "observations") - obs = obs[None].repeat(batch_size, axis=0) - - conditions = {0: self.to_torch(obs)} - shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) - - # generate initial noise and apply our conditions (to make the trajectories start at current state) - x1 = torch.randn(shape, device=self.unet.device) - x = self.reset_x0(x1, conditions, self.action_dim) - x = self.to_torch(x) - - # run the diffusion process - x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) - - # sort output trajectories by value - sorted_idx = y.argsort(0, descending=True).squeeze() - sorted_values = x[sorted_idx] - actions = sorted_values[:, :, : self.action_dim] - actions = actions.detach().cpu().numpy() - denorm_actions = self.de_normalize(actions, key="actions") - - # select the action with the highest value - if y is not None: - selected_index = 0 - else: - # if we didn't run value 
guiding, select a random action - selected_index = np.random.randint(0, batch_size) - denorm_actions = denorm_actions[selected_index, 0] - return denorm_actions diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/safety_checker.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/safety_checker.py deleted file mode 100644 index 1476c1ede62c6f2189c9025598ddab02169c5f69..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/safety_checker.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import torch -import torch.nn as nn - -from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel - -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -def cosine_distance(image_embeds, text_embeds): - normalized_image_embeds = nn.functional.normalize(image_embeds) - normalized_text_embeds = nn.functional.normalize(text_embeds) - return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) - - -class StableDiffusionSafetyChecker(PreTrainedModel): - config_class = CLIPConfig - - _no_split_modules = ["CLIPEncoderLayer"] - - def __init__(self, config: CLIPConfig): - super().__init__(config) - - self.vision_model = CLIPVisionModel(config.vision_config) - self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) - - self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) - self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) - - self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) - self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) - - @torch.no_grad() - def forward(self, clip_input, images): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() - cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() - - result = [] - batch_size = image_embeds.shape[0] - for i in range(batch_size): - result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} - - # increase this value to create a stronger `nfsw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - for concept_idx in range(len(special_cos_dist[0])): - concept_cos = special_cos_dist[i][concept_idx] - concept_threshold = self.special_care_embeds_weights[concept_idx].item() - result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 
3) - if result_img["special_scores"][concept_idx] > 0: - result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) - adjustment = 0.01 - - for concept_idx in range(len(cos_dist[0])): - concept_cos = cos_dist[i][concept_idx] - concept_threshold = self.concept_embeds_weights[concept_idx].item() - result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) - if result_img["concept_scores"][concept_idx] > 0: - result_img["bad_concepts"].append(concept_idx) - - result.append(result_img) - - # has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] - has_nsfw_concepts = [False] - - for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): - if has_nsfw_concept: - images[idx] = np.zeros(images[idx].shape) # black image - - if any(has_nsfw_concepts): - logger.warning( - "Potential NSFW content was detected in one or more images. A black image will be returned instead." - " Try again with a different prompt and/or seed." - ) - - return images, has_nsfw_concepts - - @torch.no_grad() - def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) - cos_dist = cosine_distance(image_embeds, self.concept_embeds) - - # increase this value to create a stronger `nsfw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment - # special_scores = special_scores.round(decimals=3) - special_care = torch.any(special_scores > 0, dim=1) - special_adjustment = special_care * 0.01 - special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) - - concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment - # concept_scores = concept_scores.round(decimals=3) - has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) - - images[has_nsfw_concepts] = 0.0 # black image - - return images, has_nsfw_concepts diff --git a/spaces/abhilash1910/QA_Albert/README.md b/spaces/abhilash1910/QA_Albert/README.md deleted file mode 100644 index 778129bb45a57937253c3cc7d0943c7fd7e4349d..0000000000000000000000000000000000000000 --- a/spaces/abhilash1910/QA_Albert/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: QA_Albert -emoji: ⚡ -colorFrom: green -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/__init__.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/__init__.py deleted file mode 100644 index 5dfed13db20f1f18dca01192e0c0b1661d8d0259..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/window/cocoa/__init__.py +++ /dev/null @@ -1,573 +0,0 @@ -from ctypes import * - -import pyglet -from pyglet.window import BaseWindow -from pyglet.window import MouseCursor, DefaultMouseCursor -from pyglet.window import WindowException -from pyglet.event import EventDispatcher - -from pyglet.canvas.cocoa import CocoaCanvas - -from pyglet.libs.darwin import cocoapy, CGPoint - -from .systemcursor import SystemCursor -from 
.pyglet_delegate import PygletDelegate -from .pyglet_window import PygletWindow, PygletToolWindow -from .pyglet_view import PygletView - -NSApplication = cocoapy.ObjCClass('NSApplication') -NSCursor = cocoapy.ObjCClass('NSCursor') -NSAutoreleasePool = cocoapy.ObjCClass('NSAutoreleasePool') -NSColor = cocoapy.ObjCClass('NSColor') -NSEvent = cocoapy.ObjCClass('NSEvent') -NSImage = cocoapy.ObjCClass('NSImage') - -quartz = cocoapy.quartz -cf = cocoapy.cf - - -class CocoaMouseCursor(MouseCursor): - gl_drawable = False - - def __init__(self, cursorName): - # cursorName is a string identifying one of the named default NSCursors - # e.g. 'pointingHandCursor', and can be sent as message to NSCursor class. - self.cursorName = cursorName - - def set(self): - cursor = getattr(NSCursor, self.cursorName)() - cursor.set() - - -class CocoaWindow(BaseWindow): - - # NSWindow instance. - _nswindow = None - - # Delegate object. - _delegate = None - - # Window properties - _mouse_platform_visible = True - _mouse_ignore_motion = False - - # Flag set during close() method. - _was_closed = False - - # NSWindow style masks. - _style_masks = { - BaseWindow.WINDOW_STYLE_DEFAULT: cocoapy.NSTitledWindowMask | - cocoapy.NSClosableWindowMask | - cocoapy.NSMiniaturizableWindowMask, - BaseWindow.WINDOW_STYLE_DIALOG: cocoapy.NSTitledWindowMask | - cocoapy.NSClosableWindowMask, - BaseWindow.WINDOW_STYLE_TOOL: cocoapy.NSTitledWindowMask | - cocoapy.NSClosableWindowMask | - cocoapy.NSUtilityWindowMask, - BaseWindow.WINDOW_STYLE_BORDERLESS: cocoapy.NSBorderlessWindowMask, - } - - def _recreate(self, changes): - if 'context' in changes: - self.context.set_current() - - if 'fullscreen' in changes: - if not self._fullscreen: # leaving fullscreen - self.screen.release_display() - - self._create() - - def _create(self): - # Create a temporary autorelease pool for this method. - pool = NSAutoreleasePool.alloc().init() - - if self._nswindow: - # The window is about the be recreated so destroy everything - # associated with the old window, then destroy the window itself. - nsview = self.canvas.nsview - self.canvas = None - self._nswindow.orderOut_(None) - self._nswindow.close() - self.context.detach() - self._nswindow.release() - self._nswindow = None - nsview.release() - self._delegate.release() - self._delegate = None - - # Determine window parameters. - content_rect = cocoapy.NSMakeRect(0, 0, self._width, self._height) - WindowClass = PygletWindow - if self._fullscreen: - style_mask = cocoapy.NSBorderlessWindowMask - else: - if self._style not in self._style_masks: - self._style = self.WINDOW_STYLE_DEFAULT - style_mask = self._style_masks[self._style] - if self._resizable: - style_mask |= cocoapy.NSResizableWindowMask - if self._style == BaseWindow.WINDOW_STYLE_TOOL: - WindowClass = PygletToolWindow - - # First create an instance of our NSWindow subclass. - - # FIX ME: - # Need to use this initializer to have any hope of multi-monitor support. - # But currently causes problems on Mac OS X Lion. So for now, we initialize the - # window without including screen information. 
- # - # self._nswindow = WindowClass.alloc().initWithContentRect_styleMask_backing_defer_screen_( - # content_rect, # contentRect - # style_mask, # styleMask - # NSBackingStoreBuffered, # backing - # False, # defer - # self.screen.get_nsscreen()) # screen - - self._nswindow = WindowClass.alloc().initWithContentRect_styleMask_backing_defer_( - content_rect, # contentRect - style_mask, # styleMask - cocoapy.NSBackingStoreBuffered, # backing - False) # defer - - if self._fullscreen: - # BUG: I suspect that this doesn't do the right thing when using - # multiple monitors (which would be to go fullscreen on the monitor - # where the window is located). However I've no way to test. - blackColor = NSColor.blackColor() - self._nswindow.setBackgroundColor_(blackColor) - self._nswindow.setOpaque_(True) - self.screen.capture_display() - self._nswindow.setLevel_(quartz.CGShieldingWindowLevel()) - self.context.set_full_screen() - self._center_window() - self._mouse_in_window = True - else: - self._set_nice_window_location() - self._mouse_in_window = self._mouse_in_content_rect() - - # Then create a view and set it as our NSWindow's content view. - self._nsview = PygletView.alloc().initWithFrame_cocoaWindow_(content_rect, self) - self._nswindow.setContentView_(self._nsview) - self._nswindow.makeFirstResponder_(self._nsview) - - # Create a canvas with the view as its drawable and attach context to it. - self.canvas = CocoaCanvas(self.display, self.screen, self._nsview) - self.context.attach(self.canvas) - - # Configure the window. - self._nswindow.setAcceptsMouseMovedEvents_(True) - self._nswindow.setReleasedWhenClosed_(False) - self._nswindow.useOptimizedDrawing_(True) - self._nswindow.setPreservesContentDuringLiveResize_(False) - - # Set the delegate. - self._delegate = PygletDelegate.alloc().initWithWindow_(self) - - # Configure CocoaWindow. - self.set_caption(self._caption) - if self._minimum_size is not None: - self.set_minimum_size(*self._minimum_size) - if self._maximum_size is not None: - self.set_maximum_size(*self._maximum_size) - - # TODO: Add support for file drops. - if self._file_drops: - raise NotImplementedError("File drops are not implemented on MacOS") - - self.context.update_geometry() - self.switch_to() - self.set_vsync(self._vsync) - self.set_visible(self._visible) - - pool.drain() - - def _set_nice_window_location(self): - # Construct a list of all visible windows that aren't us. - visible_windows = [win for win in pyglet.app.windows if - win is not self and - win._nswindow and - win._nswindow.isVisible()] - # If there aren't any visible windows, then center this window. - if not visible_windows: - self._center_window() - # Otherwise, cascade from last window in list. - else: - point = visible_windows[-1]._nswindow.cascadeTopLeftFromPoint_(cocoapy.NSZeroPoint) - self._nswindow.cascadeTopLeftFromPoint_(point) - - def _center_window(self): - # [NSWindow center] does not move the window to a true center position - # and also always moves the window to the main display. - x = self.screen.x + int((self.screen.width - self._width) // 2) - y = self.screen.y + int((self.screen.height - self._height) // 2) - self._nswindow.setFrameOrigin_(cocoapy.NSPoint(x, y)) - - def close(self): - # If we've already gone through this once, don't do it again. - if self._was_closed: - return - - # Create a temporary autorelease pool for this method. 
- pool = NSAutoreleasePool.new() - - # Restore cursor visibility - self.set_mouse_platform_visible(True) - self.set_exclusive_mouse(False) - self.set_exclusive_keyboard(False) - - # Remove the delegate object - if self._delegate: - self._nswindow.setDelegate_(None) - self._delegate.release() - self._delegate = None - - # Remove window from display and remove its view. - if self._nswindow: - self._nswindow.orderOut_(None) - self._nswindow.setContentView_(None) - self._nswindow.close() - - # Restore screen mode. This also releases the display - # if it was captured for fullscreen mode. - self.screen.restore_mode() - - # Remove view from canvas and then remove canvas. - if self.canvas: - self.canvas.nsview.release() - self.canvas.nsview = None - self.canvas = None - - # Do this last, so that we don't see white flash - # when exiting application from fullscreen mode. - super(CocoaWindow, self).close() - - self._was_closed = True - pool.drain() - - def switch_to(self): - if self.context: - self.context.set_current() - - def flip(self): - self.draw_mouse_cursor() - if self.context: - self.context.flip() - - def dispatch_events(self): - self._allow_dispatch_event = True - # Process all pyglet events. - self.dispatch_pending_events() - event = True - - # Dequeue and process all of the pending Cocoa events. - pool = NSAutoreleasePool.new() - NSApp = NSApplication.sharedApplication() - while event and self._nswindow and self._context: - event = NSApp.nextEventMatchingMask_untilDate_inMode_dequeue_( - cocoapy.NSAnyEventMask, None, cocoapy.NSEventTrackingRunLoopMode, True) - - if event: - event_type = event.type() - # Pass on all events. - NSApp.sendEvent_(event) - # And resend key events to special handlers. - if event_type == cocoapy.NSKeyDown and not event.isARepeat(): - NSApp.sendAction_to_from_(cocoapy.get_selector('pygletKeyDown:'), None, event) - elif event_type == cocoapy.NSKeyUp: - NSApp.sendAction_to_from_(cocoapy.get_selector('pygletKeyUp:'), None, event) - elif event_type == cocoapy.NSFlagsChanged: - NSApp.sendAction_to_from_(cocoapy.get_selector('pygletFlagsChanged:'), None, event) - NSApp.updateWindows() - - pool.drain() - - self._allow_dispatch_event = False - - def dispatch_pending_events(self): - while self._event_queue: - event = self._event_queue.pop(0) - EventDispatcher.dispatch_event(self, *event) - - def set_caption(self, caption): - self._caption = caption - if self._nswindow is not None: - self._nswindow.setTitle_(cocoapy.get_NSString(caption)) - - def set_icon(self, *images): - # Only use the biggest image from the list. - max_image = images[0] - for img in images: - if img.width > max_image.width and img.height > max_image.height: - max_image = img - - # Grab image data from pyglet image. - image = max_image.get_image_data() - format = 'ARGB' - bytesPerRow = len(format) * image.width - data = image.get_data(format, -bytesPerRow) - - # Use image data to create a data provider. - # Using CGDataProviderCreateWithData crashes PyObjC 2.2b3, so we create - # a CFDataRef object first and use it to create the data provider. - cfdata = c_void_p(cf.CFDataCreate(None, data, len(data))) - provider = c_void_p(quartz.CGDataProviderCreateWithCFData(cfdata)) - - colorSpace = c_void_p(quartz.CGColorSpaceCreateDeviceRGB()) - - # Then create a CGImage from the provider. 
- cgimage = c_void_p(quartz.CGImageCreate( - image.width, image.height, 8, 32, bytesPerRow, - colorSpace, - cocoapy.kCGImageAlphaFirst, - provider, - None, - True, - cocoapy.kCGRenderingIntentDefault)) - - if not cgimage: - return - - cf.CFRelease(cfdata) - quartz.CGDataProviderRelease(provider) - quartz.CGColorSpaceRelease(colorSpace) - - # Turn the CGImage into an NSImage. - size = cocoapy.NSMakeSize(image.width, image.height) - nsimage = NSImage.alloc().initWithCGImage_size_(cgimage, size) - if not nsimage: - return - - # And finally set the app icon. - NSApp = NSApplication.sharedApplication() - NSApp.setApplicationIconImage_(nsimage) - nsimage.release() - - def get_location(self): - window_frame = self._nswindow.frame() - rect = self._nswindow.contentRectForFrameRect_(window_frame) - screen_frame = self._nswindow.screen().frame() - screen_width = int(screen_frame.size.width) - screen_height = int(screen_frame.size.height) - return int(rect.origin.x), int(screen_height - rect.origin.y - rect.size.height) - - def set_location(self, x, y): - window_frame = self._nswindow.frame() - rect = self._nswindow.contentRectForFrameRect_(window_frame) - screen_frame = self._nswindow.screen().frame() - screen_width = int(screen_frame.size.width) - screen_height = int(screen_frame.size.height) - origin = cocoapy.NSPoint(x, screen_height - y - rect.size.height) - self._nswindow.setFrameOrigin_(origin) - - def get_framebuffer_size(self): - view = self.context._nscontext.view() - bounds = view.convertRectToBacking_(view.bounds()).size - return int(bounds.width), int(bounds.height) - - def set_size(self, width: int, height: int) -> None: - super().set_size(width, height) - # Move frame origin down so that top-left corner of window doesn't move. - window_frame = self._nswindow.frame() - rect = self._nswindow.contentRectForFrameRect_(window_frame) - rect.origin.y += rect.size.height - height - rect.size.width = width - rect.size.height = height - new_frame = self._nswindow.frameRectForContentRect_(rect) - # The window background flashes when the frame size changes unless it's - # animated, but we can set the window's animationResizeTime to zero. 
- is_visible = self._nswindow.isVisible() - self._nswindow.setFrame_display_animate_(new_frame, True, is_visible) - self.dispatch_event('on_resize', width, height) - - def set_minimum_size(self, width: int, height: int) -> None: - super().set_minimum_size(width, height) - - if self._nswindow is not None: - ns_minimum_size = cocoapy.NSSize(*self._minimum_size) - self._nswindow.setContentMinSize_(ns_minimum_size) - - def set_maximum_size(self, width: int, height: int) -> None: - super().set_maximum_size(width, height) - - if self._nswindow is not None: - ns_maximum_size = cocoapy.NSSize(*self._maximum_size) - self._nswindow.setContentMaxSize_(ns_maximum_size) - - def activate(self): - if self._nswindow is not None: - NSApp = NSApplication.sharedApplication() - NSApp.activateIgnoringOtherApps_(True) - self._nswindow.makeKeyAndOrderFront_(None) - - def set_visible(self, visible: bool = True) -> None: - super().set_visible(visible) - - if self._nswindow is not None: - if visible: - self.dispatch_event('on_resize', self._width, self._height) - self.dispatch_event('on_show') - self.dispatch_event('on_expose') - self._nswindow.makeKeyAndOrderFront_(None) - else: - self._nswindow.orderOut_(None) - - def minimize(self): - self._mouse_in_window = False - if self._nswindow is not None: - self._nswindow.miniaturize_(None) - - def maximize(self): - if self._nswindow is not None: - self._nswindow.zoom_(None) - - def set_vsync(self, vsync: bool) -> None: - if pyglet.options['vsync'] is not None: - vsync = pyglet.options['vsync'] - - super().set_vsync(vsync) - self.context.set_vsync(vsync) - - def _mouse_in_content_rect(self): - # Returns true if mouse is inside the window's content rectangle. - # Better to use this method to check manually rather than relying - # on instance variables that may not be set correctly. - point = NSEvent.mouseLocation() - window_frame = self._nswindow.frame() - rect = self._nswindow.contentRectForFrameRect_(window_frame) - return cocoapy.foundation.NSMouseInRect(point, rect, False) - - def set_mouse_platform_visible(self, platform_visible=None): - # When the platform_visible argument is supplied with a boolean, then this - # method simply sets whether or not the platform mouse cursor is visible. - if platform_visible is not None: - if platform_visible: - SystemCursor.unhide() - else: - SystemCursor.hide() - # But if it has been called without an argument, it turns into - # a completely different function. Now we are trying to figure out - # whether or not the mouse *should* be visible, and if so, what it should - # look like. - else: - # If we are in mouse exclusive mode, then hide the mouse cursor. - if self._mouse_exclusive: - SystemCursor.hide() - # If we aren't inside the window, then always show the mouse - # and make sure that it is the default cursor. - elif not self._mouse_in_content_rect(): - NSCursor.arrowCursor().set() - SystemCursor.unhide() - # If we are in the window, then what we do depends on both - # the current pyglet-set visibility setting for the mouse and - # the type of the mouse cursor. If the cursor has been hidden - # in the window with set_mouse_visible() then don't show it. - elif not self._mouse_visible: - SystemCursor.hide() - # If the mouse is set as a system-defined cursor, then we - # need to set the cursor and show the mouse. 
- # *** FIX ME *** - elif isinstance(self._mouse_cursor, CocoaMouseCursor): - self._mouse_cursor.set() - SystemCursor.unhide() - # If the mouse cursor is OpenGL drawable, then it we need to hide - # the system mouse cursor, so that the cursor can draw itself. - elif self._mouse_cursor.gl_drawable: - SystemCursor.hide() - # Otherwise, show the default cursor. - else: - NSCursor.arrowCursor().set() - SystemCursor.unhide() - - def get_system_mouse_cursor(self, name): - # It would make a lot more sense for most of this code to be - # inside the CocoaMouseCursor class, but all of the CURSOR_xxx - # constants are defined as properties of BaseWindow. - if name == self.CURSOR_DEFAULT: - return DefaultMouseCursor() - cursors = { - self.CURSOR_CROSSHAIR: 'crosshairCursor', - self.CURSOR_HAND: 'pointingHandCursor', - self.CURSOR_HELP: 'arrowCursor', - self.CURSOR_NO: 'operationNotAllowedCursor', # Mac OS 10.6 - self.CURSOR_SIZE: 'arrowCursor', - self.CURSOR_SIZE_UP: 'resizeUpCursor', - self.CURSOR_SIZE_UP_RIGHT: 'arrowCursor', - self.CURSOR_SIZE_RIGHT: 'resizeRightCursor', - self.CURSOR_SIZE_DOWN_RIGHT: 'arrowCursor', - self.CURSOR_SIZE_DOWN: 'resizeDownCursor', - self.CURSOR_SIZE_DOWN_LEFT: 'arrowCursor', - self.CURSOR_SIZE_LEFT: 'resizeLeftCursor', - self.CURSOR_SIZE_UP_LEFT: 'arrowCursor', - self.CURSOR_SIZE_UP_DOWN: 'resizeUpDownCursor', - self.CURSOR_SIZE_LEFT_RIGHT: 'resizeLeftRightCursor', - self.CURSOR_TEXT: 'IBeamCursor', - self.CURSOR_WAIT: 'arrowCursor', # No wristwatch cursor in Cocoa - self.CURSOR_WAIT_ARROW: 'arrowCursor', # No wristwatch cursor in Cocoa - } - if name not in cursors: - raise RuntimeError('Unknown cursor name "%s"' % name) - return CocoaMouseCursor(cursors[name]) - - def set_mouse_position(self, x, y, absolute=False): - if absolute: - # If absolute, then x, y is given in global display coordinates - # which sets (0,0) at top left corner of main display. It is possible - # to warp the mouse position to a point inside of another display. - quartz.CGWarpMouseCursorPosition(CGPoint(x,y)) - else: - # Window-relative coordinates: (x, y) are given in window coords - # with (0,0) at bottom-left corner of window and y up. We find - # which display the window is in and then convert x, y into local - # display coords where (0,0) is now top-left of display and y down. - screenInfo = self._nswindow.screen().deviceDescription() - displayID = screenInfo.objectForKey_(cocoapy.get_NSString('NSScreenNumber')) - displayID = displayID.intValue() - displayBounds = quartz.CGDisplayBounds(displayID) - frame = self._nswindow.frame() - windowOrigin = frame.origin - x += windowOrigin.x - y = displayBounds.size.height - windowOrigin.y - y - quartz.CGDisplayMoveCursorToPoint(displayID, cocoapy.NSPoint(x, y)) - - def set_exclusive_mouse(self, exclusive=True): - super().set_exclusive_mouse(exclusive) - if exclusive: - # Skip the next motion event, which would return a large delta. - self._mouse_ignore_motion = True - # Move mouse to center of window. - frame = self._nswindow.frame() - width, height = frame.size.width, frame.size.height - self.set_mouse_position(width/2, height/2) - quartz.CGAssociateMouseAndMouseCursorPosition(False) - else: - quartz.CGAssociateMouseAndMouseCursorPosition(True) - - # Update visibility of mouse cursor. 
- self.set_mouse_platform_visible() - - def set_exclusive_keyboard(self, exclusive=True): - # http://developer.apple.com/mac/library/technotes/tn2002/tn2062.html - # http://developer.apple.com/library/mac/#technotes/KioskMode/ - - # BUG: System keys like F9 or command-tab are disabled, however - # pyglet also does not receive key press events for them. - - # This flag is queried by window delegate to determine whether - # the quit menu item is active. - super().set_exclusive_keyboard(exclusive) - - if exclusive: - # "Be nice! Don't disable force-quit!" - # -- Patrick Swayze, Road House (1989) - options = cocoapy.NSApplicationPresentationHideDock | \ - cocoapy.NSApplicationPresentationHideMenuBar | \ - cocoapy.NSApplicationPresentationDisableProcessSwitching | \ - cocoapy.NSApplicationPresentationDisableHideApplication - else: - options = cocoapy.NSApplicationPresentationDefault - - NSApp = NSApplication.sharedApplication() - NSApp.setPresentationOptions_(options) - - -__all__ = ["CocoaWindow"] diff --git a/spaces/akhaliq/BlendGAN/ffhq_dataset/landmarks_detector.py b/spaces/akhaliq/BlendGAN/ffhq_dataset/landmarks_detector.py deleted file mode 100644 index 9ffcdf40426238b2481acbbe12e1162531621e04..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/BlendGAN/ffhq_dataset/landmarks_detector.py +++ /dev/null @@ -1,22 +0,0 @@ -import dlib -import cv2 - - -class LandmarksDetector: - def __init__(self, predictor_model_path): - """ - :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file - """ - self.detector = dlib.get_frontal_face_detector() - self.shape_predictor = dlib.shape_predictor(predictor_model_path) - - def get_landmarks(self, image): - gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - dets = self.detector(gray, 1) - - for detection in dets: - try: - face_landmarks = [(item.x, item.y) for item in self.shape_predictor(gray, detection).parts()] - yield face_landmarks - except: - print("Exception in get_landmarks()!") diff --git a/spaces/akhaliq/Kapao/utils/loggers/wandb/README.md b/spaces/akhaliq/Kapao/utils/loggers/wandb/README.md deleted file mode 100644 index 8616ea2b6945749dd656e021834fa13230c14b54..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Kapao/utils/loggers/wandb/README.md +++ /dev/null @@ -1,140 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. - * [About Weights & Biases](#about-weights-&-biases) - * [First-Time Setup](#first-time-setup) - * [Viewing runs](#viewing-runs) - * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) - * [Reports: Share your work with the world!](#reports) - -## About Weights & Biases -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - - Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. 
How W&B can help you optimize your machine learning workflows: - - * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time - * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4), visualized automatically - * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization - * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators - * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently - * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - - ## First-Time Setup -

When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.

W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:

```shell
$ python train.py --project ... --name ...
```
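For reference, the same setup can be driven from the Python API directly. The following is a minimal, illustrative sketch of the generic wandb calls, not the exact code YOLOv5's logger uses; the project and run names are placeholders:

```python
import wandb

# Reads WANDB_API_KEY from the environment, or prompts for it interactively.
wandb.login()

# Project and run name below are illustrative placeholders.
run = wandb.init(project="YOLOv5", name="my-experiment")
run.finish()
```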
## Viewing Runs

Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:

* Training & Validation losses
* Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
* Learning Rate over time
* A bounding box debugging panel, showing the training progress over time
* GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
* System: Disk I/O, CPU utilization, RAM memory usage
* Your trained model as a W&B Artifact
* Environment: OS and Python types, Git repository and state, **training command**
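To give a feel for how these values are streamed, here is a rough sketch of per-step metric logging with the generic wandb API; the metric keys and values are illustrative placeholders, not the names YOLOv5 actually emits:

```python
import wandb

run = wandb.init(project="YOLOv5")
for epoch in range(3):
    # Keys and values are illustrative placeholders.
    run.log({
        "train/box_loss": 0.05 / (epoch + 1),
        "metrics/mAP_0.5": 0.30 + 0.10 * epoch,
        "lr": 0.01 * 0.9 ** epoch,
    })
run.finish()
```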
## Advanced Usage

You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.

### 1: Visualize and Version Datasets

Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact.
Usage: `$ python utils/logger/wandb/log_dataset.py --project ... --name ... --data ..`

![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
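Under the hood, logging a dataset this way amounts to building a `wandb.Table` and attaching it to a dataset artifact. A minimal sketch using the generic wandb API follows; the column, image path, and artifact names are illustrative placeholders, and this is not the logger's exact implementation:

```python
import wandb

run = wandb.init(project="YOLOv5", job_type="dataset-upload")

# Column, image path, and artifact name are illustrative placeholders.
table = wandb.Table(columns=["id", "image", "label"])
table.add_data(0, wandb.Image("images/example.jpg"), "person")

dataset_artifact = wandb.Artifact("my-dataset", type="dataset")
dataset_artifact.add(table, "train_table")
run.log_artifact(dataset_artifact)
run.finish()
```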
### 2: Train and Log Evaluation simultaneously

This is an extension of the previous section, but it will also run training after uploading the dataset. It additionally logs an evaluation table, which compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, so no images will be uploaded from your system more than once.

Usage: `$ python utils/logger/wandb/log_dataset.py --data .. --upload_data`

![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
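As a rough illustration, a per-epoch evaluation table logged manually would look something like this; the columns and rows are placeholders (the real table stores images with box overlays rather than strings):

```python
import wandb

run = wandb.init(project="YOLOv5")
for epoch in range(2):
    # Columns and rows are illustrative placeholders.
    eval_table = wandb.Table(columns=["epoch", "image_id", "prediction", "ground_truth"])
    eval_table.add_data(epoch, 0, "dog 0.91", "dog")
    run.log({"evaluation": eval_table})
run.finish()
```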
### 3: Train using dataset artifact

When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information needed to train a model directly from the dataset artifact. This run also logs evaluation.

Usage: `$ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml`

![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
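Consuming a previously uploaded dataset artifact from inside a run is a short operation with the generic wandb API; the artifact reference below is a placeholder:

```python
import wandb

run = wandb.init(project="YOLOv5", job_type="training")
# "username/YOLOv5/my-dataset:latest" is a placeholder artifact reference.
dataset = run.use_artifact("username/YOLOv5/my-dataset:latest")
data_dir = dataset.download()  # files are cached locally and reused across runs
run.finish()
```

Because artifacts are referenced rather than re-uploaded, repeated training runs against the same dataset version stay cheap.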
### 4: Save model checkpoints as artifacts

To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.

Usage: `$ python train.py --save_period 1`

![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
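Checkpoint versioning boils down to logging each saved weights file as a model artifact. A minimal sketch with placeholder file, artifact, and alias names (not the logger's exact code):

```python
import wandb

run = wandb.init(project="YOLOv5")
# "last.pt", the artifact name, and the aliases are illustrative placeholders.
model_artifact = wandb.Artifact(f"run_{run.id}_model", type="model")
model_artifact.add_file("last.pt")
run.log_artifact(model_artifact, aliases=["latest", "epoch-10"])
run.finish()
```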
### 5: Resume runs from checkpoint artifacts

Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system.

Usage: `$ python train.py --resume wandb-artifact://{run_path}`

![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
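Resuming this way effectively means fetching the checkpoint from W&B storage before training starts; a minimal sketch with a placeholder artifact path:

```python
import wandb

# "username/project/run_id_model:latest" is a placeholder artifact path.
api = wandb.Api()
artifact = api.artifact("username/project/run_id_model:latest", type="model")
checkpoint_dir = artifact.download()
print(f"Checkpoint restored to {checkpoint_dir}")
```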
### 6: Resume runs from dataset artifact & checkpoint artifacts

Local dataset or model checkpoints are not required, so this can be used to resume runs directly on a different device. The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. either set `--upload_dataset` or train from a `_wandb.yaml` file, and set `--save_period`.

Usage: `$ python train.py --resume wandb-artifact://{run_path}`

![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
## Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).

## Environments

YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):

* **Google Colab and Kaggle** notebooks with free GPU: [![Open In Colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb) [![Open In Kaggle](https://camo.githubusercontent.com/a08ca511178e691ace596a95d334f73cf4ce06e83a5c4a5169b8bb68cac27bef/68747470733a2f2f6b6167676c652e636f6d2f7374617469632f696d616765732f6f70656e2d696e2d6b6167676c652e737667)](https://www.kaggle.com/ultralytics/yolov5)
* **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
* **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
* **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) [![Docker Pulls](https://camo.githubusercontent.com/280faedaf431e4c0c24fdb30ec00a66d627404e5c4c498210d3f014dd58c2c7e/68747470733a2f2f696d672e736869656c64732e696f2f646f636b65722f70756c6c732f756c7472616c79746963732f796f6c6f76353f6c6f676f3d646f636b6572)](https://hub.docker.com/r/ultralytics/yolov5)

## Status

![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)

If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
- diff --git a/spaces/akhil5466/MyGenAIAvatarSpeech/README.md b/spaces/akhil5466/MyGenAIAvatarSpeech/README.md deleted file mode 100644 index 984fa49f5c946ed25ada756b265c0b9cf666a619..0000000000000000000000000000000000000000 --- a/spaces/akhil5466/MyGenAIAvatarSpeech/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyGenAIAvatarSpeech -emoji: 😻 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/alamin655/Personas/conversant/search/__init__.py b/spaces/alamin655/Personas/conversant/search/__init__.py deleted file mode 100644 index 4a97acb2edae55495f08836272447d1afbe2f891..0000000000000000000000000000000000000000 --- a/spaces/alamin655/Personas/conversant/search/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Cohere Inc. and its affiliates. -# -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. -# -# You may obtain a copy of the License in the LICENSE file at the top -# level of this repository. - -from conversant.search.document import Document -from conversant.search.local_searcher import LocalSearcher -from conversant.search.searcher import Searcher - -__all__ = ["Document", "Searcher", "LocalSearcher"] diff --git a/spaces/alexray/btc_predictor/tests/test_model_preparation.py b/spaces/alexray/btc_predictor/tests/test_model_preparation.py deleted file mode 100644 index 8d4b2e5f9c87e1e82f0465107bd0465787827c63..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/tests/test_model_preparation.py +++ /dev/null @@ -1,39 +0,0 @@ -import unittest -import pandas as pd -import os -from model_preparation import prepare_model - - -class TestModelPreparation(unittest.TestCase): - def setUp(self): - # Create temporary test data - self.test_data_dir = "test_data" - os.makedirs(self.test_data_dir, exist_ok=True) - - # Create sample training features and target CSV files - self.X_train = pd.DataFrame({'feature1': [1, 2, 3], - 'feature2': [4, 5, 6]}) - self.y_train = pd.Series([10, 20, 30]) - self.X_train.to_csv(os.path.join(self.test_data_dir, - "train_features.csv"), index=True) - self.y_train.to_csv(os.path.join(self.test_data_dir, - "train_target.csv"), index=True) - - def tearDown(self): - # Clean up temporary test data - os.remove(os.path.join(self.test_data_dir, "train_features.csv")) - os.remove(os.path.join(self.test_data_dir, "train_target.csv")) - os.remove(os.path.join(self.test_data_dir, "train_prediction.csv")) - os.rmdir(self.test_data_dir) - - def test_prepare_model(self): - # Run the model preparation function - prepare_model(data_dir=self.test_data_dir, - model_name="test_linear_svr_model.pkl") - - # Check if the model file is created - self.assertTrue(os.path.exists("test_linear_svr_model.pkl")) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/aliabid94/AutoGPT/autogpt/commands/improve_code.py b/spaces/aliabid94/AutoGPT/autogpt/commands/improve_code.py deleted file mode 100644 index e3440d8b7c6ee8cb62d73df48623ab757c973c59..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/autogpt/commands/improve_code.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import annotations - -import json - -from autogpt.llm_utils import call_ai_function - - -def improve_code(suggestions: list[str], code: str) -> str: - """ - A function that takes in code and suggestions and returns 
a response from create - chat completion api call. - - Parameters: - suggestions (List): A list of suggestions around what needs to be improved. - code (str): Code to be improved. - Returns: - A result string from create chat completion. Improved code in response. - """ - - function_string = ( - "def generate_improved_code(suggestions: List[str], code: str) -> str:" - ) - args = [json.dumps(suggestions), code] - description_string = ( - "Improves the provided code based on the suggestions" - " provided, making no other changes." - ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/aliabid94/AutoGPT/autogpt/speech/eleven_labs.py b/spaces/aliabid94/AutoGPT/autogpt/speech/eleven_labs.py deleted file mode 100644 index ea84efd8ca9489b40919ecd571813fe954b078e3..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/AutoGPT/autogpt/speech/eleven_labs.py +++ /dev/null @@ -1,86 +0,0 @@ -"""ElevenLabs speech module""" -import os - -import requests -from playsound import playsound - -from autogpt.config import Config -from autogpt.speech.base import VoiceBase - -PLACEHOLDERS = {"your-voice-id"} - - -class ElevenLabsSpeech(VoiceBase): - """ElevenLabs speech class""" - - def _setup(self) -> None: - """Set up the voices, API key, etc. - - Returns: - None: None - """ - - cfg = Config() - default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] - voice_options = { - "Rachel": "21m00Tcm4TlvDq8ikWAM", - "Domi": "AZnzlk1XvdvUeBnXmlld", - "Bella": "EXAVITQu4vr4xnSDxMaL", - "Antoni": "ErXwobaYiN019PkySvjV", - "Elli": "MF3mGyEYCl7XYWbV9V6O", - "Josh": "TxGEqnHWrfWFTfGW9XjX", - "Arnold": "VR6AewLTigWG4xSOukaG", - "Adam": "pNInz6obpgDQGcFmaJgB", - "Sam": "yoZ06aMxZJJ28mfd3POQ", - } - self._headers = { - "Content-Type": "application/json", - "xi-api-key": cfg.elevenlabs_api_key, - } - self._voices = default_voices.copy() - if cfg.elevenlabs_voice_1_id in voice_options: - cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id] - if cfg.elevenlabs_voice_2_id in voice_options: - cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id] - self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0) - self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1) - - def _use_custom_voice(self, voice, voice_index) -> None: - """Use a custom voice if provided and not a placeholder - - Args: - voice (str): The voice ID - voice_index (int): The voice index - - Returns: - None: None - """ - # Placeholder values that should be treated as empty - if voice and voice not in PLACEHOLDERS: - self._voices[voice_index] = voice - - def _speech(self, text: str, voice_index: int = 0) -> bool: - """Speak text using elevenlabs.io's API - - Args: - text (str): The text to speak - voice_index (int, optional): The voice to use. Defaults to 0. 
- - Returns: - bool: True if the request was successful, False otherwise - """ - tts_url = ( - f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" - ) - response = requests.post(tts_url, headers=self._headers, json={"text": text}) - - if response.status_code == 200: - with open("speech.mpeg", "wb") as f: - f.write(response.content) - playsound("speech.mpeg", True) - os.remove("speech.mpeg") - return True - else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) - return False diff --git a/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/modules.py b/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000 --- a/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/anubhavmaity/minima/README.md b/spaces/anubhavmaity/minima/README.md deleted file mode 100644 index 4b603fc31c2029de650f8a68f6df3f398f51d3bc..0000000000000000000000000000000000000000 --- a/spaces/anubhavmaity/minima/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Minima -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/aphenx/bingo/next.config.js b/spaces/aphenx/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/arslan-ahmed/talk-to-arslan/Dockerfile b/spaces/arslan-ahmed/talk-to-arslan/Dockerfile deleted file mode 100644 index 2ae720c2805ac5095c21ce52e71257f3cdd284d4..0000000000000000000000000000000000000000 --- a/spaces/arslan-ahmed/talk-to-arslan/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ - -# Use an official Python runtime as a parent image -FROM arslan2k12/ttyd_base - -# Set the working directory in the container -WORKDIR /app/ttyd - -# Copy the current directory contents into the container at /usr/src/app -# COPY . 
/app/ttyd -COPY *.py /app/ttyd - -# to make gradio app accessible to local network (default 127.0.0.1 is only accissible within the container) -ENV GRADIO_SERVER_NAME=0.0.0.0 - -# Install any needed packages specified in requirements.txt -# RUN pip install --no-cache-dir -r requirements.txt # already installed in base image - -# Use ENTRYPOINT to allow passing user arguments -ENTRYPOINT ["python", "app.py"] \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/glow_tts/decoder.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/glow_tts/decoder.py deleted file mode 100644 index 61c5174ac5e67885288043885290c2906656c99c..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/glow_tts/decoder.py +++ /dev/null @@ -1,141 +0,0 @@ -import torch -from torch import nn - -from TTS.tts.layers.generic.normalization import ActNorm -from TTS.tts.layers.glow_tts.glow import CouplingBlock, InvConvNear - - -def squeeze(x, x_mask=None, num_sqz=2): - """GlowTTS squeeze operation - Increase number of channels and reduce number of time steps - by the same factor. - - Note: - each 's' is a n-dimensional vector. - ``[s1,s2,s3,s4,s5,s6] --> [[s1, s3, s5], [s2, s4, s6]]`` - """ - b, c, t = x.size() - - t = (t // num_sqz) * num_sqz - x = x[:, :, :t] - x_sqz = x.view(b, c, t // num_sqz, num_sqz) - x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * num_sqz, t // num_sqz) - - if x_mask is not None: - x_mask = x_mask[:, :, num_sqz - 1 :: num_sqz] - else: - x_mask = torch.ones(b, 1, t // num_sqz).to(device=x.device, dtype=x.dtype) - return x_sqz * x_mask, x_mask - - -def unsqueeze(x, x_mask=None, num_sqz=2): - """GlowTTS unsqueeze operation (revert the squeeze) - - Note: - each 's' is a n-dimensional vector. - ``[[s1, s3, s5], [s2, s4, s6]] --> [[s1, s3, s5, s2, s4, s6]]`` - """ - b, c, t = x.size() - - x_unsqz = x.view(b, num_sqz, c // num_sqz, t) - x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // num_sqz, t * num_sqz) - - if x_mask is not None: - x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, num_sqz).view(b, 1, t * num_sqz) - else: - x_mask = torch.ones(b, 1, t * num_sqz).to(device=x.device, dtype=x.dtype) - return x_unsqz * x_mask, x_mask - - -class Decoder(nn.Module): - """Stack of Glow Decoder Modules. - - :: - - Squeeze -> ActNorm -> InvertibleConv1x1 -> AffineCoupling -> Unsqueeze - - Args: - in_channels (int): channels of input tensor. - hidden_channels (int): hidden decoder channels. - kernel_size (int): Coupling block kernel size. (Wavenet filter kernel size.) - dilation_rate (int): rate to increase dilation by each layer in a decoder block. - num_flow_blocks (int): number of decoder blocks. - num_coupling_layers (int): number coupling layers. (number of wavenet layers.) - dropout_p (float): wavenet dropout rate. - sigmoid_scale (bool): enable/disable sigmoid scaling in coupling layer. 
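-
-    Example (an illustrative sketch; the sizes below are assumptions, not
-    defaults taken from any released config)::
-
-        >>> dec = Decoder(80, 192, kernel_size=5, dilation_rate=1,
-        ...               num_flow_blocks=12, num_coupling_layers=4)
-        >>> x = torch.randn(2, 80, 100)      # [B, C, T]
-        >>> x_mask = torch.ones(2, 1, 100)   # [B, 1, T]
-        >>> z, logdet = dec(x, x_mask)
-        >>> x_rec, _ = dec(z, x_mask, reverse=True)  # inverse recovers x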
- """ - - def __init__( - self, - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - num_flow_blocks, - num_coupling_layers, - dropout_p=0.0, - num_splits=4, - num_squeeze=2, - sigmoid_scale=False, - c_in_channels=0, - ): - super().__init__() - - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.num_flow_blocks = num_flow_blocks - self.num_coupling_layers = num_coupling_layers - self.dropout_p = dropout_p - self.num_splits = num_splits - self.num_squeeze = num_squeeze - self.sigmoid_scale = sigmoid_scale - self.c_in_channels = c_in_channels - - self.flows = nn.ModuleList() - for _ in range(num_flow_blocks): - self.flows.append(ActNorm(channels=in_channels * num_squeeze)) - self.flows.append(InvConvNear(channels=in_channels * num_squeeze, num_splits=num_splits)) - self.flows.append( - CouplingBlock( - in_channels * num_squeeze, - hidden_channels, - kernel_size=kernel_size, - dilation_rate=dilation_rate, - num_layers=num_coupling_layers, - c_in_channels=c_in_channels, - dropout_p=dropout_p, - sigmoid_scale=sigmoid_scale, - ) - ) - - def forward(self, x, x_mask, g=None, reverse=False): - """ - Shapes: - - x: :math:`[B, C, T]` - - x_mask: :math:`[B, 1 ,T]` - - g: :math:`[B, C]` - """ - if not reverse: - flows = self.flows - logdet_tot = 0 - else: - flows = reversed(self.flows) - logdet_tot = None - - if self.num_squeeze > 1: - x, x_mask = squeeze(x, x_mask, self.num_squeeze) - for f in flows: - if not reverse: - x, logdet = f(x, x_mask, g=g, reverse=reverse) - logdet_tot += logdet - else: - x, logdet = f(x, x_mask, g=g, reverse=reverse) - if self.num_squeeze > 1: - x, x_mask = unsqueeze(x, x_mask, self.num_squeeze) - return x, logdet_tot - - def store_inverse(self): - for f in self.flows: - f.store_inverse() diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/hifigan_discriminator.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/hifigan_discriminator.py deleted file mode 100644 index ca5eaf408c95372ea26f4e83db6f470b4dd92dfb..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/hifigan_discriminator.py +++ /dev/null @@ -1,217 +0,0 @@ -# adopted from https://github.com/jik876/hifi-gan/blob/master/models.py -import torch -from torch import nn -from torch.nn import functional as F - -LRELU_SLOPE = 0.1 - - -class DiscriminatorP(torch.nn.Module): - """HiFiGAN Periodic Discriminator - - Takes every Pth value from the input waveform and applied a stack of convoluations. - - Note: - if `period` is 2 - `waveform = [1, 2, 3, 4, 5, 6 ...] --> [1, 3, 5 ... ] --> convs -> score, feat` - - Args: - x (Tensor): input waveform. - - Returns: - [Tensor]: discriminator scores per sample in the batch. - [List[Tensor]]: list of features from each convolutional layer. 
- - Shapes: - x: [B, 1, T] - """ - - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super().__init__() - self.period = period - get_padding = lambda k, d: int((k * d - d) / 2) - norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.weight_norm - self.convs = nn.ModuleList( - [ - norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ] - ) - self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - """ - Args: - x (Tensor): input waveform. - - Returns: - [Tensor]: discriminator scores per sample in the batch. - [List[Tensor]]: list of features from each convolutional layer. - - Shapes: - x: [B, 1, T] - """ - feat = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - feat.append(x) - x = self.conv_post(x) - feat.append(x) - x = torch.flatten(x, 1, -1) - - return x, feat - - -class MultiPeriodDiscriminator(torch.nn.Module): - """HiFiGAN Multi-Period Discriminator (MPD) - Wrapper for the `PeriodDiscriminator` to apply it in different periods. - Periods are suggested to be prime numbers to reduce the overlap between each discriminator. - """ - - def __init__(self, use_spectral_norm=False): - super().__init__() - self.discriminators = nn.ModuleList( - [ - DiscriminatorP(2, use_spectral_norm=use_spectral_norm), - DiscriminatorP(3, use_spectral_norm=use_spectral_norm), - DiscriminatorP(5, use_spectral_norm=use_spectral_norm), - DiscriminatorP(7, use_spectral_norm=use_spectral_norm), - DiscriminatorP(11, use_spectral_norm=use_spectral_norm), - ] - ) - - def forward(self, x): - """ - Args: - x (Tensor): input waveform. - - Returns: - [List[Tensor]]: list of scores from each discriminator. - [List[List[Tensor]]]: list of list of features from each discriminator's each convolutional layer. - - Shapes: - x: [B, 1, T] - """ - scores = [] - feats = [] - for _, d in enumerate(self.discriminators): - score, feat = d(x) - scores.append(score) - feats.append(feat) - return scores, feats - - -class DiscriminatorS(torch.nn.Module): - """HiFiGAN Scale Discriminator. - It is similar to `MelganDiscriminator` but with a specific architecture explained in the paper. - - Args: - use_spectral_norm (bool): if `True` swith to spectral norm instead of weight norm. 
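-
-    Example (illustrative; the 1 s / 16 kHz waveform is an assumption)::
-
-        >>> d = DiscriminatorS()
-        >>> score, feats = d(torch.randn(1, 1, 16000))
-        >>> len(feats)  # one feature map per conv layer plus conv_post
-        8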
- - """ - - def __init__(self, use_spectral_norm=False): - super().__init__() - norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.weight_norm - self.convs = nn.ModuleList( - [ - norm_f(nn.Conv1d(1, 128, 15, 1, padding=7)), - norm_f(nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - """ - Args: - x (Tensor): input waveform. - - Returns: - Tensor: discriminator scores. - List[Tensor]: list of features from the convolutiona layers. - """ - feat = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - feat.append(x) - x = self.conv_post(x) - feat.append(x) - x = torch.flatten(x, 1, -1) - return x, feat - - -class MultiScaleDiscriminator(torch.nn.Module): - """HiFiGAN Multi-Scale Discriminator. - It is similar to `MultiScaleMelganDiscriminator` but specially tailored for HiFiGAN as in the paper. - """ - - def __init__(self): - super().__init__() - self.discriminators = nn.ModuleList( - [ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ] - ) - self.meanpools = nn.ModuleList([nn.AvgPool1d(4, 2, padding=2), nn.AvgPool1d(4, 2, padding=2)]) - - def forward(self, x): - """ - Args: - x (Tensor): input waveform. - - Returns: - List[Tensor]: discriminator scores. - List[List[Tensor]]: list of list of features from each layers of each discriminator. - """ - scores = [] - feats = [] - for i, d in enumerate(self.discriminators): - if i != 0: - x = self.meanpools[i - 1](x) - score, feat = d(x) - scores.append(score) - feats.append(feat) - return scores, feats - - -class HifiganDiscriminator(nn.Module): - """HiFiGAN discriminator wrapping MPD and MSD.""" - - def __init__(self): - super().__init__() - self.mpd = MultiPeriodDiscriminator() - self.msd = MultiScaleDiscriminator() - - def forward(self, x): - """ - Args: - x (Tensor): input waveform. - - Returns: - List[Tensor]: discriminator scores. - List[List[Tensor]]: list of list of features from each layers of each discriminator. 
- """ - scores, feats = self.mpd(x) - scores_, feats_ = self.msd(x) - return scores + scores_, feats + feats_ diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_wavernn_datasets.py b/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_wavernn_datasets.py deleted file mode 100644 index 503b4e2483b447a01b0cb4abb02bc6cf34c80b90..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_wavernn_datasets.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import shutil - -import numpy as np -from torch.utils.data import DataLoader - -from tests import get_tests_output_path, get_tests_path -from TTS.utils.audio import AudioProcessor -from TTS.vocoder.configs import WavernnConfig -from TTS.vocoder.datasets.preprocess import load_wav_feat_data, preprocess_wav_files -from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset - -file_path = os.path.dirname(os.path.realpath(__file__)) -OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/") -os.makedirs(OUTPATH, exist_ok=True) - -C = WavernnConfig() - -test_data_path = os.path.join(get_tests_path(), "data/ljspeech/") -test_mel_feat_path = os.path.join(test_data_path, "mel") -test_quant_feat_path = os.path.join(test_data_path, "quant") -ok_ljspeech = os.path.exists(test_data_path) - - -def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers): - """run dataloader with given parameters and check conditions""" - ap = AudioProcessor(**C.audio) - - C.batch_size = batch_size - C.mode = mode - C.seq_len = seq_len - C.data_path = test_data_path - - preprocess_wav_files(test_data_path, C, ap) - _, train_items = load_wav_feat_data(test_data_path, test_mel_feat_path, 5) - - dataset = WaveRNNDataset( - ap=ap, items=train_items, seq_len=seq_len, hop_len=hop_len, pad=pad, mode=mode, mulaw=mulaw - ) - # sampler = DistributedSampler(dataset) if num_gpus > 1 else None - loader = DataLoader( - dataset, - shuffle=True, - collate_fn=dataset.collate, - batch_size=batch_size, - num_workers=num_workers, - pin_memory=True, - ) - - max_iter = 10 - count_iter = 0 - - try: - for data in loader: - x_input, mels, _ = data - expected_feat_shape = (ap.num_mels, (x_input.shape[-1] // hop_len) + (pad * 2)) - assert np.all(mels.shape[1:] == expected_feat_shape), f" [!] 
{mels.shape} vs {expected_feat_shape}" - - assert (mels.shape[2] - pad * 2) * hop_len == x_input.shape[1] - count_iter += 1 - if count_iter == max_iter: - break - # except AssertionError: - # shutil.rmtree(test_mel_feat_path) - # shutil.rmtree(test_quant_feat_path) - finally: - shutil.rmtree(test_mel_feat_path) - shutil.rmtree(test_quant_feat_path) - - -def test_parametrized_wavernn_dataset(): - """test dataloader with different parameters""" - params = [ - [16, C.audio["hop_length"] * 10, C.audio["hop_length"], 2, 10, True, 0], - [16, C.audio["hop_length"] * 10, C.audio["hop_length"], 2, "mold", False, 4], - [1, C.audio["hop_length"] * 10, C.audio["hop_length"], 2, 9, False, 0], - [1, C.audio["hop_length"], C.audio["hop_length"], 2, 10, True, 0], - [1, C.audio["hop_length"], C.audio["hop_length"], 2, "mold", False, 0], - [1, C.audio["hop_length"] * 5, C.audio["hop_length"], 4, 10, False, 2], - [1, C.audio["hop_length"] * 5, C.audio["hop_length"], 2, "mold", False, 0], - ] - for param in params: - print(param) - wavernn_dataset_case(*param) diff --git a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/api.py b/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/api.py deleted file mode 100644 index cb02d5252db5362b9985687a992e128a522e5b63..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/api.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import print_function -import os -import torch -from torch.utils.model_zoo import load_url -from enum import Enum -import numpy as np -import cv2 -try: - import urllib.request as request_file -except BaseException: - import urllib as request_file - -from .models import FAN, ResNetDepth -from .utils import * - - -class LandmarksType(Enum): - """Enum class defining the type of landmarks to detect. - - ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face - ``_2halfD`` - this points represent the projection of the 3D points into 3D - ``_3D`` - detect the points ``(x,y,z)``` in a 3D space - - """ - _2D = 1 - _2halfD = 2 - _3D = 3 - - -class NetworkSize(Enum): - # TINY = 1 - # SMALL = 2 - # MEDIUM = 3 - LARGE = 4 - - def __new__(cls, value): - member = object.__new__(cls) - member._value_ = value - return member - - def __int__(self): - return self.value - -ROOT = os.path.dirname(os.path.abspath(__file__)) - -class FaceAlignment: - def __init__(self, landmarks_type, network_size=NetworkSize.LARGE, - device='cuda', flip_input=False, face_detector='sfd', verbose=False): - self.device = device - self.flip_input = flip_input - self.landmarks_type = landmarks_type - self.verbose = verbose - - network_size = int(network_size) - - if 'cuda' in device: - torch.backends.cudnn.benchmark = True - - # Get the face detector - face_detector_module = __import__('face_detection.detection.' 
+ face_detector, - globals(), locals(), [face_detector], 0) - self.face_detector = face_detector_module.FaceDetector(device=device, verbose=verbose) - - def get_detections_for_batch(self, images): - images = images[..., ::-1] - detected_faces = self.face_detector.detect_from_batch(images.copy()) - results = [] - - for i, d in enumerate(detected_faces): - if len(d) == 0: - results.append(None) - continue - d = d[0] - d = np.clip(d, 0, None) - - x1, y1, x2, y2 = map(int, d[:-1]) - results.append((x1, y1, x2, y2)) - - return results \ No newline at end of file diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Util/test_Counter.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Util/test_Counter.py deleted file mode 100644 index 8837a32c8f763e7af267baf7bdff63ef80bf446e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Util/test_Counter.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# -# SelfTest/Util/test_Counter: Self-test for the Crypto.Util.Counter module -# -# Written in 2009 by Dwayne C. Litzenberger -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-# =================================================================== - -"""Self-tests for Crypto.Util.Counter""" - -from Crypto.Util.py3compat import * - -import unittest - -class CounterTests(unittest.TestCase): - def setUp(self): - global Counter - from Crypto.Util import Counter - - def test_BE(self): - """Big endian""" - c = Counter.new(128) - c = Counter.new(128, little_endian=False) - - def test_LE(self): - """Little endian""" - c = Counter.new(128, little_endian=True) - - def test_nbits(self): - c = Counter.new(nbits=128) - self.assertRaises(ValueError, Counter.new, 129) - - def test_prefix(self): - c = Counter.new(128, prefix=b("xx")) - - def test_suffix(self): - c = Counter.new(128, suffix=b("xx")) - - def test_iv(self): - c = Counter.new(128, initial_value=2) - self.assertRaises(ValueError, Counter.new, 16, initial_value=0x1FFFF) - -def get_tests(config={}): - from Crypto.SelfTest.st_common import list_test_cases - return list_test_cases(CounterTests) - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') - -# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Build/Tests/TestInline.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Build/Tests/TestInline.py deleted file mode 100644 index d209488083ece727b81ef268f2780317e2aed35d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Build/Tests/TestInline.py +++ /dev/null @@ -1,96 +0,0 @@ -import os, tempfile -from Cython.Shadow import inline -from Cython.Build.Inline import safe_type -from Cython.TestUtils import CythonTest - -try: - import numpy - has_numpy = True -except: - has_numpy = False - -test_kwds = dict(force=True, quiet=True) - -global_value = 100 - -class TestInline(CythonTest): - def setUp(self): - CythonTest.setUp(self) - self.test_kwds = dict(test_kwds) - if os.path.isdir('TEST_TMP'): - lib_dir = os.path.join('TEST_TMP','inline') - else: - lib_dir = tempfile.mkdtemp(prefix='cython_inline_') - self.test_kwds['lib_dir'] = lib_dir - - def test_simple(self): - self.assertEqual(inline("return 1+2", **self.test_kwds), 3) - - def test_types(self): - self.assertEqual(inline(""" - cimport cython - return cython.typeof(a), cython.typeof(b) - """, a=1.0, b=[], **self.test_kwds), ('double', 'list object')) - - def test_locals(self): - a = 1 - b = 2 - self.assertEqual(inline("return a+b", **self.test_kwds), 3) - - def test_globals(self): - self.assertEqual(inline("return global_value + 1", **self.test_kwds), global_value + 1) - - def test_no_return(self): - self.assertEqual(inline(""" - a = 1 - cdef double b = 2 - cdef c = [] - """, **self.test_kwds), dict(a=1, b=2.0, c=[])) - - def test_def_node(self): - foo = inline("def foo(x): return x * x", **self.test_kwds)['foo'] - self.assertEqual(foo(7), 49) - - def test_class_ref(self): - class Type(object): - pass - tp = inline("Type")['Type'] - self.assertEqual(tp, Type) - - def test_pure(self): - import cython as cy - b = inline(""" - b = cy.declare(float, a) - c = cy.declare(cy.pointer(cy.float), &b) - return b - """, a=3, **self.test_kwds) - self.assertEqual(type(b), float) - - def test_compiler_directives(self): - self.assertEqual( - inline('return sum(x)', - x=[1, 2, 3], - cython_compiler_directives={'boundscheck': False}), - 6 - ) - - def test_lang_version(self): - # GH-3419. Caching for inline code didn't always respect compiler directives. 
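-        # language_level=2 gives C-style floor division for `a/b` on ints
-        # (5/2 == 2), while language_level=3 gives true division (5/2 == 2.5),
-        # so a cache shared across the two calls would break one assertion.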
- inline_divcode = "def f(int a, int b): return a/b" - self.assertEqual( - inline(inline_divcode, language_level=2)['f'](5,2), - 2 - ) - self.assertEqual( - inline(inline_divcode, language_level=3)['f'](5,2), - 2.5 - ) - - if has_numpy: - - def test_numpy(self): - import numpy - a = numpy.ndarray((10, 20)) - a[0,0] = 10 - self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]') - self.assertEqual(inline("return a[0,0]", a=a, **self.test_kwds), 10.0) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/__init__.py deleted file mode 100644 index 0317d5c623778fe40b7bf07b77769cd10c243244..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# import models/tasks to register them -from . import dummy_dataset, dummy_lm, dummy_masked_lm, dummy_model, dummy_mt # noqa diff --git a/spaces/auto-academic/auto-draft/latex_templates/Default/abstract.tex b/spaces/auto-academic/auto-draft/latex_templates/Default/abstract.tex deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/awacke1/3DModelEditorWithAIV1/README.md b/spaces/awacke1/3DModelEditorWithAIV1/README.md deleted file mode 100644 index c2ef3a747612961c4f3eee617c1c8d51e061da6a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/3DModelEditorWithAIV1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🎨3DfromImg.GLB🎈 -emoji: 🎈🎨 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/awacke1/ArtStyleLineDrawing/README.md b/spaces/awacke1/ArtStyleLineDrawing/README.md deleted file mode 100644 index 21634b313604a6574d2329d7fdb9ef313c692da8..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ArtStyleLineDrawing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ArtStyleLineDrawing -emoji: 🌍 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.0.22 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Bloom.Big.Science.Continual.Generator/README.md b/spaces/awacke1/Bloom.Big.Science.Continual.Generator/README.md deleted file mode 100644 index bd749c254022e71b26236f1fd1fc6e4f1d590883..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Bloom.Big.Science.Continual.Generator/README.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: BigScience-Bloom-TextandCodeGenerator -emoji: 👀👀👀 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit ---- - - - -## Language Models 🗣️ -🏆 Bloom sets new record for most performant and efficient AI model in science! 
🌸 - -### Comparison of Large Language Models -| Model Name | Model Size (in Parameters) | -| ----------------- | -------------------------- | -| BigScience-tr11-176B | 176 billion | -| GPT-3 | 175 billion | -| OpenAI's DALL-E 2.0 | 500 million | -| NVIDIA's Megatron | 8.3 billion | -| Transformer-XL | 250 million | -| XLNet | 210 million | - -## ChatGPT Datasets 📚 -- WebText -- Common Crawl -- BooksCorpus -- English Wikipedia -- Toronto Books Corpus -- OpenWebText -- -## ChatGPT Datasets - Details 📚 -- **WebText:** A dataset of web pages crawled from domains on the Alexa top 5,000 list. This dataset was used to pretrain GPT-2. - - [WebText: A Large-Scale Unsupervised Text Corpus by Radford et al.](https://paperswithcode.com/dataset/webtext) -- **Common Crawl:** A dataset of web pages from a variety of domains, which is updated regularly. This dataset was used to pretrain GPT-3. - - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/common-crawl) by Brown et al. -- **BooksCorpus:** A dataset of over 11,000 books from a variety of genres. - - [Scalable Methods for 8 Billion Token Language Modeling](https://paperswithcode.com/dataset/bookcorpus) by Zhu et al. -- **English Wikipedia:** A dump of the English-language Wikipedia as of 2018, with articles from 2001-2017. - - [Improving Language Understanding by Generative Pre-Training](https://huggingface.co/spaces/awacke1/WikipediaUltimateAISearch?logs=build) Space for Wikipedia Search -- **Toronto Books Corpus:** A dataset of over 7,000 books from a variety of genres, collected by the University of Toronto. - - [Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond](https://paperswithcode.com/dataset/bookcorpus) by Schwenk and Douze. -- **OpenWebText:** A dataset of web pages that were filtered to remove content that was likely to be low-quality or spammy. This dataset was used to pretrain GPT-3. - - [Language Models are Few-Shot Learners](https://paperswithcode.com/dataset/openwebtext) by Brown et al. - -## Big Science Model 🚀 -- 📜 Papers: - 1. BLOOM: A 176B-Parameter Open-Access Multilingual Language Model [Paper](https://arxiv.org/abs/2211.05100) - 2. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism [Paper](https://arxiv.org/abs/1909.08053) - 3. 8-bit Optimizers via Block-wise Quantization [Paper](https://arxiv.org/abs/2110.02861) - 4. Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation [Paper](https://arxiv.org/abs/2108.12409) - 5. [Other papers related to Big Science](https://huggingface.co/models?other=doi:10.57967/hf/0003) - 6. [217 other models optimized for use with Bloom](https://huggingface.co/models?other=bloom) - -- 📚 Datasets: - -**Datasets:** -1. - **Universal Dependencies:** A collection of annotated corpora for natural language processing in a range of languages, with a focus on dependency parsing. - - [Universal Dependencies official website.](https://universaldependencies.org/) -2. - **WMT 2014:** The fourth edition of the Workshop on Statistical Machine Translation, featuring shared tasks on translating between English and various other languages. - - [WMT14 website.](http://www.statmt.org/wmt14/) -3. - **The Pile:** An English language corpus of diverse text, sourced from various places on the internet. - - [The Pile official website.](https://pile.eleuther.ai/) -4. - **HumanEval:** A dataset of English sentences, annotated with human judgments on a range of linguistic qualities. 
- - [HumanEval: An Evaluation Benchmark for Language Understanding](https://github.com/google-research-datasets/humaneval) by Gabriel Ilharco, Daniel Loureiro, Pedro Rodriguez, and Afonso Mendes. -5. - **FLORES-101:** A dataset of parallel sentences in 101 languages, designed for multilingual machine translation. - - [FLORES-101: A Massively Multilingual Parallel Corpus for Language Understanding](https://flores101.opennmt.net/) by Aman Madaan, Shruti Rijhwani, Raghav Gupta, and Mitesh M. Khapra. -6. - **CrowS-Pairs:** A dataset of sentence pairs, designed for evaluating the plausibility of generated text. - - [CrowS-Pairs: A Challenge Dataset for Plausible Plausibility Judgments](https://github.com/stanford-cogsci/crows-pairs) by Andrea Madotto, Zhaojiang Lin, Chien-Sheng Wu, Pascale Fung, and Caiming Xiong. -7. - **WikiLingua:** A dataset of parallel sentences in 75 languages, sourced from Wikipedia. - - [WikiLingua: A New Benchmark Dataset for Cross-Lingual Wikification](https://arxiv.org/abs/2105.08031) by Jiarui Yao, Yanqiao Zhu, Ruihan Bao, Guosheng Lin, Lidong Bing, and Bei Shi. -8. - **MTEB:** A dataset of English sentences, annotated with their entailment relationships with respect to other sentences. - - [Multi-Task Evaluation Benchmark for Natural Language Inference](https://github.com/google-research-datasets/mteb) by Michał Lukasik, Marcin Junczys-Dowmunt, and Houda Bouamor. -9. - **xP3:** A dataset of English sentences, annotated with their paraphrase relationships with respect to other sentences. - - [xP3: A Large-Scale Evaluation Benchmark for Paraphrase Identification in Context](https://github.com/nyu-dl/xp3) by Aniket Didolkar, James Mayfield, Markus Saers, and Jason Baldridge. -10. - **DiaBLa:** A dataset of English dialogue, annotated with dialogue acts. - - [A Large-Scale Corpus for Conversation Disentanglement](https://github.com/HLTCHKUST/DiaBLA) by Samuel Broscheit, António Branco, and André F. T. Martins. - -- 📚 Dataset Papers with Code - 1. [Universal Dependencies](https://paperswithcode.com/dataset/universal-dependencies) - 2. [WMT 2014](https://paperswithcode.com/dataset/wmt-2014) - 3. [The Pile](https://paperswithcode.com/dataset/the-pile) - 4. [HumanEval](https://paperswithcode.com/dataset/humaneval) - 5. [FLORES-101](https://paperswithcode.com/dataset/flores-101) - 6. [CrowS-Pairs](https://paperswithcode.com/dataset/crows-pairs) - 7. [WikiLingua](https://paperswithcode.com/dataset/wikilingua) - 8. [MTEB](https://paperswithcode.com/dataset/mteb) - 9. [xP3](https://paperswithcode.com/dataset/xp3) - 10. 
[DiaBLa](https://paperswithcode.com/dataset/diabla) - -# Deep RL ML Strategy 🧠 -The AI strategies are: -- Language Model Preparation using Human Augmented with Supervised Fine Tuning 🤖 -- Reward Model Training with Prompts Dataset Multi-Model Generate Data to Rank 🎁 -- Fine Tuning with Reinforcement Reward and Distance Distribution Regret Score 🎯 -- Proximal Policy Optimization Fine Tuning 🤝 -- Variations - Preference Model Pretraining 🤔 -- Use Ranking Datasets Sentiment - Thumbs Up/Down, Distribution 📊 -- Online Version Getting Feedback 💬 -- OpenAI - InstructGPT - Humans generate LM Training Text 🔍 -- DeepMind - Advantage Actor Critic Sparrow, GopherCite 🦜 -- Reward Model Human Prefence Feedback 🏆 - - -For more information on specific techniques and implementations, check out the following resources: -- OpenAI's paper on [GPT-3](https://arxiv.org/abs/2005.14165) which details their Language Model Preparation approach -- DeepMind's paper on [SAC](https://arxiv.org/abs/1801.01290) which describes the Advantage Actor Critic algorithm -- OpenAI's paper on [Reward Learning](https://arxiv.org/abs/1810.06580) which explains their approach to training Reward Models -- OpenAI's blog post on [GPT-3's fine-tuning process](https://openai.com/blog/fine-tuning-gpt-3/) - diff --git a/spaces/awacke1/HTML5-DNA-Sequence/style.css b/spaces/awacke1/HTML5-DNA-Sequence/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HTML5-DNA-Sequence/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/HTML5-Javascript-3D-Breakout-Game/README.md b/spaces/awacke1/HTML5-Javascript-3D-Breakout-Game/README.md deleted file mode 100644 index cf696db6db53cd68d2aa8b4b07e9dbfcc9f75a92..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HTML5-Javascript-3D-Breakout-Game/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 🤓HTML5 Javascript 3D Breakout Game🕹️📱 -emoji: 🤓🕹️📱 -colorFrom: yellow -colorTo: pink -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/ImageOCRMultilingual/images/Readme.md b/spaces/awacke1/ImageOCRMultilingual/images/Readme.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/EdgesGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/geometries/EdgesGeometry.d.ts deleted file mode 100644 index 3f8d203ab86b5a5208a897c2f37b540d32e2839b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/EdgesGeometry.d.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { BufferGeometry } from '../core/BufferGeometry'; -import { Geometry } from '../core/Geometry'; - -export class EdgesGeometry extends BufferGeometry { - constructor(geometry: BufferGeometry | Geometry, thresholdAngle?: number); -} diff --git a/spaces/bigPear/digitalWDF/examples/evaluate.sh 
b/spaces/bigPear/digitalWDF/examples/evaluate.sh deleted file mode 100644 index 51bd73556a8da338562fc4f9e20e693f8de2aa4b..0000000000000000000000000000000000000000 --- a/spaces/bigPear/digitalWDF/examples/evaluate.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -CUDA_VISIBLE_DEVICES=0 python ../src/finetune.py \ - --do_eval \ - --dataset alpaca_gpt4_zh \ - --dataset_dir ../data \ - --checkpoint_dir path_to_checkpoint \ - --output_dir path_to_eval_result \ - --overwrite_cache \ - --per_device_eval_batch_size 8 \ - --max_samples 50 \ - --predict_with_generate diff --git a/spaces/bigjoker/stable-diffusion-webui/launch.py b/spaces/bigjoker/stable-diffusion-webui/launch.py deleted file mode 100644 index c83dd5b72591d422cfc5f64cbe15f19021d8b159..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/launch.py +++ /dev/null @@ -1,361 +0,0 @@ -# this scripts installs necessary requirements and launches main program in webui.py -import subprocess -import os -import sys -import importlib.util -import shlex -import platform -import argparse -import json - -dir_repos = "repositories" -dir_extensions = "extensions" -python = sys.executable -git = os.environ.get('GIT', "git") -index_url = os.environ.get('INDEX_URL', "") -stored_commit_hash = None -skip_install = False - - -def check_python_version(): - is_windows = platform.system() == "Windows" - major = sys.version_info.major - minor = sys.version_info.minor - micro = sys.version_info.micro - - if is_windows: - supported_minors = [10] - else: - supported_minors = [7, 8, 9, 10, 11] - - if not (major == 3 and minor in supported_minors): - import modules.errors - - modules.errors.print_error_explanation(f""" -INCOMPATIBLE PYTHON VERSION - -This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}. -If you encounter an error with "RuntimeError: Couldn't install torch." message, -or any other error regarding unsuccessful package (library) installation, -please downgrade (or upgrade) to the latest version of 3.10 Python -and delete current Python and "venv" folder in WebUI's directory. - -You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/ - -{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} - -Use --skip-python-version-check to suppress this warning. -""") - - -def commit_hash(): - global stored_commit_hash - - if stored_commit_hash is not None: - return stored_commit_hash - - try: - stored_commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - stored_commit_hash = "" - - return stored_commit_hash - - -def extract_arg(args, name): - return [x for x in args if x != name], name in args - - -def extract_opt(args, name): - opt = None - is_present = False - if name in args: - is_present = True - idx = args.index(name) - del args[idx] - if idx < len(args) and args[idx][0] != "-": - opt = args[idx] - del args[idx] - return args, is_present, opt - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. 
-Command: {command} -Error code: {result.returncode}""") - - return "" - - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - - if result.returncode != 0: - - message = f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode} -stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} -stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} -""" - raise RuntimeError(message) - - return result.stdout.decode(encoding="utf8", errors="ignore") - - -def check_run(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - return result.returncode == 0 - - -def is_installed(package): - try: - spec = importlib.util.find_spec(package) - except ModuleNotFoundError: - return False - - return spec is not None - - -def repo_dir(name): - return os.path.join(dir_repos, name) - - -def run_python(code, desc=None, errdesc=None): - return run(f'"{python}" -c "{code}"', desc, errdesc) - - -def run_pip(args, desc=None): - if skip_install: - return - - index_url_line = f' --index-url {index_url}' if index_url != '' else '' - return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}") - - -def check_run_python(code): - return check_run(f'"{python}" -c "{code}"') - - -def git_clone(url, dir, name, commithash=None): - # TODO clone into temporary dir and move if successful - - if os.path.exists(dir): - if commithash is None: - return - - current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() - if current_hash == commithash: - return - - run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") - return - - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") - - if commithash is not None: - run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") - - -def version_check(commit): - try: - import requests - commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() - if commit != "" and commits['commit']['sha'] != commit: - print("--------------------------------------------------------") - print("| You are not up to date with the most recent release. |") - print("| Consider running `git pull` to update. 
|") - print("--------------------------------------------------------") - elif commits['commit']['sha'] == commit: - print("You are up to date with the most recent release.") - else: - print("Not a git clone, can't perform version check.") - except Exception as e: - print("version check failed", e) - - -def run_extension_installer(extension_dir): - path_installer = os.path.join(extension_dir, "install.py") - if not os.path.isfile(path_installer): - return - - try: - env = os.environ.copy() - env['PYTHONPATH'] = os.path.abspath(".") - - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) - except Exception as e: - print(e, file=sys.stderr) - - -def list_extensions(settings_file): - settings = {} - - try: - if os.path.isfile(settings_file): - with open(settings_file, "r", encoding="utf8") as file: - settings = json.load(file) - except Exception as e: - print(e, file=sys.stderr) - - disabled_extensions = set(settings.get('disabled_extensions', [])) - - return [x for x in os.listdir(dir_extensions) if x not in disabled_extensions] - - -def run_extensions_installers(settings_file): - if not os.path.isdir(dir_extensions): - return - - for dirname_extension in list_extensions(settings_file): - run_extension_installer(os.path.join(dir_extensions, dirname_extension)) - - -def prepare_environment(): - global skip_install - - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") - requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - commandline_args = os.environ.get('COMMANDLINE_ARGS', "--skip-torch-cuda-test --use-cpu all --precision full --no-half") - - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425') - gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") - clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") - openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") - - stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git") - taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git") - k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git') - codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') - blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') - - stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e") - taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") - k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec") - codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") - blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") - - sys.argv += shlex.split(commandline_args) - - parser = argparse.ArgumentParser(add_help=False) - parser.add_argument("--ui-settings-file", 
type=str, help="filename to use for ui settings", default='config.json') - args, _ = parser.parse_known_args(sys.argv) - - sys.argv, _ = extract_arg(sys.argv, '-f') - sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') - sys.argv, skip_python_version_check = extract_arg(sys.argv, '--skip-python-version-check') - sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') - sys.argv, reinstall_torch = extract_arg(sys.argv, '--reinstall-torch') - sys.argv, update_check = extract_arg(sys.argv, '--update-check') - sys.argv, run_tests, test_dir = extract_opt(sys.argv, '--tests') - sys.argv, skip_install = extract_arg(sys.argv, '--skip-install') - xformers = '--xformers' in sys.argv - ngrok = '--ngrok' in sys.argv - - if not skip_python_version_check: - check_python_version() - - commit = commit_hash() - - print(f"Python {sys.version}") - print(f"Commit hash: {commit}") - - if reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): - run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) - - if not skip_torch_cuda_test: - run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") - - if not is_installed("gfpgan"): - run_pip(f"install {gfpgan_package}", "gfpgan") - - if not is_installed("clip"): - run_pip(f"install {clip_package}", "clip") - - if not is_installed("open_clip"): - run_pip(f"install {openclip_package}", "open_clip") - - if (not is_installed("xformers") or reinstall_xformers) and xformers: - if platform.system() == "Windows": - if platform.python_version().startswith("3.10"): - run_pip(f"install -U -I --no-deps {xformers_package}", "xformers") - else: - print("Installation of xformers is not supported in this version of Python.") - print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness") - if not is_installed("xformers"): - exit(0) - elif platform.system() == "Linux": - run_pip(f"install {xformers_package}", "xformers") - - if not is_installed("pyngrok") and ngrok: - run_pip("install pyngrok", "ngrok") - - os.makedirs(dir_repos, exist_ok=True) - - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) - git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) - git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) - git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) - git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) - - if not is_installed("lpips"): - run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") - - run_pip(f"install -r {requirements_file}", "requirements for Web UI") - - run_extensions_installers(settings_file=args.ui_settings_file) - - if update_check: - version_check(commit) - - if "--exit" in sys.argv: - print("Exiting because of --exit argument") - exit(0) - - if run_tests: - exitcode = tests(test_dir) - exit(exitcode) - - -def tests(test_dir): - if "--api" not in sys.argv: - sys.argv.append("--api") - if "--ckpt" not in sys.argv: - sys.argv.append("--ckpt") - sys.argv.append("./test/test_files/empty.pt") - if "--skip-torch-cuda-test" not in 
sys.argv: - sys.argv.append("--skip-torch-cuda-test") - if "--disable-nan-check" not in sys.argv: - sys.argv.append("--disable-nan-check") - - print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") - - os.environ['COMMANDLINE_ARGS'] = "" - with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: - proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) - - import test.server_poll - exitcode = test.server_poll.run_tests(proc, test_dir) - - print(f"Stopping Web UI process with id {proc.pid}") - proc.kill() - return exitcode - - -def start(): - print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}") - import webui - if '--nowebui' in sys.argv: - webui.api_only() - else: - webui.webui() - - -if __name__ == "__main__": - prepare_environment() - start() diff --git a/spaces/bioriAsaeru/text-to-voice/Fluke Smartview 3.2 [HOT].md b/spaces/bioriAsaeru/text-to-voice/Fluke Smartview 3.2 [HOT].md deleted file mode 100644 index 5051fcfd709e61e89ad1971e4faea2a5228221b0..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Fluke Smartview 3.2 [HOT].md +++ /dev/null @@ -1,36 +0,0 @@ -

    Fluke Smartview 3.2






    diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py b/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py deleted file mode 100644 index 2a7c376da5f9269197c44079f3e0f3b09cdc63fa..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) - -train.max_iter *= 2 # 100ep -> 200ep - -lr_multiplier.scheduler.milestones = [ - milestone * 2 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/utils/events.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/utils/events.py deleted file mode 100644 index 7d582a9a1683c2bf3a0452a81b7e1c869789e57e..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/utils/events.py +++ /dev/null @@ -1,551 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import datetime -import json -import logging -import os -import time -from collections import defaultdict -from contextlib import contextmanager -from functools import cached_property -from typing import Optional -import torch -from fvcore.common.history_buffer import HistoryBuffer - -from detectron2.utils.file_io import PathManager - -__all__ = [ - "get_event_storage", - "has_event_storage", - "JSONWriter", - "TensorboardXWriter", - "CommonMetricPrinter", - "EventStorage", -] - -_CURRENT_STORAGE_STACK = [] - - -def get_event_storage(): - """ - Returns: - The :class:`EventStorage` object that's currently being used. - Throws an error if no :class:`EventStorage` is currently enabled. - """ - assert len( - _CURRENT_STORAGE_STACK - ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" - return _CURRENT_STORAGE_STACK[-1] - - -def has_event_storage(): - """ - Returns: - Check if there are EventStorage() context existed. - """ - return len(_CURRENT_STORAGE_STACK) > 0 - - -class EventWriter: - """ - Base class for writers that obtain events from :class:`EventStorage` and process them. - """ - - def write(self): - raise NotImplementedError - - def close(self): - pass - - -class JSONWriter(EventWriter): - """ - Write scalars to a json file. - - It saves scalars as one json per line (instead of a big json) for easy parsing. - - Examples parsing such a json file: - :: - $ cat metrics.json | jq -s '.[0:2]' - [ - { - "data_time": 0.008433341979980469, - "iteration": 19, - "loss": 1.9228371381759644, - "loss_box_reg": 0.050025828182697296, - "loss_classifier": 0.5316952466964722, - "loss_mask": 0.7236229181289673, - "loss_rpn_box": 0.0856662318110466, - "loss_rpn_cls": 0.48198649287223816, - "lr": 0.007173333333333333, - "time": 0.25401854515075684 - }, - { - "data_time": 0.007216215133666992, - "iteration": 39, - "loss": 1.282649278640747, - "loss_box_reg": 0.06222952902317047, - "loss_classifier": 0.30682939291000366, - "loss_mask": 0.6970193982124329, - "loss_rpn_box": 0.038663312792778015, - "loss_rpn_cls": 0.1471673548221588, - "lr": 0.007706666666666667, - "time": 0.2490077018737793 - } - ] - - $ cat metrics.json | jq '.loss_mask' - 0.7126231789588928 - 0.689423680305481 - 0.6776131987571716 - ... 
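-
-    Example of driving the writer (a minimal sketch; the path and scalar name
-    are placeholders)::
-
-        writer = JSONWriter("./output/metrics.json", window_size=20)
-        with EventStorage(0) as storage:
-            storage.put_scalar("loss", 1.0)
-            writer.write()   # appends one JSON line for the current iteration
-        writer.close()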
- - """ - - def __init__(self, json_file, window_size=20): - """ - Args: - json_file (str): path to the json file. New data will be appended if the file exists. - window_size (int): the window size of median smoothing for the scalars whose - `smoothing_hint` are True. - """ - self._file_handle = PathManager.open(json_file, "a") - self._window_size = window_size - self._last_write = -1 - - def write(self): - storage = get_event_storage() - to_save = defaultdict(dict) - - for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): - # keep scalars that have not been written - if iter <= self._last_write: - continue - to_save[iter][k] = v - if len(to_save): - all_iters = sorted(to_save.keys()) - self._last_write = max(all_iters) - - for itr, scalars_per_iter in to_save.items(): - scalars_per_iter["iteration"] = itr - self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") - self._file_handle.flush() - try: - os.fsync(self._file_handle.fileno()) - except AttributeError: - pass - - def close(self): - self._file_handle.close() - - -class TensorboardXWriter(EventWriter): - """ - Write all scalars to a tensorboard file. - """ - - def __init__(self, log_dir: str, window_size: int = 20, **kwargs): - """ - Args: - log_dir (str): the directory to save the output events - window_size (int): the scalars will be median-smoothed by this window size - - kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` - """ - self._window_size = window_size - self._writer_args = {"log_dir": log_dir, **kwargs} - self._last_write = -1 - - @cached_property - def _writer(self): - from torch.utils.tensorboard import SummaryWriter - - return SummaryWriter(**self._writer_args) - - def write(self): - storage = get_event_storage() - new_last_write = self._last_write - for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): - if iter > self._last_write: - self._writer.add_scalar(k, v, iter) - new_last_write = max(new_last_write, iter) - self._last_write = new_last_write - - # storage.put_{image,histogram} is only meant to be used by - # tensorboard writer. So we access its internal fields directly from here. - if len(storage._vis_data) >= 1: - for img_name, img, step_num in storage._vis_data: - self._writer.add_image(img_name, img, step_num) - # Storage stores all image data and rely on this writer to clear them. - # As a result it assumes only one writer will use its image data. - # An alternative design is to let storage store limited recent - # data (e.g. only the most recent image) that all writers can access. - # In that case a writer may not see all image data if its period is long. - storage.clear_images() - - if len(storage._histograms) >= 1: - for params in storage._histograms: - self._writer.add_histogram_raw(**params) - storage.clear_histograms() - - def close(self): - if "_writer" in self.__dict__: - self._writer.close() - - -class CommonMetricPrinter(EventWriter): - """ - Print **common** metrics to the terminal, including - iteration time, ETA, memory, all losses, and the learning rate. - It also applies smoothing using a window of 20 elements. - - It's meant to print common metrics in common ways. - To print something in more customized ways, please implement a similar printer by yourself. - """ - - def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): - """ - Args: - max_iter: the maximum number of iterations to train. - Used to compute ETA. If not given, ETA will not be printed. 
- window_size (int): the losses will be median-smoothed by this window size - """ - self.logger = logging.getLogger("detectron2.utils.events") - self._max_iter = max_iter - self._window_size = window_size - self._last_write = None # (step, time) of last call to write(). Used to compute ETA - - def _get_eta(self, storage) -> Optional[str]: - if self._max_iter is None: - return "" - iteration = storage.iter - try: - eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1) - storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) - return str(datetime.timedelta(seconds=int(eta_seconds))) - except KeyError: - # estimate eta on our own - more noisy - eta_string = None - if self._last_write is not None: - estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( - iteration - self._last_write[0] - ) - eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - self._last_write = (iteration, time.perf_counter()) - return eta_string - - def write(self): - storage = get_event_storage() - iteration = storage.iter - if iteration == self._max_iter: - # This hook only reports training progress (loss, ETA, etc) but not other data, - # therefore do not write anything after training succeeds, even if this method - # is called. - return - - try: - avg_data_time = storage.history("data_time").avg( - storage.count_samples("data_time", self._window_size) - ) - last_data_time = storage.history("data_time").latest() - except KeyError: - # they may not exist in the first few iterations (due to warmup) - # or when SimpleTrainer is not used - avg_data_time = None - last_data_time = None - try: - avg_iter_time = storage.history("time").global_avg() - last_iter_time = storage.history("time").latest() - except KeyError: - avg_iter_time = None - last_iter_time = None - try: - lr = "{:.5g}".format(storage.history("lr").latest()) - except KeyError: - lr = "N/A" - - eta_string = self._get_eta(storage) - - if torch.cuda.is_available(): - max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 - else: - max_mem_mb = None - - # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" - self.logger.info( - str.format( - " {eta}iter: {iter} {losses} {non_losses} {avg_time}{last_time}" - + "{avg_data_time}{last_data_time} lr: {lr} {memory}", - eta=f"eta: {eta_string} " if eta_string else "", - iter=iteration, - losses=" ".join( - [ - "{}: {:.4g}".format( - k, v.median(storage.count_samples(k, self._window_size)) - ) - for k, v in storage.histories().items() - if "loss" in k - ] - ), - non_losses=" ".join( - [ - "{}: {:.4g}".format( - k, v.median(storage.count_samples(k, self._window_size)) - ) - for k, v in storage.histories().items() - if "[metric]" in k - ] - ), - avg_time="time: {:.4f} ".format(avg_iter_time) - if avg_iter_time is not None - else "", - last_time="last_time: {:.4f} ".format(last_iter_time) - if last_iter_time is not None - else "", - avg_data_time="data_time: {:.4f} ".format(avg_data_time) - if avg_data_time is not None - else "", - last_data_time="last_data_time: {:.4f} ".format(last_data_time) - if last_data_time is not None - else "", - lr=lr, - memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", - ) - ) - - -class EventStorage: - """ - The user-facing class that provides metric storage functionalities. - - In the future we may add support for storing / logging other types of data if needed. 
- """ - - def __init__(self, start_iter=0): - """ - Args: - start_iter (int): the iteration number to start with - """ - self._history = defaultdict(HistoryBuffer) - self._smoothing_hints = {} - self._latest_scalars = {} - self._iter = start_iter - self._current_prefix = "" - self._vis_data = [] - self._histograms = [] - - def put_image(self, img_name, img_tensor): - """ - Add an `img_tensor` associated with `img_name`, to be shown on - tensorboard. - - Args: - img_name (str): The name of the image to put into tensorboard. - img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` - Tensor of shape `[channel, height, width]` where `channel` is - 3. The image format should be RGB. The elements in img_tensor - can either have values in [0, 1] (float32) or [0, 255] (uint8). - The `img_tensor` will be visualized in tensorboard. - """ - self._vis_data.append((img_name, img_tensor, self._iter)) - - def put_scalar(self, name, value, smoothing_hint=True, cur_iter=None): - """ - Add a scalar `value` to the `HistoryBuffer` associated with `name`. - - Args: - smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be - smoothed when logged. The hint will be accessible through - :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint - and apply custom smoothing rule. - - It defaults to True because most scalars we save need to be smoothed to - provide any useful signal. - cur_iter (int): an iteration number to set explicitly instead of current iteration - """ - name = self._current_prefix + name - cur_iter = self._iter if cur_iter is None else cur_iter - history = self._history[name] - value = float(value) - history.update(value, cur_iter) - self._latest_scalars[name] = (value, cur_iter) - - existing_hint = self._smoothing_hints.get(name) - - if existing_hint is not None: - assert ( - existing_hint == smoothing_hint - ), "Scalar {} was put with a different smoothing_hint!".format(name) - else: - self._smoothing_hints[name] = smoothing_hint - - def put_scalars(self, *, smoothing_hint=True, cur_iter=None, **kwargs): - """ - Put multiple scalars from keyword arguments. - - Examples: - - storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) - """ - for k, v in kwargs.items(): - self.put_scalar(k, v, smoothing_hint=smoothing_hint, cur_iter=cur_iter) - - def put_histogram(self, hist_name, hist_tensor, bins=1000): - """ - Create a histogram from a tensor. - - Args: - hist_name (str): The name of the histogram to put into tensorboard. - hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted - into a histogram. - bins (int): Number of histogram bins. 
- """ - ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() - - # Create a histogram with PyTorch - hist_counts = torch.histc(hist_tensor, bins=bins) - hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) - - # Parameters for the add_histogram_raw function of SummaryWriter - hist_params = dict( - tag=hist_name, - min=ht_min, - max=ht_max, - num=len(hist_tensor), - sum=float(hist_tensor.sum()), - sum_squares=float(torch.sum(hist_tensor**2)), - bucket_limits=hist_edges[1:].tolist(), - bucket_counts=hist_counts.tolist(), - global_step=self._iter, - ) - self._histograms.append(hist_params) - - def history(self, name): - """ - Returns: - HistoryBuffer: the scalar history for name - """ - ret = self._history.get(name, None) - if ret is None: - raise KeyError("No history metric available for {}!".format(name)) - return ret - - def histories(self): - """ - Returns: - dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars - """ - return self._history - - def latest(self): - """ - Returns: - dict[str -> (float, int)]: mapping from the name of each scalar to the most - recent value and the iteration number it was added at. - """ - return self._latest_scalars - - def latest_with_smoothing_hint(self, window_size=20): - """ - Similar to :meth:`latest`, but the returned values - are either the un-smoothed original latest value, - or a median of the given window_size, - depending on whether the smoothing_hint is True. - - This provides a default behavior that other writers can use. - - Note: All scalars saved in the past `window_size` iterations are used for smoothing. - This is different from the `window_size` definition in HistoryBuffer. - Use :meth:`get_history_window_size` to get the `window_size` used in HistoryBuffer. - """ - result = {} - for k, (v, itr) in self._latest_scalars.items(): - result[k] = ( - self._history[k].median(self.count_samples(k, window_size)) - if self._smoothing_hints[k] - else v, - itr, - ) - return result - - def count_samples(self, name, window_size=20): - """ - Return the number of samples logged in the past `window_size` iterations. - """ - samples = 0 - data = self._history[name].values() - for _, iter_ in reversed(data): - if iter_ > data[-1][1] - window_size: - samples += 1 - else: - break - return samples - - def smoothing_hints(self): - """ - Returns: - dict[name -> bool]: the user-provided hint on whether the scalar - is noisy and needs smoothing. - """ - return self._smoothing_hints - - def step(self): - """ - User should either: (1) Call this function to increment storage.iter when needed. Or - (2) Set `storage.iter` to the correct iteration number before each iteration. - - The storage will then be able to associate the new data with an iteration number. - """ - self._iter += 1 - - @property - def iter(self): - """ - Returns: - int: The current iteration number. When used together with a trainer, - this is ensured to be the same as trainer.iter. - """ - return self._iter - - @iter.setter - def iter(self, val): - self._iter = int(val) - - @property - def iteration(self): - # for backward compatibility - return self._iter - - def __enter__(self): - _CURRENT_STORAGE_STACK.append(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - assert _CURRENT_STORAGE_STACK[-1] == self - _CURRENT_STORAGE_STACK.pop() - - @contextmanager - def name_scope(self, name): - """ - Yields: - A context within which all the events added to this storage - will be prefixed by the name scope.
- """ - old_prefix = self._current_prefix - self._current_prefix = name.rstrip("/") + "/" - yield - self._current_prefix = old_prefix - - def clear_images(self): - """ - Delete all the stored images for visualization. This should be called - after images are written to tensorboard. - """ - self._vis_data = [] - - def clear_histograms(self): - """ - Delete all the stored histograms for visualization. - This should be called after histograms are written to tensorboard. - """ - self._histograms = [] diff --git a/spaces/captchaboy/FAST-ABINet-OCR/tools/crop_by_word_bb_syn90k.py b/spaces/captchaboy/FAST-ABINet-OCR/tools/crop_by_word_bb_syn90k.py deleted file mode 100644 index e6f2e072226df98366fb4c52c20a3199f32e4078..0000000000000000000000000000000000000000 --- a/spaces/captchaboy/FAST-ABINet-OCR/tools/crop_by_word_bb_syn90k.py +++ /dev/null @@ -1,153 +0,0 @@ -# Crop by word bounding box -# Locate script with gt.mat -# $ python crop_by_word_bb.py - -import os -import re -import cv2 -import scipy.io as sio -from itertools import chain -import numpy as np -import math - -mat_contents = sio.loadmat('gt.mat') - -image_names = mat_contents['imnames'][0] -cropped_indx = 0 -start_img_indx = 0 -gt_file = open('gt_oabc.txt', 'a') -err_file = open('err_oabc.txt', 'a') - -for img_indx in range(start_img_indx, len(image_names)): - - - # Get image name - image_name_new = image_names[img_indx][0] - # print(image_name_new) - image_name = '/home/yxwang/pytorch/dataset/SynthText/img/'+ image_name_new - # print('IMAGE : {}.{}'.format(img_indx, image_name)) - print('evaluating {} image'.format(img_indx), end='\r') - # Get text in image - txt = mat_contents['txt'][0][img_indx] - txt = [re.split(' \n|\n |\n| ', t.strip()) for t in txt] - txt = list(chain(*txt)) - txt = [t for t in txt if len(t) > 0 ] - # print(txt) # ['Lines:', 'I', 'lost', 'Kevin', 'will', 'line', 'and', 'and', 'the', '(and', 'the', 'out', 'you', "don't", 'pkg'] - # assert 1<0 - - # Open image - #img = Image.open(image_name) - img = cv2.imread(image_name, cv2.IMREAD_COLOR) - img_height, img_width, _ = img.shape - - # Validation - if len(np.shape(mat_contents['wordBB'][0][img_indx])) == 2: - wordBBlen = 1 - else: - wordBBlen = mat_contents['wordBB'][0][img_indx].shape[-1] - - if wordBBlen == len(txt): - # Crop image and save - for word_indx in range(len(txt)): - # print('txt--',txt) - txt_temp = txt[word_indx] - len_now = len(txt_temp) - # txt_temp = re.sub('[^0-9a-zA-Z]+', '', txt_temp) - # print('txt_temp-1-',txt_temp) - txt_temp = re.sub('[^a-zA-Z]+', '', txt_temp) - # print('txt_temp-2-',txt_temp) - if len_now - len(txt_temp) != 0: - print('txt_temp-2-', txt_temp) - - if len(np.shape(mat_contents['wordBB'][0][img_indx])) == 2: # only one word (2,4) - wordBB = mat_contents['wordBB'][0][img_indx] - else: # many words (2,4,num_words) - wordBB = mat_contents['wordBB'][0][img_indx][:, :, word_indx] - - if np.shape(wordBB) != (2, 4): - err_log = 'malformed box index: {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB) - err_file.write(err_log) - # print(err_log) - continue - - pts1 = np.float32([[wordBB[0][0], wordBB[1][0]], - [wordBB[0][3], wordBB[1][3]], - [wordBB[0][1], wordBB[1][1]], - [wordBB[0][2], wordBB[1][2]]]) - height = math.sqrt((wordBB[0][0] - wordBB[0][3])**2 + (wordBB[1][0] - wordBB[1][3])**2) - width = math.sqrt((wordBB[0][0] - wordBB[0][1])**2 + (wordBB[1][0] - wordBB[1][1])**2) - - # Coord validation check - if (height * width) <= 0: - err_log = 'empty file : {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB) - 
err_file.write(err_log) - # print(err_log) - continue - elif (height * width) > (img_height * img_width): - err_log = 'too big box : {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB) - err_file.write(err_log) - # print(err_log) - continue - else: - valid = True - for i in range(2): - for j in range(4): - if wordBB[i][j] < 0 or wordBB[i][j] > img.shape[1 - i]: - valid = False - break - if not valid: - break - if not valid: - err_log = 'invalid coord : {}\t{}\t{}\t{}\t{}\n'.format( - image_name, txt[word_indx], wordBB, (width, height), (img_width, img_height)) - err_file.write(err_log) - # print(err_log) - continue - - pts2 = np.float32([[0, 0], - [0, height], - [width, 0], - [width, height]]) - - x_min = int(round(min(wordBB[0][0], wordBB[0][1], wordBB[0][2], wordBB[0][3]))) - x_max = int(round(max(wordBB[0][0], wordBB[0][1], wordBB[0][2], wordBB[0][3]))) - y_min = int(round(min(wordBB[1][0], wordBB[1][1], wordBB[1][2], wordBB[1][3]))) - y_max = int(round(max(wordBB[1][0], wordBB[1][1], wordBB[1][2], wordBB[1][3]))) - # print(x_min, x_max, y_min, y_max) - # print(img.shape) - # assert 1<0 - if len(img.shape) == 3: - img_cropped = img[ y_min:y_max:1, x_min:x_max:1, :] - else: - img_cropped = img[ y_min:y_max:1, x_min:x_max:1] - dir_name = '/home/yxwang/pytorch/dataset/SynthText/cropped-oabc/{}'.format(image_name_new.split('/')[0]) - # print('dir_name--',dir_name) - if not os.path.exists(dir_name): - os.mkdir(dir_name) - cropped_file_name = "{}/{}_{}_{}.jpg".format(dir_name, cropped_indx, - image_name.split('/')[-1][:-len('.jpg')], word_indx) - # print('cropped_file_name--',cropped_file_name) - # print('img_cropped--',img_cropped.shape) - if img_cropped.shape[0] == 0 or img_cropped.shape[1] == 0: - err_log = 'word_box_mismatch : {}\t{}\t{}\n'.format(image_name, mat_contents['txt'][0][ - img_indx], mat_contents['wordBB'][0][img_indx]) - err_file.write(err_log) - # print(err_log) - continue - # print('img_cropped--',img_cropped) - - # img_cropped.save(cropped_file_name) - cv2.imwrite(cropped_file_name, img_cropped) - cropped_indx += 1 - gt_file.write('%s\t%s\n' % (cropped_file_name, txt[word_indx])) - - # if cropped_indx>10: - # assert 1<0 - # assert 1 < 0 - else: - err_log = 'word_box_mismatch : {}\t{}\t{}\n'.format(image_name, mat_contents['txt'][0][ - img_indx], mat_contents['wordBB'][0][img_indx]) - err_file.write(err_log) - # print(err_log) -gt_file.close() -err_file.close() diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/README.md b/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/README.md deleted file mode 100644 index bec811ad002a016f2137d9d0ea61c27ee5e78992..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/README.md +++ /dev/null @@ -1,7 +0,0 @@ - -## Some scripts for developers to use, including: - -- `linter.sh`: lint the codebase before commit. -- `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. - Note that these tests require 2 GPUs. -- `parse_results.sh`: parse results from a log file.
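For reference, the crop step in the SynthText script above reduces each (2, 4) `wordBB` corner array to an axis-aligned box before slicing; the `pts1`/`pts2` perspective points are computed but never used for a warp, so the plain slice is what lands on disk. A condensed sketch of the effective logic, with `crop_word` as a hypothetical helper name (not part of the original script):

```python
import numpy as np

def crop_word(img: np.ndarray, wordBB: np.ndarray) -> np.ndarray:
    """Axis-aligned crop of one word. wordBB is (2, 4): row 0 = x, row 1 = y."""
    x_min, x_max = int(round(wordBB[0].min())), int(round(wordBB[0].max()))
    y_min, y_max = int(round(wordBB[1].min())), int(round(wordBB[1].max()))
    return img[y_min:y_max, x_min:x_max]  # works for grayscale and BGR arrays alike
```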
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/PointSup/point_sup/register_point_annotations.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/PointSup/point_sup/register_point_annotations.py deleted file mode 100644 index 32f2bb45e864e5be9d002f4d07badb91700ace4b..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/PointSup/point_sup/register_point_annotations.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets.builtin import _get_builtin_metadata -from detectron2.data.datasets.coco import load_coco_json - -logger = logging.getLogger(__name__) - - -# COCO dataset -def register_coco_instances_with_points(name, metadata, json_file, image_root): - """ - Register a dataset in COCO's json annotation format for - instance segmentation with point annotation. - - The point annotation json does not have "segmentation" field, instead, - it has "point_coords" and "point_labels" fields. - - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - metadata (dict): extra metadata associated with this dataset. You can - leave it as an empty dict. - json_file (str): path to the json instance annotation file. - image_root (str or path-like): directory which contains all the images. - """ - assert isinstance(name, str), name - assert isinstance(json_file, (str, os.PathLike)), json_file - assert isinstance(image_root, (str, os.PathLike)), image_root - # 1. register a function which returns dicts - DatasetCatalog.register( - name, lambda: load_coco_json(json_file, image_root, name, ["point_coords", "point_labels"]) - ) - - # 2. Optionally, add metadata about this dataset, - # since they might be useful in evaluation, visualization or logging - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata - ) - - -_PREDEFINED_SPLITS_COCO = {} -_PREDEFINED_SPLITS_COCO["coco"] = { - # point annotations without masks - "coco_2017_train_points_n10_v1_without_masks": ( - "coco/train2017", - "coco/annotations/instances_train2017_n10_v1_without_masks.json", - ), -} - - -def register_all_coco_train_points(root): - for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): - for key, (image_root, json_file) in splits_per_dataset.items(): - # Assume pre-defined datasets live in `./datasets`. 
- register_coco_instances_with_points( - key, - _get_builtin_metadata(dataset_name), - os.path.join(root, json_file) if "://" not in json_file else json_file, - os.path.join(root, image_root), - ) - - -# True for open source; -# Internally at fb, we register them elsewhere -if __name__.endswith(".register_point_annotations"): - _root = os.getenv("DETECTRON2_DATASETS", "datasets") - register_all_coco_train_points(_root) diff --git a/spaces/cffl/Exploring_Intelligent_Writing_Assistance/tests/__init__.py b/spaces/cffl/Exploring_Intelligent_Writing_Assistance/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/jit_ops.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/jit_ops.py deleted file mode 100644 index 0fdac4de2b2cedbf523a887ce7564cbc6c372a28..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/layers/jit_ops.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved - -import glob -import importlib -import os -import sys -import time -from typing import List - -__all__ = ["JitOp", "FastCOCOEvalOp"] - - -class JitOp: - """ - Just-in-time compilation of ops. - - Some of the code in `JitOp` is inspired by `deepspeed.op_builder`; - check the following link for more details: - https://github.com/microsoft/DeepSpeed/blob/master/op_builder/builder.py - """ - - def __init__(self, name): - self.name = name - - def absolute_name(self) -> str: - """Get absolute build path for cases where the op is pre-installed.""" - pass - - def sources(self) -> List: - """Get path list of source files of op. - - NOTE: the path should be relative to the root of the package during building; - otherwise, an exception will be raised when building the package. - However, for runtime building, the path will be absolute. - """ - pass - - def include_dirs(self) -> List: - """ - Get list of include paths, relative to root of package. - - NOTE: the path should be relative to the root of the package; - otherwise, an exception will be raised when building the package.
- """ - return [] - - def define_macros(self) -> List: - """Get list of macros to define for op""" - return [] - - def cxx_args(self) -> List: - """Get optional list of compiler flags to forward""" - args = ["-O2"] if sys.platform == "win32" else ["-O3", "-std=c++14", "-g", "-Wno-reorder"] - return args - - def nvcc_args(self) -> List: - """Get optional list of compiler flags to forward to nvcc when building CUDA sources""" - args = [ - "-O3", "--use_fast_math", - "-std=c++17" if sys.platform == "win32" else "-std=c++14", - "-U__CUDA_NO_HALF_OPERATORS__", - "-U__CUDA_NO_HALF_CONVERSIONS__", - "-U__CUDA_NO_HALF2_OPERATORS__", - ] - return args - - def build_op(self): - from torch.utils.cpp_extension import CppExtension - return CppExtension( - name=self.absolute_name(), - sources=self.sources(), - include_dirs=self.include_dirs(), - define_macros=self.define_macros(), - extra_compile_args={ - "cxx": self.cxx_args(), - }, - ) - - def load(self, verbose=True): - try: - # try to import op from pre-installed package - return importlib.import_module(self.absolute_name()) - except Exception: # op not compiled, jit load - from yolox.utils import wait_for_the_master - with wait_for_the_master(): # to avoid race condition - return self.jit_load(verbose) - - def jit_load(self, verbose=True): - from torch.utils.cpp_extension import load - from loguru import logger - try: - import ninja # noqa - except ImportError: - if verbose: - logger.warning( - f"Ninja is not installed, falling back to normal installation for {self.name}." - ) - - build_tik = time.time() - # build op and load - op_module = load( - name=self.name, - sources=self.sources(), - extra_cflags=self.cxx_args(), - extra_cuda_cflags=self.nvcc_args(), - verbose=verbose, - ) - build_duration = time.time() - build_tik - if verbose: - logger.info(f"Loaded {self.name} op in {build_duration:.3f}s.") - return op_module - - def clear_dynamic_library(self): - """Remove dynamic library files generated by JIT compilation.""" - module = self.load() - os.remove(module.__file__) - - -class FastCOCOEvalOp(JitOp): - - def __init__(self, name="fast_cocoeval"): - super().__init__(name=name) - - def absolute_name(self): - return f'yolox.layers.{self.name}' - - def sources(self): - sources = glob.glob(os.path.join("yolox", "layers", "cocoeval", "*.cpp")) - if not sources: # sources will be an empty list if the .so file was removed after install - # use an absolute path to compile - import yolox - code_path = os.path.join(yolox.__path__[0], "layers", "cocoeval", "*.cpp") - sources = glob.glob(code_path) - return sources - - def include_dirs(self): - return [os.path.join("yolox", "layers", "cocoeval")] diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/utils_ner.py b/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/utils_ner.py deleted file mode 100644 index 2b54c7c4a49159fa45349da333aeffa3679478f1..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/utils_ner.py +++ /dev/null @@ -1,372 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """ - - -import logging -import os -from dataclasses import dataclass -from enum import Enum -from typing import List, Optional, Union - -from filelock import FileLock - -from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available - - -logger = logging.getLogger(__name__) - - -@dataclass -class InputExample: - """ - A single training/test example for token classification. - - Args: - guid: Unique id for the example. - words: list. The words of the sequence. - labels: (Optional) list. The labels for each word of the sequence. This should be - specified for train and dev examples, but not for test examples. - """ - - guid: str - words: List[str] - labels: Optional[List[str]] - - -@dataclass -class InputFeatures: - """ - A single set of features of data. - Property names are the same names as the corresponding inputs to a model. - """ - - input_ids: List[int] - attention_mask: List[int] - token_type_ids: Optional[List[int]] = None - label_ids: Optional[List[int]] = None - - -class Split(Enum): - train = "train" - dev = "dev" - test = "test" - - -class TokenClassificationTask: - @staticmethod - def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]: - raise NotImplementedError - - @staticmethod - def get_labels(path: str) -> List[str]: - raise NotImplementedError - - @staticmethod - def convert_examples_to_features( - examples: List[InputExample], - label_list: List[str], - max_seq_length: int, - tokenizer: PreTrainedTokenizer, - cls_token_at_end=False, - cls_token="[CLS]", - cls_token_segment_id=1, - sep_token="[SEP]", - sep_token_extra=False, - pad_on_left=False, - pad_token=0, - pad_token_segment_id=0, - pad_token_label_id=-100, - sequence_a_segment_id=0, - mask_padding_with_zero=True, - ) -> List[InputFeatures]: - """Loads a data file into a list of `InputFeatures` - `cls_token_at_end` define the location of the CLS token: - - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] - `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) - """ - # TODO clean up all this to leverage built-in features of tokenizers - - label_map = {label: i for i, label in enumerate(label_list)} - - features = [] - for ex_index, example in enumerate(examples): - if ex_index % 10_000 == 0: - logger.info("Writing example %d of %d", ex_index, len(examples)) - - tokens = [] - label_ids = [] - for word, label in zip(example.words, example.labels): - word_tokens = tokenizer.tokenize(word) - - # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. - if len(word_tokens) > 0: - tokens.extend(word_tokens) - # Use the real label id for the first token of the word, and padding ids for the remaining tokens - label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)) - - # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. 
- special_tokens_count = tokenizer.num_special_tokens_to_add() - if len(tokens) > max_seq_length - special_tokens_count: - tokens = tokens[: (max_seq_length - special_tokens_count)] - label_ids = label_ids[: (max_seq_length - special_tokens_count)] - - # The convention in BERT is: - # (a) For sequence pairs: - # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] - # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 - # (b) For single sequences: - # tokens: [CLS] the dog is hairy . [SEP] - # type_ids: 0 0 0 0 0 0 0 - # - # Where "type_ids" are used to indicate whether this is the first - # sequence or the second sequence. The embedding vectors for `type=0` and - # `type=1` were learned during pre-training and are added to the wordpiece - # embedding vector (and position vector). This is not *strictly* necessary - # since the [SEP] token unambiguously separates the sequences, but it makes - # it easier for the model to learn the concept of sequences. - # - # For classification tasks, the first vector (corresponding to [CLS]) is - # used as the "sentence vector". Note that this only makes sense because - # the entire model is fine-tuned. - tokens += [sep_token] - label_ids += [pad_token_label_id] - if sep_token_extra: - # roberta uses an extra separator b/w pairs of sentences - tokens += [sep_token] - label_ids += [pad_token_label_id] - segment_ids = [sequence_a_segment_id] * len(tokens) - - if cls_token_at_end: - tokens += [cls_token] - label_ids += [pad_token_label_id] - segment_ids += [cls_token_segment_id] - else: - tokens = [cls_token] + tokens - label_ids = [pad_token_label_id] + label_ids - segment_ids = [cls_token_segment_id] + segment_ids - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) - - # Zero-pad up to the sequence length. 
- padding_length = max_seq_length - len(input_ids) - if pad_on_left: - input_ids = ([pad_token] * padding_length) + input_ids - input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask - segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids - label_ids = ([pad_token_label_id] * padding_length) + label_ids - else: - input_ids += [pad_token] * padding_length - input_mask += [0 if mask_padding_with_zero else 1] * padding_length - segment_ids += [pad_token_segment_id] * padding_length - label_ids += [pad_token_label_id] * padding_length - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - assert len(label_ids) == max_seq_length - - if ex_index < 5: - logger.info("*** Example ***") - logger.info("guid: %s", example.guid) - logger.info("tokens: %s", " ".join([str(x) for x in tokens])) - logger.info("input_ids: %s", " ".join([str(x) for x in input_ids])) - logger.info("input_mask: %s", " ".join([str(x) for x in input_mask])) - logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) - logger.info("label_ids: %s", " ".join([str(x) for x in label_ids])) - - if "token_type_ids" not in tokenizer.model_input_names: - segment_ids = None - - features.append( - InputFeatures( - input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids - ) - ) - return features - - -if is_torch_available(): - import torch - from torch import nn - from torch.utils.data import Dataset - - class TokenClassificationDataset(Dataset): - """ - This will be superseded by a framework-agnostic approach - soon. - """ - - features: List[InputFeatures] - pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index - # Use cross entropy ignore_index as padding label id so that only - # real label ids contribute to the loss later. - - def __init__( - self, - token_classification_task: TokenClassificationTask, - data_dir: str, - tokenizer: PreTrainedTokenizer, - labels: List[str], - model_type: str, - max_seq_length: Optional[int] = None, - overwrite_cache=False, - mode: Split = Split.train, - ): - # Load data features from cache or dataset file - cached_features_file = os.path.join( - data_dir, - "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), - ) - - # Make sure only the first process in distributed training processes the dataset, - # and the others will use the cache. - lock_path = cached_features_file + ".lock" - with FileLock(lock_path): - if os.path.exists(cached_features_file) and not overwrite_cache: - logger.info(f"Loading features from cached file {cached_features_file}") - self.features = torch.load(cached_features_file) - else: - logger.info(f"Creating features from dataset file at {data_dir}") - examples = token_classification_task.read_examples_from_file(data_dir, mode) - # TODO clean up all this to leverage built-in features of tokenizers - self.features = token_classification_task.convert_examples_to_features( - examples, - labels, - max_seq_length, - tokenizer, - cls_token_at_end=bool(model_type in ["xlnet"]), - # xlnet has a cls token at the end - cls_token=tokenizer.cls_token, - cls_token_segment_id=2 if model_type in ["xlnet"] else 0, - sep_token=tokenizer.sep_token, - sep_token_extra=False, - # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 - pad_on_left=bool(tokenizer.padding_side == "left"), - pad_token=tokenizer.pad_token_id, - pad_token_segment_id=tokenizer.pad_token_type_id, - pad_token_label_id=self.pad_token_label_id, - ) - logger.info(f"Saving features into cached file {cached_features_file}") - torch.save(self.features, cached_features_file) - - def __len__(self): - return len(self.features) - - def __getitem__(self, i) -> InputFeatures: - return self.features[i] - - -if is_tf_available(): - import tensorflow as tf - - class TFTokenClassificationDataset: - """ - This will be superseded by a framework-agnostic approach - soon. - """ - - features: List[InputFeatures] - pad_token_label_id: int = -100 - # Use cross entropy ignore_index as padding label id so that only - # real label ids contribute to the loss later. - - def __init__( - self, - token_classification_task: TokenClassificationTask, - data_dir: str, - tokenizer: PreTrainedTokenizer, - labels: List[str], - model_type: str, - max_seq_length: Optional[int] = None, - overwrite_cache=False, - mode: Split = Split.train, - ): - examples = token_classification_task.read_examples_from_file(data_dir, mode) - # TODO clean up all this to leverage built-in features of tokenizers - self.features = token_classification_task.convert_examples_to_features( - examples, - labels, - max_seq_length, - tokenizer, - cls_token_at_end=bool(model_type in ["xlnet"]), - # xlnet has a cls token at the end - cls_token=tokenizer.cls_token, - cls_token_segment_id=2 if model_type in ["xlnet"] else 0, - sep_token=tokenizer.sep_token, - sep_token_extra=False, - # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 - pad_on_left=bool(tokenizer.padding_side == "left"), - pad_token=tokenizer.pad_token_id, - pad_token_segment_id=tokenizer.pad_token_type_id, - pad_token_label_id=self.pad_token_label_id, - ) - - def gen(): - for ex in self.features: - if ex.token_type_ids is None: - yield ( - {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, - ex.label_ids, - ) - else: - yield ( - { - "input_ids": ex.input_ids, - "attention_mask": ex.attention_mask, - "token_type_ids": ex.token_type_ids, - }, - ex.label_ids, - ) - - if "token_type_ids" not in tokenizer.model_input_names: - self.dataset = tf.data.Dataset.from_generator( - gen, - ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), - ( - {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, - tf.TensorShape([None]), - ), - ) - else: - self.dataset = tf.data.Dataset.from_generator( - gen, - ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), - ( - { - "input_ids": tf.TensorShape([None]), - "attention_mask": tf.TensorShape([None]), - "token_type_ids": tf.TensorShape([None]), - }, - tf.TensorShape([None]), - ), - ) - - def get_dataset(self): - self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features))) - - return self.dataset - - def __len__(self): - return len(self.features) - - def __getitem__(self, i) -> InputFeatures: - return self.features[i] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/explicitClosingLinePen.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/explicitClosingLinePen.py deleted file mode 100644 index 
e3c9c943cc504e970d4e9ec9f96c3817d8383ccf..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/pens/explicitClosingLinePen.py +++ /dev/null @@ -1,101 +0,0 @@ -from fontTools.pens.filterPen import ContourFilterPen - - -class ExplicitClosingLinePen(ContourFilterPen): - """A filter pen that adds an explicit lineTo to the first point of each closed - contour if the end point of the last segment is not already the same as the first point. - Otherwise, it passes the contour through unchanged. - - >>> from pprint import pprint - >>> from fontTools.pens.recordingPen import RecordingPen - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.lineTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (100, 100)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (100, 100))), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (100, 100)) - >>> pen.lineTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (100, 100))), - ('lineTo', ((0, 0),)), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.curveTo((100, 0), (0, 100), (0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('curveTo', ((100, 0), (0, 100), (0, 0))), - ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.closePath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), ('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.closePath() - >>> pprint(rec.value) - [('closePath', ())] - >>> rec = RecordingPen() - >>> pen = ExplicitClosingLinePen(rec) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((100, 0)) - >>> pen.lineTo((100, 100)) - >>> pen.endPath() - >>> pprint(rec.value) - [('moveTo', ((0, 0),)), - ('lineTo', ((100, 0),)), - ('lineTo', ((100, 100),)), - ('endPath', ())] - """ - - def filterContour(self, contour): - if ( - not contour - or contour[0][0] != "moveTo" - or contour[-1][0] != "closePath" - or len(contour) < 3 - ): - return - movePt = contour[0][1][0] - lastSeg = contour[-2][1] - if lastSeg and movePt != lastSeg[-1]: - contour[-1:] = [("lineTo", (movePt,)), ("closePath", ())] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I__0.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I__0.py deleted file mode 100644 index 4112937d45d973bb61ae4ccf825f99a752901ed0..0000000000000000000000000000000000000000 --- 
a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I__0.py +++ /dev/null @@ -1,57 +0,0 @@ -""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) -tool to store its hinting source data. - -TSI0 is the index table containing the lengths and offsets for the glyph -programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained -in the TSI1 table. -""" -from . import DefaultTable -import struct - -tsi0Format = ">HHL" - - -def fixlongs(glyphID, textLength, textOffset): - return int(glyphID), int(textLength), textOffset - - -class table_T_S_I__0(DefaultTable.DefaultTable): - - dependencies = ["TSI1"] - - def decompile(self, data, ttFont): - numGlyphs = ttFont["maxp"].numGlyphs - indices = [] - size = struct.calcsize(tsi0Format) - for i in range(numGlyphs + 5): - glyphID, textLength, textOffset = fixlongs( - *struct.unpack(tsi0Format, data[:size]) - ) - indices.append((glyphID, textLength, textOffset)) - data = data[size:] - assert len(data) == 0 - assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number" - self.indices = indices[:-5] - self.extra_indices = indices[-4:] - - def compile(self, ttFont): - if not hasattr(self, "indices"): - # We have no corresponding table (TSI1 or TSI3); let's return - # no data, which effectively means "ignore us". - return b"" - data = b"" - for index, textLength, textOffset in self.indices: - data = data + struct.pack(tsi0Format, index, textLength, textOffset) - data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34) - for index, textLength, textOffset in self.extra_indices: - data = data + struct.pack(tsi0Format, index, textLength, textOffset) - return data - - def set(self, indices, extra_indices): - # gets called by 'TSI1' or 'TSI3' - self.indices = indices - self.extra_indices = extra_indices - - def toXML(self, writer, ttFont): - writer.comment("This table will be calculated by the compiler") - writer.newline() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/struct_pb2.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/struct_pb2.py deleted file mode 100644 index ec1d62d0403752dc028d38a3615b9993ed36b55c..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/struct_pb2.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/protobuf/struct.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n\x06Struct\x12;\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.protobuf.Struct.FieldsEntryR\x06\x66ields\x1aQ\n\x0b\x46ieldsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.ValueR\x05value:\x02\x38\x01\"\xb2\x02\n\x05Value\x12;\n\nnull_value\x18\x01 \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n\x0cnumber_value\x18\x02 \x01(\x01H\x00R\x0bnumberValue\x12#\n\x0cstring_value\x18\x03 \x01(\tH\x00R\x0bstringValue\x12\x1f\n\nbool_value\x18\x04 \x01(\x08H\x00R\tboolValue\x12<\n\x0cstruct_value\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00R\x0bstructValue\x12;\n\nlist_value\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n\x04kind\";\n\tListValue\x12.\n\x06values\x18\x01 \x03(\x0b\x32\x16.google.protobuf.ValueR\x06values*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x7f\n\x13\x63om.google.protobufB\x0bStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.struct_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\013StructProtoP\001Z/google.golang.org/protobuf/types/known/structpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' - _STRUCT_FIELDSENTRY._options = None - _STRUCT_FIELDSENTRY._serialized_options = b'8\001' - _globals['_NULLVALUE']._serialized_start=574 - _globals['_NULLVALUE']._serialized_end=601 - _globals['_STRUCT']._serialized_start=50 - _globals['_STRUCT']._serialized_end=202 - _globals['_STRUCT_FIELDSENTRY']._serialized_start=121 - _globals['_STRUCT_FIELDSENTRY']._serialized_end=202 - _globals['_VALUE']._serialized_start=205 - _globals['_VALUE']._serialized_end=511 - _globals['_LISTVALUE']._serialized_start=513 - _globals['_LISTVALUE']._serialized_end=572 -# @@protoc_insertion_point(module_scope) diff --git a/spaces/cihyFjudo/fairness-paper-search/SAM CAST 3.3.0 CRACK.rar 4 The Ultimate Guide for Online Broadcasters.md b/spaces/cihyFjudo/fairness-paper-search/SAM CAST 3.3.0 CRACK.rar 4 The Ultimate Guide for Online Broadcasters.md deleted file mode 100644 index 2e32fe5ff4b03985a61e08a6721d18c56936bb21..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/SAM CAST 3.3.0 CRACK.rar 4 The Ultimate Guide for Online Broadcasters.md +++ /dev/null @@ -1,6 +0,0 @@ -


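As a usage note for the generated `struct_pb2` module above: protobuf's well-known-types support gives `Struct` and `Value` dict-like helpers, so callers rarely touch the descriptor machinery directly. A small sketch, assuming only a standard `protobuf` installation (none of these names come from the deleted Space):

```python
from google.protobuf.struct_pb2 import Struct, Value

s = Struct()
s.update({"name": "demo", "count": 3, "tags": ["a", "b"]})  # plain dict -> Struct
assert s["name"] == "demo"   # item access comes from the well-known-types mixin
assert s["count"] == 3.0     # every number is stored as number_value (a double)

v = Value(string_value="hello")  # Value holds exactly one member of the `kind` oneof
assert v.WhichOneof("kind") == "string_value"
```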
diff --git a/spaces/codedog-ai/edu-assistant/edu_assistant/learning_tasks/coding_problem.py b/spaces/codedog-ai/edu-assistant/edu_assistant/learning_tasks/coding_problem.py deleted file mode 100644 index b003985c8aa053e6fc151f3e02e2e4e7a04eaa35..0000000000000000000000000000000000000000 --- a/spaces/codedog-ai/edu-assistant/edu_assistant/learning_tasks/coding_problem.py +++ /dev/null @@ -1,256 +0,0 @@ -from langchain import ConversationChain, PromptTemplate -from langchain.chains import ConversationalRetrievalChain -from langchain.chains.base import Chain -from langchain.memory import ConversationBufferMemory -from langchain.schema import BaseRetriever -from pydantic import BaseModel, Field -from pydantic_redis import Model, Store - -from edu_assistant.learning_tasks.base import BaseTask -from edu_assistant.utils.langchain_utils import ( - escape_for_prompt, - load_gpt4_llm, - load_llm, -) -from edu_assistant.utils.redis_utils import get_redis_config - -TEMPLATE = """The following is a friendly conversation between a human and an AI. -The AI is talkative and provides lots of specific details from its context. -If the AI does not know the answer to a question, it truthfully says it does not know. -The AI acts according to the instructions below: ---- -{instruction} ---- - -The coding problem: ---- -{problem} ---- - -Student's code: ---- -{answer} ---- - -Current conversation: -{{chat_history}} -Human: {{input}} -AI:""" - -KNOWLEDGE_TEMPLATE = """The following is a friendly conversation between a human and an AI. -The AI is talkative and provides lots of specific details from its context. -If the AI does not know the answer to a question, it truthfully says it does not know. -The AI acts according to the instructions below: ---- -{instruction} ---- - -The coding problem: ---- -{problem} ---- - -Student's code: -``` -{answer} -``` - -Extra information that might be helpful for you: ---- -{{context}} ---- - -Current conversation: -{{chat_history}} -Human: {{question}} -AI: -""" - - -DEFAULT_INSTRUCTION = """Act as a C++ professional to check the student's code. -The code was written by a student aged 5-10 and is most likely buggy or badly performing. -""" - -DEFAULT_FIRST_QUESTION = "请问这段代码中有什么问题吗?"  # "Could you tell me what is wrong with this code?" - - -class CodingProblem(Model): - _primary_key_field: str = "title" - title: str = Field() - question: str = Field() - standard_answer: str = Field(default="") - analysis: str = Field(default="") - language: str = Field(default="") - extra: list[str] = Field(default_factory=list) - - # TODO: add a cache to the expr function with the pydantic 2 computed_field decorator, - # once langchain supports pydantic 2.
- - @staticmethod - def enable_redis_orm(): - store = Store(name="coding_problems", redis_config=get_redis_config(), life_span_in_seconds=3600 * 24 * 30) - - store.register_model(CodingProblem) - - def expr(self, lang=""): - expr = f"## Question\n\n---\n{escape_for_prompt(self.question)}\n---\n\n" - expr += ( - f"""## Standard Answer (There might be others)\n\n```{lang if lang else self.language} -{escape_for_prompt(self.standard_answer)}\n``` -""" - if self.standard_answer - else "" - ) - expr += f"## Analysis\n\n---\n{escape_for_prompt(self.analysis)}\n---\n\n" if self.analysis else "" - expr += "## Extra\n\n" + escape_for_prompt("".join(self.extra)) + "\n" - return expr - - def __str__(self): - return self.expr() - - -class CodingAnswer(BaseModel): - answer: str = Field() - extra: list[str] = Field(default_factory=list) - - def expr(self, lang=""): - expr = f"Answer:\n```{lang}\n{escape_for_prompt(self.answer)}\n```\n" - expr += escape_for_prompt("".join(self.extra)) + "\n" - return expr - - def __str__(self): - return self.expr() - - -class CodingProblemAnalysis(BaseTask): - HISTORY_KEY = "chat_history" - - def __init__( - self, - instruction: str = DEFAULT_INSTRUCTION, - first_question: str = DEFAULT_FIRST_QUESTION, - lang: str = "", - knowledge: BaseRetriever = None, - enable_gpt4: bool = False, - ): - self.instruction = instruction - self.first_question = first_question - self.lang = lang - self.enable_gpt4 = enable_gpt4 - # TODO: load the threshold key from the implementation and the value from config - self.vectordbkwargs = {"score_threshold": 0.9} # Qdrant cosine; higher is better. - - if knowledge: - self._input_key = "question" - self._output_key = "answer" - else: - self._input_key = "input" - self._output_key = "response" - - self._session_store = {} - self._knowledge = knowledge - - self._init_llm() - - @staticmethod - def build_coding_problem(question: str, standard_answer: str = "", analysis: str = "", extra: list[str] = None): - extra = [] if extra is None else extra - return CodingProblem(question=question, standard_answer=standard_answer, analysis=analysis, extra=extra) - - @staticmethod - def build_coding_answer(answer: str, extra: list[str] = None): - extra = [] if extra is None else extra - return CodingAnswer(answer=answer, extra=extra) - - def start_analysis(self, problem: CodingProblem, answer: CodingAnswer, first_question: str = None) -> dict: - """Start analysis of a coding problem and an incorrect answer. - - Args: - problem (CodingProblem): a coding problem - answer (CodingAnswer): a coding problem answer - - Returns: - dict: question answer and metadata - """ - chain = self._build_chain(problem, answer) - session_id = self._create_session_id() - self._session_store[session_id] = chain - - args = {self._input_key: first_question if first_question else self.first_question, self.HISTORY_KEY: ""} - - # TODO: ConversationalRetrievalChain should support vectordbkwargs - # if self._knowledge: - # args["vectordbkwargs"] = self.vectordbkwargs - - result = chain(args) - - result["session_id"] = session_id - - return result - - def ask(self, question: str, session_id: str) -> dict: - """Ask a further question on a coding problem. - - Args: - question (str): question to the llm. - session_id (str): specify a problem and answer session.
- - Returns: - dict: question answer and metadata - """ - assert question - - if session_id not in self._session_store: - return {} - - chain = self._session_store[session_id] - - args = {self._input_key: question} - - # if self._knowledge: - # args["vectordbkwargs"] = self.vectordbkwargs - - result = chain(args) - - result["session_id"] = session_id - - return result - - def _init_llm(self): - self._main_llm = load_gpt4_llm() if self.enable_gpt4 else load_llm() - self._secondary_llm = load_llm() - - def _build_chain(self, problem: CodingProblem, answer: CodingAnswer) -> Chain: - memory = ConversationBufferMemory( - memory_key=self.HISTORY_KEY, output_key=self._output_key, return_messages=True - ) - - if not self._knowledge: - prompt = PromptTemplate.from_template( - TEMPLATE.format( - instruction=self.instruction, - problem=problem.expr(lang=problem.language or self.lang), - answer=answer.expr(lang=problem.language or self.lang), - ) - ) - return ConversationChain( - llm=self._main_llm, - memory=memory, - prompt=prompt, - ) - else: - prompt = PromptTemplate.from_template( - KNOWLEDGE_TEMPLATE.format( - instruction=self.instruction, - problem=problem.expr(lang=problem.language or self.lang), - answer=answer.expr(lang=problem.language or self.lang), - ) - ) - return ConversationalRetrievalChain.from_llm( - llm=self._main_llm, - memory=memory, - retriever=self._knowledge, - condense_question_llm=self._secondary_llm, - return_source_documents=True, - combine_docs_chain_kwargs={"prompt": prompt}, - ) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacpsdsp_float.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacpsdsp_float.c deleted file mode 100644 index 99aa650acff7da3dea2d641ebd0447684b8845e2..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacpsdsp_float.c +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2010 Alex Converse - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define USE_FIXED 0 - -#include "aacpsdsp_template.c" diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/extract_extradata_bsf.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/extract_extradata_bsf.c deleted file mode 100644 index 329b1a61744a90a8dd13c110856c1b2d9e20519e..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/extract_extradata_bsf.c +++ /dev/null @@ -1,433 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include - -#include "libavutil/log.h" -#include "libavutil/opt.h" - -#include "av1.h" -#include "av1_parse.h" -#include "bsf.h" -#include "bsf_internal.h" -#include "bytestream.h" -#include "h2645_parse.h" -#include "h264.h" -#include "hevc.h" -#include "startcode.h" -#include "vc1_common.h" - -typedef struct ExtractExtradataContext { - const AVClass *class; - - int (*extract)(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size); - - /* AV1 specific fields */ - AV1Packet av1_pkt; - - /* H264/HEVC specific fields */ - H2645Packet h2645_pkt; - - /* AVOptions */ - int remove; -} ExtractExtradataContext; - -static int val_in_array(const int *arr, int len, int val) -{ - int i; - for (i = 0; i < len; i++) - if (arr[i] == val) - return 1; - return 0; -} - -static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size) -{ - static const int extradata_obu_types[] = { - AV1_OBU_SEQUENCE_HEADER, AV1_OBU_METADATA, - }; - ExtractExtradataContext *s = ctx->priv_data; - - int extradata_size = 0, filtered_size = 0; - int nb_extradata_obu_types = FF_ARRAY_ELEMS(extradata_obu_types); - int i, has_seq = 0, ret = 0; - - ret = ff_av1_packet_split(&s->av1_pkt, pkt->data, pkt->size, ctx); - if (ret < 0) - return ret; - - for (i = 0; i < s->av1_pkt.nb_obus; i++) { - AV1OBU *obu = &s->av1_pkt.obus[i]; - if (val_in_array(extradata_obu_types, nb_extradata_obu_types, obu->type)) { - extradata_size += obu->raw_size; - if (obu->type == AV1_OBU_SEQUENCE_HEADER) - has_seq = 1; - } else if (s->remove) { - filtered_size += obu->raw_size; - } - } - - if (extradata_size && has_seq) { - AVBufferRef *filtered_buf = NULL; - PutByteContext pb_filtered_data, pb_extradata; - uint8_t *extradata; - - if (s->remove) { - filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!filtered_buf) { - return AVERROR(ENOMEM); - } - memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - } - - extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!extradata) { - av_buffer_unref(&filtered_buf); - return AVERROR(ENOMEM); - } - - *data = extradata; - *size = extradata_size; - - bytestream2_init_writer(&pb_extradata, extradata, extradata_size); - if (s->remove) - bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size); - - for (i = 0; i < s->av1_pkt.nb_obus; i++) { - AV1OBU *obu = &s->av1_pkt.obus[i]; - if (val_in_array(extradata_obu_types, nb_extradata_obu_types, - obu->type)) { - bytestream2_put_bufferu(&pb_extradata, obu->raw_data, obu->raw_size); - } else if (s->remove) { - bytestream2_put_bufferu(&pb_filtered_data, obu->raw_data, obu->raw_size); - } - } - - if (s->remove) { - av_buffer_unref(&pkt->buf); - pkt->buf = filtered_buf; - pkt->data = filtered_buf->data; - pkt->size = filtered_size; - } - } - - return 0; -} - -static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size) -{ - static const int extradata_nal_types_hevc[] = { - HEVC_NAL_VPS, HEVC_NAL_SPS, HEVC_NAL_PPS, - }; - static const int 
extradata_nal_types_h264[] = { - H264_NAL_SPS, H264_NAL_PPS, - }; - - ExtractExtradataContext *s = ctx->priv_data; - - int extradata_size = 0, filtered_size = 0; - const int *extradata_nal_types; - int nb_extradata_nal_types; - int i, has_sps = 0, has_vps = 0, ret = 0; - - if (ctx->par_in->codec_id == AV_CODEC_ID_HEVC) { - extradata_nal_types = extradata_nal_types_hevc; - nb_extradata_nal_types = FF_ARRAY_ELEMS(extradata_nal_types_hevc); - } else { - extradata_nal_types = extradata_nal_types_h264; - nb_extradata_nal_types = FF_ARRAY_ELEMS(extradata_nal_types_h264); - } - - ret = ff_h2645_packet_split(&s->h2645_pkt, pkt->data, pkt->size, - ctx, 0, 0, ctx->par_in->codec_id, 1, 0); - if (ret < 0) - return ret; - - for (i = 0; i < s->h2645_pkt.nb_nals; i++) { - H2645NAL *nal = &s->h2645_pkt.nals[i]; - if (val_in_array(extradata_nal_types, nb_extradata_nal_types, nal->type)) { - extradata_size += nal->raw_size + 3; - if (ctx->par_in->codec_id == AV_CODEC_ID_HEVC) { - if (nal->type == HEVC_NAL_SPS) has_sps = 1; - if (nal->type == HEVC_NAL_VPS) has_vps = 1; - } else { - if (nal->type == H264_NAL_SPS) has_sps = 1; - } - } else if (s->remove) { - filtered_size += nal->raw_size + 3; - } - } - - if (extradata_size && - ((ctx->par_in->codec_id == AV_CODEC_ID_HEVC && has_sps && has_vps) || - (ctx->par_in->codec_id == AV_CODEC_ID_H264 && has_sps))) { - AVBufferRef *filtered_buf = NULL; - PutByteContext pb_filtered_data, pb_extradata; - uint8_t *extradata; - - if (s->remove) { - filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!filtered_buf) { - return AVERROR(ENOMEM); - } - memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - } - - extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!extradata) { - av_buffer_unref(&filtered_buf); - return AVERROR(ENOMEM); - } - - *data = extradata; - *size = extradata_size; - - bytestream2_init_writer(&pb_extradata, extradata, extradata_size); - if (s->remove) - bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size); - - for (i = 0; i < s->h2645_pkt.nb_nals; i++) { - H2645NAL *nal = &s->h2645_pkt.nals[i]; - if (val_in_array(extradata_nal_types, nb_extradata_nal_types, - nal->type)) { - bytestream2_put_be24u(&pb_extradata, 1); //startcode - bytestream2_put_bufferu(&pb_extradata, nal->raw_data, nal->raw_size); - } else if (s->remove) { - bytestream2_put_be24u(&pb_filtered_data, 1); // startcode - bytestream2_put_bufferu(&pb_filtered_data, nal->raw_data, nal->raw_size); - } - } - - if (s->remove) { - av_buffer_unref(&pkt->buf); - pkt->buf = filtered_buf; - pkt->data = filtered_buf->data; - pkt->size = filtered_size; - } - } - - return 0; -} - -static int extract_extradata_vc1(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size) -{ - ExtractExtradataContext *s = ctx->priv_data; - const uint8_t *ptr = pkt->data, *end = pkt->data + pkt->size; - uint32_t state = UINT32_MAX; - int has_extradata = 0, extradata_size = 0; - - while (ptr < end) { - ptr = avpriv_find_start_code(ptr, end, &state); - if (state == VC1_CODE_SEQHDR || state == VC1_CODE_ENTRYPOINT) { - has_extradata = 1; - } else if (has_extradata && IS_MARKER(state)) { - extradata_size = ptr - 4 - pkt->data; - break; - } - } - - if (extradata_size) { - *data = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!*data) - return AVERROR(ENOMEM); - - memcpy(*data, pkt->data, extradata_size); - *size = extradata_size; - - if (s->remove) { - pkt->data += extradata_size; - pkt->size -= 
extradata_size; - } - } - - return 0; -} - -static int extract_extradata_mpeg12(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size) -{ - ExtractExtradataContext *s = ctx->priv_data; - uint32_t state = UINT32_MAX; - int i, found = 0; - - for (i = 0; i < pkt->size; i++) { - state = (state << 8) | pkt->data[i]; - if (state == 0x1B3) - found = 1; - else if (found && state != 0x1B5 && state < 0x200 && state >= 0x100) { - *size = i - 3; - *data = av_malloc(*size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!*data) - return AVERROR(ENOMEM); - - memcpy(*data, pkt->data, *size); - - if (s->remove) { - pkt->data += *size; - pkt->size -= *size; - } - break; - } - } - return 0; -} - -static int extract_extradata_mpeg4(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size) -{ - ExtractExtradataContext *s = ctx->priv_data; - const uint8_t *ptr = pkt->data, *end = pkt->data + pkt->size; - uint32_t state = UINT32_MAX; - - while (ptr < end) { - ptr = avpriv_find_start_code(ptr, end, &state); - if (state == 0x1B3 || state == 0x1B6) { - if (ptr - pkt->data > 4) { - *size = ptr - 4 - pkt->data; - *data = av_malloc(*size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!*data) - return AVERROR(ENOMEM); - - memcpy(*data, pkt->data, *size); - - if (s->remove) { - pkt->data += *size; - pkt->size -= *size; - } - } - break; - } - } - return 0; -} - -static const struct { - enum AVCodecID id; - int (*extract)(AVBSFContext *ctx, AVPacket *pkt, - uint8_t **data, int *size); -} extract_tab[] = { - { AV_CODEC_ID_AV1, extract_extradata_av1 }, - { AV_CODEC_ID_AVS2, extract_extradata_mpeg4 }, - { AV_CODEC_ID_AVS3, extract_extradata_mpeg4 }, - { AV_CODEC_ID_CAVS, extract_extradata_mpeg4 }, - { AV_CODEC_ID_H264, extract_extradata_h2645 }, - { AV_CODEC_ID_HEVC, extract_extradata_h2645 }, - { AV_CODEC_ID_MPEG1VIDEO, extract_extradata_mpeg12 }, - { AV_CODEC_ID_MPEG2VIDEO, extract_extradata_mpeg12 }, - { AV_CODEC_ID_MPEG4, extract_extradata_mpeg4 }, - { AV_CODEC_ID_VC1, extract_extradata_vc1 }, -}; - -static int extract_extradata_init(AVBSFContext *ctx) -{ - ExtractExtradataContext *s = ctx->priv_data; - int i; - - for (i = 0; i < FF_ARRAY_ELEMS(extract_tab); i++) { - if (extract_tab[i].id == ctx->par_in->codec_id) { - s->extract = extract_tab[i].extract; - break; - } - } - if (!s->extract) - return AVERROR_BUG; - - return 0; -} - -static int extract_extradata_filter(AVBSFContext *ctx, AVPacket *pkt) -{ - ExtractExtradataContext *s = ctx->priv_data; - uint8_t *extradata = NULL; - int extradata_size; - int ret = 0; - - ret = ff_bsf_get_packet_ref(ctx, pkt); - if (ret < 0) - return ret; - - ret = s->extract(ctx, pkt, &extradata, &extradata_size); - if (ret < 0) - goto fail; - - if (extradata) { - memset(extradata + extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - ret = av_packet_add_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, - extradata, extradata_size); - if (ret < 0) { - av_freep(&extradata); - goto fail; - } - } - - return 0; - -fail: - av_packet_unref(pkt); - return ret; -} - -static void extract_extradata_close(AVBSFContext *ctx) -{ - ExtractExtradataContext *s = ctx->priv_data; - ff_av1_packet_uninit(&s->av1_pkt); - ff_h2645_packet_uninit(&s->h2645_pkt); -} - -static const enum AVCodecID codec_ids[] = { - AV_CODEC_ID_AV1, - AV_CODEC_ID_AVS2, - AV_CODEC_ID_AVS3, - AV_CODEC_ID_CAVS, - AV_CODEC_ID_H264, - AV_CODEC_ID_HEVC, - AV_CODEC_ID_MPEG1VIDEO, - AV_CODEC_ID_MPEG2VIDEO, - AV_CODEC_ID_MPEG4, - AV_CODEC_ID_VC1, - AV_CODEC_ID_NONE, -}; - -#define OFFSET(x) offsetof(ExtractExtradataContext, x) -#define FLAGS 
(AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_BSF_PARAM) -static const AVOption options[] = { - { "remove", "remove the extradata from the bitstream", OFFSET(remove), AV_OPT_TYPE_INT, - { .i64 = 0 }, 0, 1, FLAGS }, - { NULL }, -}; - -static const AVClass extract_extradata_class = { - .class_name = "extract_extradata", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFBitStreamFilter ff_extract_extradata_bsf = { - .p.name = "extract_extradata", - .p.codec_ids = codec_ids, - .p.priv_class = &extract_extradata_class, - .priv_data_size = sizeof(ExtractExtradataContext), - .init = extract_extradata_init, - .filter = extract_extradata_filter, - .close = extract_extradata_close, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000.c deleted file mode 100644 index 0aa984bc53f5d0f5c86156b76d3123d430c35019..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jpeg2000.c +++ /dev/null @@ -1,647 +0,0 @@ -/* - * JPEG 2000 encoder and decoder common functions - * Copyright (c) 2007 Kamil Nowosad - * Copyright (c) 2013 Nicolas Bertrand - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * JPEG 2000 image encoder and decoder common functions - */ - -#include "libavutil/attributes.h" -#include "libavutil/avassert.h" -#include "libavutil/common.h" -#include "libavutil/imgutils.h" -#include "libavutil/mem.h" -#include "libavutil/thread.h" -#include "avcodec.h" -#include "internal.h" -#include "jpeg2000.h" - -#define SHL(a, n) ((n) >= 0 ? 
(a) << (n) : (a) >> -(n)) - -/* tag tree routines */ - -static int32_t tag_tree_size(int w, int h) -{ - int64_t res = 0; - while (w > 1 || h > 1) { - res += w * (int64_t)h; - av_assert0(res + 1 < INT32_MAX); - w = (w + 1) >> 1; - h = (h + 1) >> 1; - } - return (int32_t)(res + 1); -} - -/* allocate the memory for tag tree */ -static Jpeg2000TgtNode *ff_jpeg2000_tag_tree_init(int w, int h) -{ - int pw = w, ph = h; - Jpeg2000TgtNode *res, *t, *t2; - int32_t tt_size; - - tt_size = tag_tree_size(w, h); - - t = res = av_calloc(tt_size, sizeof(*t)); - if (!res) - return NULL; - - while (w > 1 || h > 1) { - int i, j; - pw = w; - ph = h; - - w = (w + 1) >> 1; - h = (h + 1) >> 1; - t2 = t + pw * ph; - - for (i = 0; i < ph; i++) - for (j = 0; j < pw; j++) - t[i * pw + j].parent = &t2[(i >> 1) * w + (j >> 1)]; - - t = t2; - } - t[0].parent = NULL; - return res; -} - -void ff_tag_tree_zero(Jpeg2000TgtNode *t, int w, int h, int val) -{ - int i, siz = tag_tree_size(w, h); - - for (i = 0; i < siz; i++) { - t[i].val = val; - t[i].temp_val = 0; - t[i].vis = 0; - } -} - -uint8_t ff_jpeg2000_sigctxno_lut[256][4]; - -static int getsigctxno(int flag, int bandno) -{ - int h, v, d; - - h = ((flag & JPEG2000_T1_SIG_E) ? 1 : 0) + - ((flag & JPEG2000_T1_SIG_W) ? 1 : 0); - v = ((flag & JPEG2000_T1_SIG_N) ? 1 : 0) + - ((flag & JPEG2000_T1_SIG_S) ? 1 : 0); - d = ((flag & JPEG2000_T1_SIG_NE) ? 1 : 0) + - ((flag & JPEG2000_T1_SIG_NW) ? 1 : 0) + - ((flag & JPEG2000_T1_SIG_SE) ? 1 : 0) + - ((flag & JPEG2000_T1_SIG_SW) ? 1 : 0); - - if (bandno < 3) { - if (bandno == 1) - FFSWAP(int, h, v); - if (h == 2) return 8; - if (h == 1) { - if (v >= 1) return 7; - if (d >= 1) return 6; - return 5; - } - if (v == 2) return 4; - if (v == 1) return 3; - if (d >= 2) return 2; - if (d == 1) return 1; - } else { - if (d >= 3) return 8; - if (d == 2) { - if (h+v >= 1) return 7; - return 6; - } - if (d == 1) { - if (h+v >= 2) return 5; - if (h+v == 1) return 4; - return 3; - } - if (h+v >= 2) return 2; - if (h+v == 1) return 1; - } - return 0; -} - -uint8_t ff_jpeg2000_sgnctxno_lut[16][16], ff_jpeg2000_xorbit_lut[16][16]; - -static const int contribtab[3][3] = { { 0, -1, 1 }, { -1, -1, 0 }, { 1, 0, 1 } }; -static const int ctxlbltab[3][3] = { { 13, 12, 11 }, { 10, 9, 10 }, { 11, 12, 13 } }; -static const int xorbittab[3][3] = { { 1, 1, 1 }, { 1, 0, 0 }, { 0, 0, 0 } }; - -static int getsgnctxno(int flag, uint8_t *xorbit) -{ - int vcontrib, hcontrib; - - hcontrib = contribtab[flag & JPEG2000_T1_SIG_E ? flag & JPEG2000_T1_SGN_E ? 1 : 2 : 0] - [flag & JPEG2000_T1_SIG_W ? flag & JPEG2000_T1_SGN_W ? 1 : 2 : 0] + 1; - vcontrib = contribtab[flag & JPEG2000_T1_SIG_S ? flag & JPEG2000_T1_SGN_S ? 1 : 2 : 0] - [flag & JPEG2000_T1_SIG_N ? flag & JPEG2000_T1_SGN_N ? 
1 : 2 : 0] + 1; - *xorbit = xorbittab[hcontrib][vcontrib]; - - return ctxlbltab[hcontrib][vcontrib]; -} - -static void av_cold jpeg2000_init_tier1_luts(void) -{ - int i, j; - for (i = 0; i < 256; i++) - for (j = 0; j < 4; j++) - ff_jpeg2000_sigctxno_lut[i][j] = getsigctxno(i, j); - for (i = 0; i < 16; i++) - for (j = 0; j < 16; j++) - ff_jpeg2000_sgnctxno_lut[i][j] = - getsgnctxno(i + (j << 8), &ff_jpeg2000_xorbit_lut[i][j]); -} - -void av_cold ff_jpeg2000_init_tier1_luts(void) -{ - static AVOnce init_static_once = AV_ONCE_INIT; - ff_thread_once(&init_static_once, jpeg2000_init_tier1_luts); -} - -void ff_jpeg2000_set_significance(Jpeg2000T1Context *t1, int x, int y, - int negative) -{ - x++; - y++; - t1->flags[(y) * t1->stride + x] |= JPEG2000_T1_SIG; - if (negative) { - t1->flags[(y) * t1->stride + x + 1] |= JPEG2000_T1_SIG_W | JPEG2000_T1_SGN_W; - t1->flags[(y) * t1->stride + x - 1] |= JPEG2000_T1_SIG_E | JPEG2000_T1_SGN_E; - t1->flags[(y + 1) * t1->stride + x] |= JPEG2000_T1_SIG_N | JPEG2000_T1_SGN_N; - t1->flags[(y - 1) * t1->stride + x] |= JPEG2000_T1_SIG_S | JPEG2000_T1_SGN_S; - } else { - t1->flags[(y) * t1->stride + x + 1] |= JPEG2000_T1_SIG_W; - t1->flags[(y) * t1->stride + x - 1] |= JPEG2000_T1_SIG_E; - t1->flags[(y + 1) * t1->stride + x] |= JPEG2000_T1_SIG_N; - t1->flags[(y - 1) * t1->stride + x] |= JPEG2000_T1_SIG_S; - } - t1->flags[(y + 1) * t1->stride + x + 1] |= JPEG2000_T1_SIG_NW; - t1->flags[(y + 1) * t1->stride + x - 1] |= JPEG2000_T1_SIG_NE; - t1->flags[(y - 1) * t1->stride + x + 1] |= JPEG2000_T1_SIG_SW; - t1->flags[(y - 1) * t1->stride + x - 1] |= JPEG2000_T1_SIG_SE; -} - -// static const uint8_t lut_gain[2][4] = { { 0, 0, 0, 0 }, { 0, 1, 1, 2 } }; (unused) - -static void init_band_stepsize(AVCodecContext *avctx, - Jpeg2000Band *band, - Jpeg2000CodingStyle *codsty, - Jpeg2000QuantStyle *qntsty, - int bandno, int gbandno, int reslevelno, - int cbps) -{ - /* TODO: Implementation of quantization step not finished, - * see ISO/IEC 15444-1:2002 E.1 and A.6.4. */ - switch (qntsty->quantsty) { - uint8_t gain; - case JPEG2000_QSTY_NONE: - /* TODO: to verify. No quantization in this case */ - band->f_stepsize = 1; - break; - case JPEG2000_QSTY_SI: - /*TODO: Compute formula to implement. */ -// numbps = cbps + -// lut_gain[codsty->transform == FF_DWT53][bandno + (reslevelno > 0)]; -// band->f_stepsize = SHL(2048 + qntsty->mant[gbandno], -// 2 + numbps - qntsty->expn[gbandno]); -// break; - case JPEG2000_QSTY_SE: - /* Exponent quantization step. - * Formula: - * delta_b = 2 ^ (R_b - expn_b) * (1 + (mant_b / 2 ^ 11)) - * R_b = R_I + log2 (gain_b ) - * see ISO/IEC 15444-1:2002 E.1.1 eqn. E-3 and E-4 */ - gain = cbps; - band->f_stepsize = ff_exp2fi(gain - qntsty->expn[gbandno]); - band->f_stepsize *= qntsty->mant[gbandno] / 2048.0 + 1.0; - break; - default: - band->f_stepsize = 0; - av_log(avctx, AV_LOG_ERROR, "Unknown quantization format\n"); - break; - } - if (codsty->transform != FF_DWT53) { - int lband = 0; - switch (bandno + (reslevelno > 0)) { - case 1: - case 2: - band->f_stepsize *= F_LFTG_X * 2; - lband = 1; - break; - case 3: - band->f_stepsize *= F_LFTG_X * F_LFTG_X * 4; - break; - } - if (codsty->transform == FF_DWT97) { - band->f_stepsize *= pow(F_LFTG_K, 2*(codsty->nreslevels2decode - reslevelno) + lband - 2); - } - } - - if (band->f_stepsize > (INT_MAX >> 15)) { - band->f_stepsize = 0; - av_log(avctx, AV_LOG_ERROR, "stepsize out of range\n"); - } - - band->i_stepsize = band->f_stepsize * (1 << 15); - - /* FIXME: In OpenJPEG code stepsize = stepsize * 0.5. Why? 
- * If not set output of entropic decoder is not correct. */ - if (!av_codec_is_encoder(avctx->codec)) - band->f_stepsize *= 0.5; -} - -static int init_prec(AVCodecContext *avctx, - Jpeg2000Band *band, - Jpeg2000ResLevel *reslevel, - Jpeg2000Component *comp, - Jpeg2000CodingStyle *codsty, - int precno, int bandno, int reslevelno, - int log2_band_prec_width, - int log2_band_prec_height) -{ - Jpeg2000Prec *prec = band->prec + precno; - int nb_codeblocks, cblkno; - - prec->decoded_layers = 0; - - /* TODO: Explain formula for JPEG200 DCINEMA. */ - /* TODO: Verify with previous count of codeblocks per band */ - - /* Compute P_x0 */ - prec->coord[0][0] = ((reslevel->coord[0][0] >> reslevel->log2_prec_width) + precno % reslevel->num_precincts_x) * - (1 << log2_band_prec_width); - - /* Compute P_y0 */ - prec->coord[1][0] = ((reslevel->coord[1][0] >> reslevel->log2_prec_height) + precno / reslevel->num_precincts_x) * - (1 << log2_band_prec_height); - - /* Compute P_x1 */ - prec->coord[0][1] = prec->coord[0][0] + - (1 << log2_band_prec_width); - prec->coord[0][0] = FFMAX(prec->coord[0][0], band->coord[0][0]); - prec->coord[0][1] = FFMIN(prec->coord[0][1], band->coord[0][1]); - - /* Compute P_y1 */ - prec->coord[1][1] = prec->coord[1][0] + - (1 << log2_band_prec_height); - prec->coord[1][0] = FFMAX(prec->coord[1][0], band->coord[1][0]); - prec->coord[1][1] = FFMIN(prec->coord[1][1], band->coord[1][1]); - - prec->nb_codeblocks_width = - ff_jpeg2000_ceildivpow2(prec->coord[0][1], - band->log2_cblk_width) - - (prec->coord[0][0] >> band->log2_cblk_width); - prec->nb_codeblocks_height = - ff_jpeg2000_ceildivpow2(prec->coord[1][1], - band->log2_cblk_height) - - (prec->coord[1][0] >> band->log2_cblk_height); - - - /* Tag trees initialization */ - prec->cblkincl = - ff_jpeg2000_tag_tree_init(prec->nb_codeblocks_width, - prec->nb_codeblocks_height); - if (!prec->cblkincl) - return AVERROR(ENOMEM); - - prec->zerobits = - ff_jpeg2000_tag_tree_init(prec->nb_codeblocks_width, - prec->nb_codeblocks_height); - if (!prec->zerobits) - return AVERROR(ENOMEM); - - if (prec->nb_codeblocks_width * (uint64_t)prec->nb_codeblocks_height > INT_MAX) { - prec->cblk = NULL; - return AVERROR(ENOMEM); - } - nb_codeblocks = prec->nb_codeblocks_width * prec->nb_codeblocks_height; - prec->cblk = av_calloc(nb_codeblocks, sizeof(*prec->cblk)); - if (!prec->cblk) - return AVERROR(ENOMEM); - for (cblkno = 0; cblkno < nb_codeblocks; cblkno++) { - Jpeg2000Cblk *cblk = prec->cblk + cblkno; - int Cx0, Cy0; - - /* Compute coordinates of codeblocks */ - /* Compute Cx0*/ - Cx0 = ((prec->coord[0][0]) >> band->log2_cblk_width) << band->log2_cblk_width; - Cx0 = Cx0 + ((cblkno % prec->nb_codeblocks_width) << band->log2_cblk_width); - cblk->coord[0][0] = FFMAX(Cx0, prec->coord[0][0]); - - /* Compute Cy0*/ - Cy0 = ((prec->coord[1][0]) >> band->log2_cblk_height) << band->log2_cblk_height; - Cy0 = Cy0 + ((cblkno / prec->nb_codeblocks_width) << band->log2_cblk_height); - cblk->coord[1][0] = FFMAX(Cy0, prec->coord[1][0]); - - /* Compute Cx1 */ - cblk->coord[0][1] = FFMIN(Cx0 + (1 << band->log2_cblk_width), - prec->coord[0][1]); - - /* Compute Cy1 */ - cblk->coord[1][1] = FFMIN(Cy0 + (1 << band->log2_cblk_height), - prec->coord[1][1]); - /* Update code-blocks coordinates according sub-band position */ - if ((bandno + !!reslevelno) & 1) { - cblk->coord[0][0] += comp->reslevel[reslevelno-1].coord[0][1] - - comp->reslevel[reslevelno-1].coord[0][0]; - cblk->coord[0][1] += comp->reslevel[reslevelno-1].coord[0][1] - - comp->reslevel[reslevelno-1].coord[0][0]; 
- } - if ((bandno + !!reslevelno) & 2) { - cblk->coord[1][0] += comp->reslevel[reslevelno-1].coord[1][1] - - comp->reslevel[reslevelno-1].coord[1][0]; - cblk->coord[1][1] += comp->reslevel[reslevelno-1].coord[1][1] - - comp->reslevel[reslevelno-1].coord[1][0]; - } - - cblk->lblock = 3; - cblk->length = 0; - cblk->npasses = 0; - if (av_codec_is_encoder(avctx->codec)) { - cblk->layers = av_calloc(codsty->nlayers, sizeof(*cblk->layers)); - if (!cblk->layers) - return AVERROR(ENOMEM); - } - } - - return 0; -} - -static int init_band(AVCodecContext *avctx, - Jpeg2000ResLevel *reslevel, - Jpeg2000Component *comp, - Jpeg2000CodingStyle *codsty, - Jpeg2000QuantStyle *qntsty, - int bandno, int gbandno, int reslevelno, - int cbps, int dx, int dy) -{ - Jpeg2000Band *band = reslevel->band + bandno; - uint8_t log2_band_prec_width, log2_band_prec_height; - int declvl = codsty->nreslevels - reslevelno; // N_L -r see ISO/IEC 15444-1:2002 B.5 - int precno; - int nb_precincts; - int i, j, ret; - - init_band_stepsize(avctx, band, codsty, qntsty, bandno, gbandno, reslevelno, cbps); - - /* computation of tbx_0, tbx_1, tby_0, tby_1 - * see ISO/IEC 15444-1:2002 B.5 eq. B-15 and tbl B.1 - * codeblock width and height is computed for - * DCI JPEG 2000 codeblock_width = codeblock_width = 32 = 2 ^ 5 */ - if (reslevelno == 0) { - /* for reslevelno = 0, only one band, x0_b = y0_b = 0 */ - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) - band->coord[i][j] = - ff_jpeg2000_ceildivpow2(comp->coord_o[i][j], - declvl - 1); - log2_band_prec_width = reslevel->log2_prec_width; - log2_band_prec_height = reslevel->log2_prec_height; - /* see ISO/IEC 15444-1:2002 eq. B-17 and eq. B-15 */ - band->log2_cblk_width = FFMIN(codsty->log2_cblk_width, - reslevel->log2_prec_width); - band->log2_cblk_height = FFMIN(codsty->log2_cblk_height, - reslevel->log2_prec_height); - } else { - /* 3 bands x0_b = 1 y0_b = 0; x0_b = 0 y0_b = 1; x0_b = y0_b = 1 */ - /* x0_b and y0_b are computed with ((bandno + 1 >> i) & 1) */ - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) - /* Formula example for tbx_0 = ceildiv((tcx_0 - 2 ^ (declvl - 1) * x0_b) / declvl) */ - band->coord[i][j] = - ff_jpeg2000_ceildivpow2(comp->coord_o[i][j] - - (((bandno + 1 >> i) & 1LL) << declvl - 1), - declvl); - /* TODO: Manage case of 3 band offsets here or - * in coding/decoding function? */ - - /* see ISO/IEC 15444-1:2002 eq. B-17 and eq. 
B-15 */ - band->log2_cblk_width = FFMIN(codsty->log2_cblk_width, - reslevel->log2_prec_width - 1); - band->log2_cblk_height = FFMIN(codsty->log2_cblk_height, - reslevel->log2_prec_height - 1); - - log2_band_prec_width = reslevel->log2_prec_width - 1; - log2_band_prec_height = reslevel->log2_prec_height - 1; - } - - if (reslevel->num_precincts_x * (uint64_t)reslevel->num_precincts_y > INT_MAX) { - band->prec = NULL; - return AVERROR(ENOMEM); - } - nb_precincts = reslevel->num_precincts_x * reslevel->num_precincts_y; - band->prec = av_calloc(nb_precincts, sizeof(*band->prec)); - if (!band->prec) - return AVERROR(ENOMEM); - - for (precno = 0; precno < nb_precincts; precno++) { - ret = init_prec(avctx, band, reslevel, comp, codsty, - precno, bandno, reslevelno, - log2_band_prec_width, log2_band_prec_height); - if (ret < 0) - return ret; - } - - return 0; -} - -int ff_jpeg2000_init_component(Jpeg2000Component *comp, - Jpeg2000CodingStyle *codsty, - Jpeg2000QuantStyle *qntsty, - int cbps, int dx, int dy, - AVCodecContext *avctx) -{ - int reslevelno, bandno, gbandno = 0, ret, i, j; - uint32_t csize; - - if (codsty->nreslevels2decode <= 0) { - av_log(avctx, AV_LOG_ERROR, "nreslevels2decode %d invalid or uninitialized\n", codsty->nreslevels2decode); - return AVERROR_INVALIDDATA; - } - - if (ret = ff_jpeg2000_dwt_init(&comp->dwt, comp->coord, - codsty->nreslevels2decode - 1, - codsty->transform)) - return ret; - - if (av_image_check_size(comp->coord[0][1] - comp->coord[0][0], - comp->coord[1][1] - comp->coord[1][0], 0, avctx)) - return AVERROR_INVALIDDATA; - csize = (comp->coord[0][1] - comp->coord[0][0]) * - (comp->coord[1][1] - comp->coord[1][0]); - if (comp->coord[0][1] - comp->coord[0][0] > 32768 || - comp->coord[1][1] - comp->coord[1][0] > 32768) { - av_log(avctx, AV_LOG_ERROR, "component size too large\n"); - return AVERROR_PATCHWELCOME; - } - - if (codsty->transform == FF_DWT97) { - csize += AV_INPUT_BUFFER_PADDING_SIZE / sizeof(*comp->f_data); - comp->i_data = NULL; - comp->f_data = av_calloc(csize, sizeof(*comp->f_data)); - if (!comp->f_data) - return AVERROR(ENOMEM); - } else { - csize += AV_INPUT_BUFFER_PADDING_SIZE / sizeof(*comp->i_data); - comp->f_data = NULL; - comp->i_data = av_calloc(csize, sizeof(*comp->i_data)); - if (!comp->i_data) - return AVERROR(ENOMEM); - } - comp->reslevel = av_calloc(codsty->nreslevels, sizeof(*comp->reslevel)); - if (!comp->reslevel) - return AVERROR(ENOMEM); - /* LOOP on resolution levels */ - for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++) { - int declvl = codsty->nreslevels - reslevelno; // N_L -r see ISO/IEC 15444-1:2002 B.5 - Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno; - - /* Compute borders for each resolution level. - * Computation of trx_0, trx_1, try_0 and try_1. - * see ISO/IEC 15444-1:2002 eq. B.5 and B-14 */ - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) - reslevel->coord[i][j] = - ff_jpeg2000_ceildivpow2(comp->coord_o[i][j], declvl - 1); - // update precincts size: 2^n value - reslevel->log2_prec_width = codsty->log2_prec_widths[reslevelno]; - reslevel->log2_prec_height = codsty->log2_prec_heights[reslevelno]; - - /* Number of bands for each resolution level */ - if (reslevelno == 0) - reslevel->nbands = 1; - else - reslevel->nbands = 3; - - /* Number of precincts which span the tile for resolution level reslevelno - * see B.6 in ISO/IEC 15444-1:2002 eq. 
B-16 - * num_precincts_x = |- trx_1 / 2 ^ log2_prec_width) -| - (trx_0 / 2 ^ log2_prec_width) - * num_precincts_y = |- try_1 / 2 ^ log2_prec_width) -| - (try_0 / 2 ^ log2_prec_width) - * for Dcinema profiles in JPEG 2000 - * num_precincts_x = |- trx_1 / 2 ^ log2_prec_width) -| - * num_precincts_y = |- try_1 / 2 ^ log2_prec_width) -| */ - if (reslevel->coord[0][1] == reslevel->coord[0][0]) - reslevel->num_precincts_x = 0; - else - reslevel->num_precincts_x = - ff_jpeg2000_ceildivpow2(reslevel->coord[0][1], - reslevel->log2_prec_width) - - (reslevel->coord[0][0] >> reslevel->log2_prec_width); - - if (reslevel->coord[1][1] == reslevel->coord[1][0]) - reslevel->num_precincts_y = 0; - else - reslevel->num_precincts_y = - ff_jpeg2000_ceildivpow2(reslevel->coord[1][1], - reslevel->log2_prec_height) - - (reslevel->coord[1][0] >> reslevel->log2_prec_height); - - reslevel->band = av_calloc(reslevel->nbands, sizeof(*reslevel->band)); - if (!reslevel->band) - return AVERROR(ENOMEM); - - if (reslevel->num_precincts_x * (uint64_t)reslevel->num_precincts_y * reslevel->nbands > avctx->max_pixels / sizeof(*reslevel->band->prec)) - return AVERROR(ENOMEM); - - for (bandno = 0; bandno < reslevel->nbands; bandno++, gbandno++) { - ret = init_band(avctx, reslevel, - comp, codsty, qntsty, - bandno, gbandno, reslevelno, - cbps, dx, dy); - if (ret < 0) - return ret; - } - } - return 0; -} - -void ff_jpeg2000_reinit(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty) -{ - int reslevelno, bandno, cblkno, precno; - for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++) { - Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno; - for (bandno = 0; bandno < rlevel->nbands; bandno++) { - Jpeg2000Band *band = rlevel->band + bandno; - for(precno = 0; precno < rlevel->num_precincts_x * rlevel->num_precincts_y; precno++) { - Jpeg2000Prec *prec = band->prec + precno; - ff_tag_tree_zero(prec->zerobits, prec->nb_codeblocks_width, prec->nb_codeblocks_height, 0); - ff_tag_tree_zero(prec->cblkincl, prec->nb_codeblocks_width, prec->nb_codeblocks_height, 0); - for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) { - Jpeg2000Cblk *cblk = prec->cblk + cblkno; - cblk->length = 0; - cblk->lblock = 3; - } - } - } - } -} - -void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty) -{ - int reslevelno, bandno, precno; - for (reslevelno = 0; - comp->reslevel && reslevelno < codsty->nreslevels; - reslevelno++) { - Jpeg2000ResLevel *reslevel; - - if (!comp->reslevel) - continue; - - reslevel = comp->reslevel + reslevelno; - for (bandno = 0; bandno < reslevel->nbands; bandno++) { - Jpeg2000Band *band; - - if (!reslevel->band) - continue; - - band = reslevel->band + bandno; - for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) { - if (band->prec) { - Jpeg2000Prec *prec = band->prec + precno; - int nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width; - - av_freep(&prec->zerobits); - av_freep(&prec->cblkincl); - if (prec->cblk) { - int cblkno; - for (cblkno = 0; cblkno < nb_code_blocks; cblkno ++) { - Jpeg2000Cblk *cblk = &prec->cblk[cblkno]; - av_freep(&cblk->data); - av_freep(&cblk->passes); - av_freep(&cblk->lengthinc); - av_freep(&cblk->data_start); - av_freep(&cblk->layers); - } - av_freep(&prec->cblk); - } - } - } - - av_freep(&band->prec); - } - av_freep(&reslevel->band); - } - - ff_dwt_destroy(&comp->dwt); - av_freep(&comp->reslevel); - av_freep(&comp->i_data); - av_freep(&comp->f_data); -} diff --git 
a/spaces/congsaPfin/Manga-OCR/logs/Download Xtreme Motorbikes APK and Enjoy Realistic Freestyle Physics.md b/spaces/congsaPfin/Manga-OCR/logs/Download Xtreme Motorbikes APK and Enjoy Realistic Freestyle Physics.md deleted file mode 100644 index 8be224cd86432994d3b5e0aae82f297a7a1f988a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Xtreme Motorbikes APK and Enjoy Realistic Freestyle Physics.md +++ /dev/null @@ -1,128 +0,0 @@ -

    Xtreme Motorbikes: A Thrilling Racing Game for Android Devices

    -

Do you love racing games? Do you enjoy riding powerful motorbikes and performing freestyle stunts? If yes, then you should try Xtreme Motorbikes, a 3D racing game for Android devices that gives you a real feeling of riding and performing freestyle. Xtreme Motorbikes is developed by Mehdi Rabiee, a talented game developer who has created several other popular games such as Extreme Car Driving Simulator, Extreme Bike Driving 3D, and Extreme Car Stunts 3D. In this game, you can drive more than 20 different motorbikes with realistic physics and graphics, explore various open-world maps with different challenges and obstacles, perform amazing stunts and tricks with smooth controls, and complete various mini-tasks to earn money for upgrades and better motorbikes. Xtreme Motorbikes will keep you entertained for hours with its addictive gameplay and stunning visuals.

    -

    xtreme motorbikes apk game


Download Zip: https://urlca.com/2uO5jF



    -

    How to Download and Install Xtreme Motorbikes APK Game

    -

    If you want to play Xtreme Motorbikes on your Android device, you have two options. You can either download it from the Google Play Store or from other sources such as APKCombo. Here are the steps to follow for both options:

    -
      -
Option 1: Download from Google Play Store

1. Open the Google Play Store app on your device.
2. Search for "Xtreme Motorbikes" in the search bar.
3. Select the game from the list of results and tap on "Install".
4. Wait for the download and installation process to finish.
5. Tap on "Open" to launch the game and enjoy.
      -
Option 2: Download from APKCombo

1. Open your web browser on your device and go to APKCombo.com.
2. Search for "Xtreme Motorbikes" in the search bar.
3. Select the game from the list of results and tap on "Download APK".
4. Wait for the download process to finish.
5. Go to your device settings and enable "Unknown Sources" under security options.
6. Locate the downloaded file in your device storage and tap on it.
7. Follow the instructions on the screen to install the game.
8. Tap on "Open" to launch the game and enjoy.

If you would rather fetch the APK on a computer and check it before copying it to your phone, see the sketch after this list.
      -
    -
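If you use the APKCombo route, you may want to download the APK on a computer first and confirm that the file arrived intact before copying it to your phone. The short Python sketch below downloads a file and prints its SHA-256 checksum, which you can compare against the checksum shown on the download site if one is published. The URL and file name here are placeholders for illustration, not real APKCombo links.

```python
import hashlib
import urllib.request

# Placeholder values -- substitute the real link and name from the download site.
APK_URL = "https://example.com/xtreme-motorbikes.apk"
OUT_FILE = "xtreme-motorbikes.apk"

def download_with_checksum(url: str, out_file: str) -> str:
    """Download a file in 64 KiB chunks and return its SHA-256 hex digest."""
    sha256 = hashlib.sha256()
    with urllib.request.urlopen(url) as response, open(out_file, "wb") as f:
        while True:
            chunk = response.read(64 * 1024)
            if not chunk:
                break
            f.write(chunk)
            sha256.update(chunk)
    return sha256.hexdigest()

if __name__ == "__main__":
    digest = download_with_checksum(APK_URL, OUT_FILE)
    print(f"Saved {OUT_FILE} with SHA-256 {digest}")
```

If the checksum matches what the site publishes, copy the file to your device and continue from step 6 of Option 2 above.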

    How to Play Xtreme Motorbikes APK Game

    -

    Choose Your Motorbike

    -

    The first thing you need to do when you start playing Xtreme Motorbikes is to choose your motorbike. You can select from 25 different motorbikes with various specifications such as speed, acceleration, handling, braking, and nitro.

    Perform Freestyle Stunts

    -

    One of the most fun and exciting aspects of Xtreme Motorbikes is the ability to perform freestyle stunts and tricks with your motorbike. You can use the smooth controls and realistic physics to show off your skills and creativity. You can also use the nitro boost to gain more speed and air time. Some of the stunts you can perform include:

    -
      -
- Wheelie: This is when you lift the front wheel of your motorbike and balance on the rear wheel. You can do this by leaning back and accelerating. You can also steer left or right while doing a wheelie.
- Stoppie: This is when you lift the rear wheel of your motorbike and balance on the front wheel. You can do this by leaning forward and braking hard. You can also steer left or right while doing a stoppie.
- Burnout: This is when you spin the rear wheel of your motorbike while keeping the front wheel stationary. You can do this by holding down the front brake and revving up the engine. You can also move left or right while doing a burnout.
- Drift: This is when you slide the rear wheel of your motorbike sideways while turning a corner. You can do this by braking hard and steering sharply. You can also use the nitro boost to increase the drift angle.
- Air Tricks: These are the tricks you can do while your motorbike is in the air. You can do these by using the directional buttons or the accelerometer to rotate or flip your motorbike. Some of the air tricks you can do include:
  - Backflip: This is when you rotate your motorbike 360 degrees backwards in the air.
  - Frontflip: This is when you rotate your motorbike 360 degrees forwards in the air.
  - Rodeo: This is when you let go of your motorbike and grab it again in the air.
  - No-Hander: This is when you take both hands off the handlebars in the air.
  - No-Footer: This is when you take both feet off the footpegs in the air.
  - Nac-Nac: This is when you swing one leg over the seat and extend it to the opposite side of your motorbike in the air.
  - Saran Wrap: This is when you swing one leg over the handlebars and extend it to the opposite side of your motorbike in the air.
  - Superman: This is when you let go of your motorbike and stretch your body parallel to it in the air.

    You can also combine different stunts and tricks to create unique combos and earn more points. The more stunts and tricks you perform, the more money you will earn for upgrades and better motorbikes.

    Explore Different Maps

    -

    Another feature that makes Xtreme Motorbikes a great game is the variety of maps that you can explore with your motorbike. You can drive in different open-world environments with different terrains, obstacles, and challenges. Some of the maps you can choose from include:

    -
| Map Name | Description |
| --- | --- |
| City | This is the default map, where you can drive in a realistic city with traffic, buildings, bridges, and ramps. You can also find hidden areas and shortcuts to discover. |
| Desert | This is a map where you can drive in a vast desert with sand dunes, rocks, cacti, and oases. You can also find ancient ruins and pyramids to explore. |
| Forest | This is a map where you can drive in a lush forest with trees, grass, flowers, and rivers. You can also find wooden bridges, cabins, and campfires to enjoy. |
| Mountain | This is a map where you can drive in a snowy mountain with slopes, cliffs, caves, and ice. You can also find ski lifts, snowmen, and penguins to interact with. |
| Island | This is a map where you can drive in a tropical island with beaches, palm trees, waterfalls, and volcanoes. You can also find boats, jet skis, and dolphins to ride with. |
    -

    You can switch between different maps by tapping on the map icon on the top left corner of the screen. You can also zoom in and out of the map by pinching the screen. Each map has its own unique features and challenges that will test your driving skills and provide you with endless fun.

    -


    Complete Mini-Tasks and Earn Money

    -

Besides driving and performing stunts, you can also complete various mini-tasks in Xtreme Motorbikes to earn more money and have more fun. These mini-tasks come in different kinds and difficulty levels, and they appear randomly on the map as icons.

    You can start a mini-task by driving near its icon and tapping on it. You can also cancel a mini-task by tapping on the cross icon on the top right corner of the screen. Completing a mini-task will reward you with money that you can use for upgrades and better motorbikes.

    -

    Why You Should Play Xtreme Motorbikes APK Game

    -

Xtreme Motorbikes is a game that will appeal to anyone who loves racing games, motorbikes, and stunts. It offers you a lot of features and benefits, such as:

- More than 20 different motorbikes with realistic physics and graphics
- Various open-world maps with different terrains, obstacles, and challenges
- Smooth controls for performing freestyle stunts, tricks, and combos
- Mini-tasks that reward you with money for upgrades and better motorbikes
- A free download and addictive gameplay with stunning visuals

Xtreme Motorbikes will give you a real feeling of riding and performing freestyle, and it will keep you entertained for hours with its addictive gameplay and stunning visuals. You should definitely try it if you love racing games, motorbikes, and stunts.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Paint 3D How to Download Install and Use the New Microsoft App.md b/spaces/congsaPfin/Manga-OCR/logs/Paint 3D How to Download Install and Use the New Microsoft App.md deleted file mode 100644 index f4cf8ca9d442d28bd5e47313fee101c0dcfc9cdf..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Paint 3D How to Download Install and Use the New Microsoft App.md +++ /dev/null @@ -1,129 +0,0 @@ - -

    How to Download Windows Paint 3D: A Complete Guide

    -

    Windows Paint 3D is a creative application that comes free with Windows 10 and allows you to create professional or fun projects by easily combining 2D and 3D tools. Whether you are an artist or just want to try out some doodles, Windows Paint 3D can help you unleash your creativity and bring your ideas to life.

    -

    download windows paint 3d


Download: https://urlca.com/2uObCy



    -

In this article, we will show you what Windows Paint 3D is, why you should try it, how to download it for free, how to use it for basic 3D modeling, and what some alternatives to it are. By the end of this article, you will have a clear understanding of how to download and use Windows Paint 3D for your personal or professional needs.

    -

    What is Windows Paint 3D and why you should try it

    -

Windows Paint 3D is a built-in creative application that comes free with Windows 10. It is designed to be simple yet powerful, allowing you to create professional or fun creative projects by easily combining 2D and 3D tools. It is the successor to the classic Microsoft Paint, with an updated look and feel and a ton of new features.

    -

    Features of Windows Paint 3D

    -

    Some of the features of Windows Paint 3D are:

- 2D drawing tools, including brushes such as the marker, calligraphy pen, oil brush, watercolor, pixel pen, and spray can
- 3D shapes, ready-made 3D models, and 3D doodle tools that turn your strokes into 3D objects
- Stickers and textures that you can apply to 2D images or wrap around 3D objects
- 2D and 3D text, lighting and filter effects, and flexible canvas options
- Magic select, a tool for cutting a subject out of an image and turning it into a sticker or a separate object
- Export options for common image, video, and 3D file formats

    Benefits of using Windows Paint 3D

    -

    Some of the benefits of using Windows Paint 3D are:

- It comes free with Windows 10, so there is nothing extra to buy or install
- It is simple enough for beginners while still teaching the basics of 3D modeling
- It combines 2D drawing and 3D modeling in a single application
- It lets you print, share, and export your projects in common formats

    How to download Windows Paint 3D for free

    -

    If you have Windows 10 installed on your device, you probably already have Windows Paint 3D. However, if you don't have it or you want to update it to the latest version, you can download it for free from the Microsoft Store. Here is how to do it:

    -

    Requirements for installing Windows Paint 3D

    -

    Before you download Windows Paint 3D, make sure that your device meets the following requirements:

    -


- A device running Windows 10 (Paint 3D was introduced with the Creators Update, version 1703, so you need that version or later)
- Access to the Microsoft Store app
- An internet connection for the download
- Enough free storage space for the app

    Steps to download Windows Paint 3D from the Microsoft Store

    -

    To download Windows Paint 3D from the Microsoft Store, follow these steps:

    -
      -
1. Open the Microsoft Store app: You can find it on your Start menu or by typing "Microsoft Store" in the search box.
2. Search for Windows Paint 3D: You can use the search bar at the top right corner of the app or browse through the categories until you find it.
3. Select Windows Paint 3D: You will see a page with the description, screenshots, ratings, and reviews of Windows Paint 3D. Click on the "Get" button to start downloading it.
4. Install Windows Paint 3D: Once the download is complete, you will see a notification that says "You're good to go". Click on the "Launch" button to open Windows Paint 3D.
    -

    How to use Windows Paint 3D for basic 3D modeling

    -

    Now that you have downloaded and installed Windows Paint 3D, you can start using it for basic 3D modeling. Here are some tips on how to use the interface and tools of Windows Paint 3D and how to create and edit 3D objects in it.

    -

    Interface and tools of Windows Paint 3D

    -

    The interface of Windows Paint 3D consists of four main parts:

- The menu bar: sits at the top of the window and contains commands such as New, Open, Save As, and Print.
- The toolbar: runs along the top and holds the main tools, such as brushes, 2D shapes, 3D shapes, stickers, text, effects, and canvas options.
- The sidebar: appears on the right and shows the settings of the currently selected tool, such as color, size, opacity, and style.
- The artboard: the large canvas in the center where you draw and arrange your 2D and 3D content.

    To use the tools of Windows Paint 3D, you can simply click on them from the toolbar and then use them on the artboard. You can also adjust the settings of each tool from the sidebar, such as the color, size, opacity, and style. You can also switch between 2D and 3D mode by clicking on the icons at the bottom right corner of the screen.

    -

    Creating and editing 3D objects in Windows Paint 3D

    -

    To create and edit 3D objects in Windows Paint 3D, you can use the following methods:

- Insert ready-made content: open the 3D shapes menu and choose from 3D models (such as people and animals) or basic 3D objects (such as cubes, spheres, and cylinders).
- Draw your own: use the 3D doodle tools (sharp edge or soft edge) to sketch an outline that Paint 3D automatically turns into a 3D object.
- Edit and arrange: select any 3D object and use the handles around it to move it, resize it, rotate it around any axis, or push it forward and backward in the scene.
- Decorate: paint directly on 3D objects, wrap stickers and textures around them, or add 3D text to the scene.

    Alternatives to Windows Paint 3D

    -

    While Windows Paint 3D is a great application for basic 3D modeling, it may not be enough for more advanced or professional projects. If you are looking for alternatives to Windows Paint 3D that offer more features and capabilities, you may want to check out these options:

    -

    Blender

    -

Blender is free and open source software that is widely used for creating 3D animations, games, visual effects, and more. It has a powerful and flexible interface that allows you to model, sculpt, rig, animate, render, composite, edit, and simulate 3D content. It also has a large and active community of users and developers who contribute to its improvement and offer support and resources. You can download Blender for free from its official website: https://www.blender.org/download/

    -

    SketchUp

    -

SketchUp is popular and easy-to-use software that is mainly used for creating 3D models of buildings, landscapes, furniture, and other objects. It has a simple and intuitive interface that allows you to draw, push, pull, rotate, and scale your 3D models. It also has a vast library of pre-made 3D models that you can use or modify for your projects. You can use SketchUp for free for personal use or upgrade to a paid version for more features and functionality. You can download SketchUp from its official website: https://www.sketchup.com/download

    -

    Conclusion and FAQs

    -

    In this article, we have shown you how to download Windows Paint 3D, a creative application that comes free with Windows 10 and allows you to create professional or fun projects by easily combining 2D and 3D tools. We have also shown you what Windows Paint 3D is, why you should try it, how to use it for basic 3D modeling, and what are some alternatives to it.

    -

    We hope that this article has been helpful and informative for you and that you have enjoyed learning how to download Windows Paint 3D. If you have any questions or feedback, please feel free to leave a comment below or contact us through our website. Thank you for reading and happy creating!

    -

    FAQs

    -

    Here are some frequently asked questions about Windows Paint 3D:

    -
      -
1. Is Windows Paint 3D safe to download?

      Yes, Windows Paint 3D is safe to download as it is a built-in application that comes with Windows 10. You can download it from the Microsoft Store, which is a trusted source of software for Windows devices. However, you should always be careful when downloading any software from the internet and make sure that your device is protected by antivirus software.

      -
2. Can I use Windows Paint 3D offline?

      Yes, you can use Windows Paint 3D offline once you have downloaded and installed it on your device. However, some features of Windows Paint 3D may require an internet connection, such as searching for more 3D models online or sharing your projects with other users.

      -
3. Can I use Windows Paint 3D on other devices?

      No, Windows Paint 3D is only available for Windows 10 devices. You cannot use it on other devices such as Macs, iPhones, Androids, or Chromebooks.

      -
4. Can I print my Windows Paint 3D projects?

      Yes, you can print your Windows Paint 3D projects by using the print option from the menu bar. You can choose to print your projects as images or as 3D models. If you want to print your projects as 3D models, you will need a compatible 3D printer or a service that can print them for you.

      -
5. Can I export my Windows Paint 3D projects?

Yes, you can export your Windows Paint 3D projects by using the save as option from the menu bar. You can choose to export your projects in different formats such as PNG, JPEG, GIF, MP4, or GLB. You can also choose to export your projects as videos or as mixed reality scenes. (A quick way to sanity-check an exported GLB file is shown after this list.)

      -
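One more note on the GLB format mentioned in the last answer: a .glb file is a standard binary glTF container, so you can sanity-check an export outside Paint 3D with a few lines of Python. This is a minimal sketch, and my_project.glb is a hypothetical name for a file you have exported; the header layout (magic number, version, length) comes from the glTF specification, not from Paint 3D itself.

```python
import struct

def check_glb_header(path: str) -> None:
    """Read the 12-byte GLB header: magic number, container version, total length."""
    with open(path, "rb") as f:
        header = f.read(12)
    if len(header) < 12:
        raise ValueError("file too short to be a GLB container")
    magic, version, length = struct.unpack("<III", header)
    if magic != 0x46546C67:  # the ASCII bytes "glTF", read as a little-endian integer
        raise ValueError("not a GLB file: bad magic number")
    print(f"Valid GLB header: version {version}, {length} bytes total")

check_glb_header("my_project.glb")  # hypothetical exported file
```

If the header checks out, the file is at least a well-formed GLB container; a full validation would also need to parse the JSON chunk that follows the header.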

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/SSC Admit Card 2023 Check latest updates and notifications for SSC exams.md b/spaces/congsaPfin/Manga-OCR/logs/SSC Admit Card 2023 Check latest updates and notifications for SSC exams.md deleted file mode 100644 index 7b261d212ef055e3e2048d6f2a2f09e02f2c627e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/SSC Admit Card 2023 Check latest updates and notifications for SSC exams.md +++ /dev/null @@ -1,53 +0,0 @@ -
    -

    How to Download Hall Ticket for SSC CGL Exam 2023


    Introduction

    If you are preparing for the SSC CGL exam 2023, you must be wondering how to download your hall ticket for the exam. A hall ticket is a crucial document that you need to carry with you to the exam center. It contains your personal details, exam details, instructions and guidelines for the exam.

    -

    hall ticket download 2023 ssc


Download: https://urlca.com/2uOddo



    In this article, we will tell you everything you need to know about how to download your hall ticket for the SSC CGL exam 2023. We will also give you some tips on how to prepare for the exam and ace it with flying colors.


    What is SSC CGL Exam?

    SSC CGL stands for Staff Selection Commission Combined Graduate Level Exam. It is one of the most popular and competitive exams in India that recruits candidates for various posts in different departments, ministries and organizations under the Government of India.

    The SSC CGL exam is conducted in four tiers: Tier I (Computer Based Test), Tier II (Computer Based Test), Tier III (Descriptive Paper) and Tier IV (Skill Test/Document Verification). The syllabus and pattern of the exam vary for each tier and post.

The SSC CGL exam is held once a year, usually in March-April for Tier I, June-July for Tier II, September-October for Tier III and November-December for Tier IV. The dates may change depending on various factors, such as the COVID-19 pandemic or the number of vacancies.


    How to Download Hall Ticket for SSC CGL Exam 2023

    The hall ticket for the SSC CGL exam 2023 is expected to be released by the SSC on its official website a few weeks before the exam date. Candidates who have successfully applied for the exam can download their hall ticket by following these steps:

    -


    1. Visit the official website of SSC: https://ssc.nic.in/
    2. Click on the "Admit Card" tab on the homepage.
    3. Select your region from the list of regional websites.
    4. Click on the link that says "Download Admit Card for Combined Graduate Level Examination, 2023 (Tier-I)" or similar.
    5. Enter your registration number and password or name and date of birth as per your application form.
    6. Click on the "Submit" button and view your hall ticket on the screen.
    7. Download and print your hall ticket and keep it safe for future reference.

    Note: The hall ticket download process may vary slightly for different regions and exams conducted by the SSC. Candidates should visit the official website of their respective region or exam to get the latest updates and instructions on how to download their hall ticket.
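    For readers who are comfortable with scripting, the same flow can be sketched in a few lines of Python. This is only an illustration: the endpoint URL and form-field names below are hypothetical, since each SSC regional site uses its own pages and parameters, and most of them add captchas that rule out simple automation. Always download your hall ticket from the official site as described above.

    import requests

    # Hypothetical endpoint -- each SSC regional site has its own URL and form fields.
    ADMIT_CARD_URL = "https://sscregion.example/admit-card/download"

    payload = {
        "registration_number": "1234567890",  # as printed on your application form
        "date_of_birth": "01/01/2000",        # in the format the form expects
    }

    response = requests.post(ADMIT_CARD_URL, data=payload, timeout=30)
    response.raise_for_status()

    # Save the returned hall ticket PDF and print it for the exam day.
    with open("ssc_cgl_hall_ticket.pdf", "wb") as f:
        f.write(response.content)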


    How to Check Hall Ticket for SSC CGL Exam 2023

    After downloading your hall ticket, you should check it carefully for any errors or discrepancies. Your hall ticket should typically contain the following details: your name, roll number, registration number, photograph, signature, exam date and time, exam centre name and address, and important instructions for candidates.

    If you find any mistake or mismatch in your hall ticket, you should report it to the SSC immediately through email or phone. You should also carry a proof of your identity and application form along with your hall ticket to the exam center in case of any discrepancy.


    How to Ace SSC CGL Exam 2023

    To ace the SSC CGL exam 2023, you need to do more than just prepare well. You also need a positive attitude and a confident approach. Here are some tips on how to ace the exam: manage your time wisely, attempt the questions you are sure of first, read each question carefully before answering, stay calm throughout the paper, and review your answers if time permits.


    Conclusion

    The SSC CGL exam 2023 is a golden opportunity for you to get your dream job in the government sector. To crack this exam, you need to download your hall ticket, prepare well, and approach the paper with confidence.

    In this article, we have given you a detailed guide on how to download your hall ticket for the SSC CGL exam 2023, along with some tips and tricks on how to prepare for the exam and ace it with flying colors.

    We hope that this article has helped you in your preparation process and has cleared all your doubts regarding the hall ticket download process. If you have any queries or suggestions, please feel free to comment below. We wish you all the best for your exam!


    FAQs

    Here are some frequently asked questions about hall ticket download for SSC CGL exam 2023:

    1. Q: When will the hall ticket for SSC CGL exam 2023 be released?
      A: The hall ticket for SSC CGL exam 2023 is expected to be released by the SSC on its official website a few weeks before the exam date. The exact date will be notified by the SSC through its website or other media.
    2. Q: How can I download my hall ticket for SSC CGL exam 2023?
      A: You can download your hall ticket for SSC CGL exam 2023 from the official website of SSC: click on the "Admit Card" tab, select your region, open the admit card download link, enter your registration number and password (or your name and date of birth), and then view, download, and print your hall ticket.
    3. Q: What if I forget my registration number or password?
      A: If you forget your registration number or password, you can retrieve them by clicking on the "Forgot Registration Number/Password" link on the admit card download page. You will have to enter your name, father's name, date of birth, email ID, mobile number, etc. to get your registration number or password.
    4. Q: What if I find any error or discrepancy in my hall ticket?
      A: If you find any error or discrepancy in your hall ticket, you should report it to the SSC immediately through email or phone. You should also carry a proof of your identity and application form along with your hall ticket to the exam center in case of any discrepancy.
    5. Q: What documents do I need to carry along with my hall ticket to the exam center?
      A: You need to carry a printout of your hall ticket along with a valid photo ID proof, such as an Aadhaar card, PAN card, passport, driving license, or voter ID card, to the exam center. You should also carry two passport-size photographs of the same kind as the one uploaded in your application form.
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Tamil TV Shows Live with Tamil Show Net - No Subscription Required.md b/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Tamil TV Shows Live with Tamil Show Net - No Subscription Required.md deleted file mode 100644 index 2266169fabbde4ca79a456a9fb196a6d4a5b4579..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Stream and Download Tamil TV Shows Live with Tamil Show Net - No Subscription Required.md +++ /dev/null @@ -1,192 +0,0 @@ -
    -

    How to Watch Tamil Shows Online for Free

    -

    Tamil shows are a great source of entertainment, culture, and education for millions of people around the world. Whether you are a fan of drama, comedy, romance, thriller, or reality shows, you can find something that suits your taste and mood in the vast collection of Tamil shows available online. But how can you watch them for free without any hassle? In this article, we will show you the best apps and websites that let you stream and download your favorite Tamil shows online for free. We will also discuss the benefits and challenges of watching Tamil shows online, and give you some tips on how to enjoy them to the fullest.

    -

    tamil show net live download


    Download File »»» https://urlca.com/2uO4Cl



    -

    Introduction

    -

    What are Tamil shows and why are they popular?

    -

    Tamil shows are television programs that are produced in the Tamil language, which is spoken by about 80 million people in India, Sri Lanka, Malaysia, Singapore, and other countries. Tamil shows cover a wide range of genres, such as soap operas, game shows, talk shows, reality shows, musical shows, talent shows, and more. Some of the most popular Tamil shows include Bigg Boss Tamil, Super Singer, Neeya Naana, Roja, Chithi, and many others.

    -

    Tamil shows are popular because they reflect the rich and diverse culture, history, and values of the Tamil people. They also showcase the talent and creativity of the actors, directors, writers, and producers who work hard to create engaging and entertaining content for their audiences. Tamil shows also cater to different age groups, preferences, and interests, making them appealing to a wide range of viewers.

    -

    What are the benefits of watching Tamil shows online?

    -

    Watching Tamil shows online has many benefits, such as:

    - Convenience: you can watch your favorite shows anytime, anywhere, and on any device with an internet connection.
    - Cost: many apps and websites let you stream or download shows for free or at a low price.
    - Choice: you can pick from a huge catalog of shows across genres, channels, and eras, instead of being tied to a TV schedule.

    What are the challenges of watching Tamil shows online?

    -

    Watching Tamil shows online also has some challenges, such as:

    -

    - Legality: some sites stream or host content without proper rights, so you need to be careful about what you watch and where.
    - Safety: free streaming sites can carry intrusive ads, pop-ups, or malware that put your data and device at risk.
    - Quality and reliability: free streams may buffer, play at low resolution, or go offline without notice.
    - Availability: some apps and websites are blocked in certain regions and may require a VPN or proxy server to access.

    Best Apps and Websites for Watching Tamil Shows Online

    -

    Sun NXT

    -

    Features

    -

    Sun NXT is one of the most popular apps for watching Tamil shows online. It is owned by Sun TV Network, which is one of the largest media conglomerates in India. Sun NXT offers over 4000 movies and daily TV shows in six languages - Tamil, Telugu, Malayalam, Kannada, Bengali, and Marathi. Apart from TV shows and movies, it also has live streaming, news, music, comedy, and original web series. You can watch the latest episodes of your favorite Tamil shows on Sun NXT within hours of their telecast on TV.

    -

    Pros and cons

    -

    Some of the pros of Sun NXT are:

    - A large catalog of over 4000 movies and daily TV shows in six languages.
    - New episodes of popular shows available within hours of their TV telecast.
    - Live TV, news, music, comedy, and original web series in one app.

    Some of the cons of Sun NXT are:

    - Most content requires a paid subscription once the 30-day free trial ends.

    How to download and use

    -

    To download and use Sun NXT, you need to follow these steps:

    -
      -
    1. Go to the Google Play Store or Apple App Store on your smartphone or tablet and search for Sun NXT. Alternatively, you can go to the official website of Sun NXT on your computer or laptop and click on the download button.
    2. Install the app on your device or open the website on your browser.
    3. Create an account with your email address or phone number, or log in with your existing account if you have one.
    4. Select your preferred language and subscription plan. You can also opt for a free trial for 30 days before paying for the subscription.
    5. Browse through the categories and genres of shows and movies, or search for your favorite ones by name, actor, director, or channel.
    6. Click on the play button to start streaming the show or movie online, or click on the download button to save it for offline viewing. You can also add it to your watchlist or favorites for later viewing.
    -

    Tamil TV Live

    -

    Features

    -

    Tamil TV Live is another popular app for watching Tamil shows online. It is a free app that lets you watch live TV channels from Tamil Nadu, India, and other countries. It has over 150 channels in various categories, such as entertainment, news, sports, music, movies, devotional, kids, and more. Some of the channels that you can watch on Tamil TV Live are Zee Tamil, Star Vijay, Colors Tamil, Jaya TV, Raj TV, Polimer TV, Kalaignar TV, and many others. You can also watch some regional channels from Kerala, Karnataka, Andhra Pradesh, Telangana, and Maharashtra.

    -

    Pros and cons

    -

    Some of the pros of Tamil TV Live are:

    - It is completely free to use, with no subscription required.
    - It offers over 150 live channels across entertainment, news, sports, music, movies, devotional, and kids categories.
    - It also carries regional channels from neighbouring states.

    Some of the cons of Tamil TV Live are:

    - It only offers live streams, so you cannot watch past episodes on demand.
    - Streaming quality depends heavily on your internet connection.

    How to download and use

    -

    To download and use Tamil TV Live, you need to follow these steps:

    -
      -
    1. Go to the Google Play Store on your smartphone or tablet and search for Tamil TV Live. Alternatively, you can go to the official website of Tamil TV Live on your computer or laptop and click on the download button.
    2. Install the app on your device or open the website on your browser.
    3. Open the app or website and select your preferred language and category of channels.
    4. Browse through the list of channels and tap or click on the one that you want to watch.
    5. Enjoy watching the live TV channel online. You can also adjust the settings, such as video quality, screen size, or orientation, according to your preference.
    -

    TamilTvShow.net

    -

    Features

    -

    TamilTvShow.net is one of the best websites for watching Tamil shows online. It is a free website that lets you watch and download the latest episodes of your favorite Tamil shows from various TV channels. It has a huge collection of shows in different genres, such as drama, comedy, romance, thriller, reality, and more. Some of the shows that you can watch on TamilTvShow.net are Bigg Boss Tamil, Super Singer, Neeya Naana, Roja, Chithi, and many others. You can also watch some movies and web series on this website.

    -

    Pros and cons

    -

    Some of the pros of TamilTvShow.net are:

    - It is free to use, with both streaming and download options for every episode.
    - It has a huge collection of shows across genres, along with movies and web series.

    Some of the cons of TamilTvShow.net are:

    - It may be blocked in some regions, requiring a VPN service or proxy server to access.
    - Since it streams copyrighted TV content for free, its legal status is questionable.

    How to access and use

    -

    To access and use TamilTvShow.net, you need to follow these steps:

    -
      -
    1. Go to the official website of TamilTvShow.net on your computer or laptop browser. Alternatively, you can use a VPN service or proxy server to access the website if it is blocked in your region.
    2. On the homepage, you will see a list of featured shows and movies that you can watch online. You can also use the menu bar or the search box to find your favorite shows by name, channel, genre, or date.
    3. Click on the show that you want to watch online. You will be redirected to a page where you can see the details and episodes of the show.
    4. Select the episode that you want to watch online. You will see a video player where you can stream the episode online. You can also see some download links below the video player where you can download the episode for offline viewing.
    5. Enjoy watching or downloading the show online. You can also share it with your friends and family via social media or messaging apps.
    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have shown you how to watch Tamil shows online for free using three different methods - Sun NXT app, Tamil TV Live app, and TamilTvShow.net website. We have discussed the features, pros and cons, and how to download and use each method. We have also explained what are Tamil shows and why are they popular, what are the benefits and challenges of watching them online, and how to enjoy them to the fullest.

    -

    Call to action

    -

    We hope that this article has helped you find the best way to watch your favorite Tamil shows online for free. Now that you know how to do it, why not give it a try? You will be amazed by how much fun and entertainment you can get from watching Tamil shows online. Whether you want to relax after a long day, learn something new, laugh out loud, or feel some emotions, you can find a show that suits your mood and taste on any of these apps or websites. So go ahead and start watching Tamil shows online for free today!

    Before we end this article, let's take a look at some of the frequently asked questions (FAQs) that people have about watching Tamil shows online for free.

    -

    Frequently Asked Questions

    -

    Q: Is it legal to watch Tamil shows online for free?

    -

    A: It depends on the source and the content that you are watching. Some apps and websites may have the legal rights or permissions to stream or download the shows for free, while others may not. You should always check the terms and conditions of the app or website before using it, and respect the intellectual property rights of the content owners and providers. If you are unsure, you can always use a VPN service or proxy server to protect your identity and privacy online.
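    For the technically inclined, routing a request through a proxy is straightforward to sketch in Python with the requests library; the proxy address below is just a placeholder, not a recommendation of any particular service.

    import requests

    # Placeholder proxy address -- substitute one from a VPN/proxy service you trust.
    proxies = {
        "http": "http://203.0.113.10:8080",
        "https": "http://203.0.113.10:8080",
    }

    # The request is routed through the proxy, masking your own IP address.
    response = requests.get("https://example.com", proxies=proxies, timeout=30)
    print(response.status_code)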

    -

    Q: Is it safe to watch Tamil shows online for free?

    -

    A: It depends on the app or website that you are using. Some apps and websites may have security features or encryption methods that protect your data and device from any harm, while others may not. You should always use a trusted and reliable app or website that has good reviews and ratings from other users, and avoid any suspicious or malicious links or downloads. You should also use an antivirus software or firewall to scan and block any potential threats or viruses online.

    -

    Q: What are some of the best Tamil shows to watch online for free?

    -

    A: There are many Tamil shows that you can watch online for free, depending on your preference and interest. Some of the best Tamil shows that we recommend are:

    - Bigg Boss Tamil: a popular reality show where contestants live together under constant camera surveillance.
    - Super Singer: a singing talent show that has launched many playback singers.
    - Neeya Naana: a debate talk show on social topics.
    - Roja and Chithi: long-running family drama serials.

    Q: How can I improve my Tamil language skills by watching Tamil shows online for free?

    -

    A: Watching Tamil shows online for free can be a great way to improve your Tamil language skills, especially if you are a beginner or intermediate learner. You can learn new words, phrases, expressions, grammar, pronunciation, and accent by listening to and imitating the native speakers. You can also improve your comprehension, vocabulary, and fluency by reading the subtitles or dubbing in different languages. You can also practice your speaking, writing, and listening skills by discussing the show with other fans or writing reviews or comments online.

    -

    Q: How can I watch Tamil shows online for free on my TV?

    -

    A: There are several ways to watch Tamil shows online for free on your TV, such as:

    - Use a smart TV that supports the streaming app, and install it directly from the TV's app store.
    - Cast the stream from your phone or laptop to the TV using a casting device such as Chromecast.
    - Connect your laptop to the TV with an HDMI cable and play the stream in a browser.

    We hope that this article has answered all your questions about watching Tamil shows online for free. If you have any more questions or suggestions, please feel free to leave them in the comments section below. Thank you for reading!

    \ No newline at end of file diff --git a/spaces/congxin95/BMTools-demo/app.py b/spaces/congxin95/BMTools-demo/app.py deleted file mode 100644 index d9c912b7f8555ad20796b9e086da6a2867695f22..0000000000000000000000000000000000000000 --- a/spaces/congxin95/BMTools-demo/app.py +++ /dev/null @@ -1,302 +0,0 @@ -import sys -sys.path.append("BMTools/") - -import gradio as gr -from bmtools.agent.tools_controller import MTQuestionAnswerer, load_valid_tools -from bmtools.agent.singletool import STQuestionAnswerer -from langchain.schema import AgentFinish -import os -import requests - -from tool_server import run_tool_server -from threading import Thread -from multiprocessing import Process -import time - -tool_server_flag = False -def start_tool_server(): - # server = Thread(target=run_tool_server) - server = Process(target=run_tool_server) - server.start() - global tool_server_flag - tool_server_flag = True - - -available_models = ["ChatGPT", "GPT-3.5"] -DEFAULTMODEL = "ChatGPT" # "GPT-3.5" - -tools_mappings = { - "klarna": "https://www.klarna.com/", - "weather": "http://127.0.0.1:8079/tools/weather/", - # "database": "http://127.0.0.1:8079/tools/database/", - # "db_diag": "http://127.0.0.1:8079/tools/db_diag/", - "chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/", - "douban-film": "http://127.0.0.1:8079/tools/douban-film/", - "wikipedia": "http://127.0.0.1:8079/tools/wikipedia/", - # "wikidata": "http://127.0.0.1:8079/tools/kg/wikidata/", - "wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/", - "bing_search": "http://127.0.0.1:8079/tools/bing_search/", - "office-ppt": "http://127.0.0.1:8079/tools/office-ppt/", - "stock": "http://127.0.0.1:8079/tools/stock/", - "bing_map": "http://127.0.0.1:8079/tools/map.bing_map/", - # "baidu_map": "http://127.0.0.1:8079/tools/map/baidu_map/", - "zillow": "http://127.0.0.1:8079/tools/zillow/", - "airbnb": "http://127.0.0.1:8079/tools/airbnb/", - "job_search": "http://127.0.0.1:8079/tools/job_search/", - # "baidu-translation": "http://127.0.0.1:8079/tools/translation/baidu-translation/", - # "nllb-translation": "http://127.0.0.1:8079/tools/translation/nllb-translation/", - "tutorial": "http://127.0.0.1:8079/tools/tutorial/", - "file_operation": "http://127.0.0.1:8079/tools/file_operation/", - "meta_analysis": "http://127.0.0.1:8079/tools/meta_analysis/", - "code_interpreter": "http://127.0.0.1:8079/tools/code_interpreter/", - "arxiv": "http://127.0.0.1:8079/tools/arxiv/", - "google_places": "http://127.0.0.1:8079/tools/google_places/", - "google_serper": "http://127.0.0.1:8079/tools/google_serper/", - "google_scholar": "http://127.0.0.1:8079/tools/google_scholar/", - "python": "http://127.0.0.1:8079/tools/python/", - "sceneXplain": "http://127.0.0.1:8079/tools/sceneXplain/", - "shell": "http://127.0.0.1:8079/tools/shell/", - "image_generation": "http://127.0.0.1:8079/tools/image_generation/", - "hugging_tools": "http://127.0.0.1:8079/tools/hugging_tools/", - "gradio_tools": "http://127.0.0.1:8079/tools/gradio_tools/", - "travel": "http://127.0.0.1:8079/tools/travel", - "walmart": "http://127.0.0.1:8079/tools/walmart", -} - -valid_tools_info = [] -all_tools_list = [] - -gr.close_all() - -MAX_TURNS = 30 -MAX_BOXES = MAX_TURNS * 2 - -return_msg = [] -chat_history = "" - -MAX_SLEEP_TIME = 40 -def load_tools(): - global valid_tools_info - global all_tools_list - try: - valid_tools_info = load_valid_tools(tools_mappings) - except BaseException as e: - print(repr(e)) - all_tools_list = sorted(list(valid_tools_info.keys())) - return 
gr.update(choices=all_tools_list) - -def set_environ(OPENAI_API_KEY: str, - WOLFRAMALPH_APP_ID: str = "", - WEATHER_API_KEYS: str = "", - BING_SUBSCRIPT_KEY: str = "", - ALPHA_VANTAGE_KEY: str = "", - BING_MAP_KEY: str = "", - BAIDU_TRANSLATE_KEY: str = "", - RAPIDAPI_KEY: str = "", - SERPER_API_KEY: str = "", - GPLACES_API_KEY: str = "", - SCENEX_API_KEY: str = "", - STEAMSHIP_API_KEY: str = "", - HUGGINGFACE_API_KEY: str = "", - AMADEUS_ID: str = "", - AMADEUS_KEY: str = "",): - os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY - os.environ["WOLFRAMALPH_APP_ID"] = WOLFRAMALPH_APP_ID - os.environ["WEATHER_API_KEYS"] = WEATHER_API_KEYS - os.environ["BING_SUBSCRIPT_KEY"] = BING_SUBSCRIPT_KEY - os.environ["ALPHA_VANTAGE_KEY"] = ALPHA_VANTAGE_KEY - os.environ["BING_MAP_KEY"] = BING_MAP_KEY - os.environ["BAIDU_TRANSLATE_KEY"] = BAIDU_TRANSLATE_KEY - os.environ["RAPIDAPI_KEY"] = RAPIDAPI_KEY - os.environ["SERPER_API_KEY"] = SERPER_API_KEY - os.environ["GPLACES_API_KEY"] = GPLACES_API_KEY - os.environ["SCENEX_API_KEY"] = SCENEX_API_KEY - os.environ["STEAMSHIP_API_KEY"] = STEAMSHIP_API_KEY - os.environ["HUGGINGFACE_API_KEY"] = HUGGINGFACE_API_KEY - os.environ["AMADEUS_ID"] = AMADEUS_ID - os.environ["AMADEUS_KEY"] = AMADEUS_KEY - if not tool_server_flag: - start_tool_server() - time.sleep(MAX_SLEEP_TIME) - return gr.update(value="OK!") - -def show_avatar_imgs(tools_chosen): - if len(tools_chosen) == 0: - tools_chosen = list(valid_tools_info.keys()) - img_template = ' avatar {} ' - imgs = [valid_tools_info[tool]['avatar'] for tool in tools_chosen if valid_tools_info[tool]['avatar'] != None] - imgs = ' '.join([img_template.format(img, img, tool) for img, tool in zip(imgs, tools_chosen)]) - return [gr.update(value='' + imgs + '', visible=True), gr.update(visible=True)] - -def answer_by_tools(question, tools_chosen, model_chosen): - global return_msg - return_msg += [(question, None), (None, '...')] - yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()] - OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY', '') - - if len(tools_chosen) == 0: # if there is no tools chosen, we use all todo (TODO: What if the pool is too large.) 
- tools_chosen = list(valid_tools_info.keys()) - - if len(tools_chosen) == 1: - answerer = STQuestionAnswerer(OPENAI_API_KEY.strip(), stream_output=True, llm=model_chosen) - agent_executor = answerer.load_tools(tools_chosen[0], valid_tools_info[tools_chosen[0]], - prompt_type="react-with-tool-description", return_intermediate_steps=True) - else: - answerer = MTQuestionAnswerer(OPENAI_API_KEY.strip(), - load_valid_tools({k: tools_mappings[k] for k in tools_chosen}), - stream_output=True, llm=model_chosen) - - agent_executor = answerer.build_runner() - - global chat_history - chat_history += "Question: " + question + "\n" - question = chat_history - for inter in agent_executor(question): - if isinstance(inter, AgentFinish): continue - result_str = [] - return_msg.pop() - if isinstance(inter, dict): - result_str.append("Answer: {}".format(inter['output'])) - chat_history += "Answer:" + inter['output'] + "\n" - result_str.append("...") - else: - try: - not_observation = inter[0].log - except: - print(inter[0]) - not_observation = inter[0] - if not not_observation.startswith('Thought:'): - not_observation = "Thought: " + not_observation - chat_history += not_observation - not_observation = not_observation.replace('Thought:', 'Thought: ') - not_observation = not_observation.replace('Action:', 'Action: ') - not_observation = not_observation.replace('Action Input:', 'Action Input: ') - result_str.append("{}".format(not_observation)) - result_str.append("Action output:\n{}".format(inter[1])) - chat_history += "\nAction output:" + inter[1] + "\n" - result_str.append("...") - return_msg += [(None, result) for result in result_str] - yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()] - return_msg.pop() - if return_msg[-1][1].startswith("Answer: "): - return_msg[-1] = (return_msg[-1][0], return_msg[-1][1].replace("Answer: ", - "Final Answer: ")) - yield [gr.update(visible=True, value=return_msg), gr.update(visible=True), gr.update(visible=False)] - - -def retrieve(tools_search): - if tools_search == "": - return gr.update(choices=all_tools_list) - else: - url = "http://127.0.0.1:8079/retrieve" - param = { - "query": tools_search - } - response = requests.post(url, json=param) - result = response.json() - retrieved_tools = result["tools"] - return gr.update(choices=retrieved_tools) - - -def clear_retrieve(): - return [gr.update(value=""), gr.update(choices=all_tools_list)] - - -def clear_history(): - global return_msg - global chat_history - return_msg = [] - chat_history = "" - yield gr.update(visible=True, value=return_msg) - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(scale=14): - gr.Markdown("

    BMTools

    ") - with gr.Column(scale=1): - gr.Markdown('') - - with gr.Tab("Key setting"): - OPENAI_API_KEY = gr.Textbox(label="OpenAI API KEY:", placeholder="sk-...", type="text") - WOLFRAMALPH_APP_ID = gr.Textbox(label="Wolframalpha app id:", placeholder="Key to use wlframalpha", type="text") - WEATHER_API_KEYS = gr.Textbox(label="Weather api key:", placeholder="Key to use weather api", type="text") - BING_SUBSCRIPT_KEY = gr.Textbox(label="Bing subscript key:", placeholder="Key to use bing search", type="text") - ALPHA_VANTAGE_KEY = gr.Textbox(label="Stock api key:", placeholder="Key to use stock api", type="text") - BING_MAP_KEY = gr.Textbox(label="Bing map key:", placeholder="Key to use bing map", type="text") - BAIDU_TRANSLATE_KEY = gr.Textbox(label="Baidu translation key:", placeholder="Key to use baidu translation", type="text") - RAPIDAPI_KEY = gr.Textbox(label="Rapidapi key:", placeholder="Key to use zillow, airbnb and job search", type="text") - SERPER_API_KEY = gr.Textbox(label="Serper key:", placeholder="Key to use google serper and google scholar", type="text") - GPLACES_API_KEY = gr.Textbox(label="Google places key:", placeholder="Key to use google places", type="text") - SCENEX_API_KEY = gr.Textbox(label="Scenex api key:", placeholder="Key to use sceneXplain", type="text") - STEAMSHIP_API_KEY = gr.Textbox(label="Steamship api key:", placeholder="Key to use image generation", type="text") - HUGGINGFACE_API_KEY = gr.Textbox(label="Huggingface api key:", placeholder="Key to use models in huggingface hub", type="text") - AMADEUS_ID = gr.Textbox(label="Amadeus id:", placeholder="Id to use Amadeus", type="text") - AMADEUS_KEY = gr.Textbox(label="Amadeus key:", placeholder="Key to use Amadeus", type="text") - key_set_btn = gr.Button(value="Set keys!") - - - with gr.Tab("Chat with Tool"): - with gr.Row(): - with gr.Column(scale=4): - with gr.Row(): - with gr.Column(scale=0.85): - txt = gr.Textbox(show_label=False, placeholder="Question here. Use Shift+Enter to add new line.", - lines=1).style(container=False) - with gr.Column(scale=0.15, min_width=0): - buttonChat = gr.Button("Chat") - - chatbot = gr.Chatbot(show_label=False, visible=True).style(height=600) - buttonClear = gr.Button("Clear History") - buttonStop = gr.Button("Stop", visible=False) - - with gr.Column(scale=1): - model_chosen = gr.Dropdown( - list(available_models), value=DEFAULTMODEL, multiselect=False, label="Model provided", - info="Choose the model to solve your question, Default means ChatGPT." 
- ) - with gr.Row(): - tools_search = gr.Textbox( - lines=1, - label="Tools Search", - placeholder="Please input some text to search tools.", - ) - buttonSearch = gr.Button("Reset search condition") - tools_chosen = gr.CheckboxGroup( - choices=all_tools_list, - value=["chemical-prop"], - label="Tools provided", - info="Choose the tools to solve your question.", - ) - - key_set_btn.click(fn=set_environ, inputs=[ - OPENAI_API_KEY, - WOLFRAMALPH_APP_ID, - WEATHER_API_KEYS, - BING_SUBSCRIPT_KEY, - ALPHA_VANTAGE_KEY, - BING_MAP_KEY, - BAIDU_TRANSLATE_KEY, - RAPIDAPI_KEY, - SERPER_API_KEY, - GPLACES_API_KEY, - SCENEX_API_KEY, - STEAMSHIP_API_KEY, - HUGGINGFACE_API_KEY, - AMADEUS_ID, - AMADEUS_KEY, - ], outputs=key_set_btn) - key_set_btn.click(fn=load_tools, outputs=tools_chosen) - - tools_search.change(retrieve, tools_search, tools_chosen) - buttonSearch.click(clear_retrieve, [], [tools_search, tools_chosen]) - - txt.submit(lambda: [gr.update(value=''), gr.update(visible=False), gr.update(visible=True)], [], - [txt, buttonClear, buttonStop]) - inference_event = txt.submit(answer_by_tools, [txt, tools_chosen, model_chosen], [chatbot, buttonClear, buttonStop]) - buttonChat.click(answer_by_tools, [txt, tools_chosen, model_chosen], [chatbot, buttonClear, buttonStop]) - buttonStop.click(lambda: [gr.update(visible=True), gr.update(visible=False)], [], [buttonClear, buttonStop], - cancels=[inference_event]) - buttonClear.click(clear_history, [], chatbot) - -# demo.queue().launch(share=False, inbrowser=True, server_name="127.0.0.1", server_port=7001) -demo.queue().launch() \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Dr Br Ambedkar Biography In Telugu.pdf lhritage dun visionnaire qui a transform lInde moderne.md b/spaces/contluForse/HuggingGPT/assets/Dr Br Ambedkar Biography In Telugu.pdf lhritage dun visionnaire qui a transform lInde moderne.md deleted file mode 100644 index ed105e8265422db712bce72cc80471534a946c1e..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Dr Br Ambedkar Biography In Telugu.pdf lhritage dun visionnaire qui a transform lInde moderne.md +++ /dev/null @@ -1,13 +0,0 @@ - -


    -

    Published in 1988 in Marathi, this autobiography helped bring particular attention to the experiences of Dalit women. Pawar recounts her life over many decades including her childhood in a rural part of Western India, conversion to Buddhism, move to the great metropolis of Bombay, and involvement in the movement for Dalit emancipation. Among other things, the book, translated by Maya Pandit, powerfully registers the evolving nature of Dalit life and politics during the twentieth century.

    -

    Dr Br Ambedkar Biography In Telugu.pdf


    Download File ✒ ✒ ✒ https://ssurll.com/2uzw1f



    -

    Karukku is a 1992 award-winning autobiography in Tamil by a Christian Dalit woman, translated into English by Lakshmi Holmstrom. Bama details not only her experiences of prejudice in her village but also within the Christian denomination to which she belongs. It is a good reminder that caste is not just a Hindu problem. On its publication, the novel quickly became famous for its earthy language.

    -

    In 1907, he passed his matriculation examination and in the following year he entered Elphinstone College, which was affiliated to the University of Bombay, becoming, according to him, the first from his Mahar caste to do so. He wrote in his book, The Buddha and His Dhamma, that when he passed his English fourth standard examination, the people of his community wanted to celebrate because they considered that he had reached "great heights", which he says was "hardly an occasion compared to the state of education in other communities". The community organized a public ceremony to celebrate his success, and at this occasion he was presented with a biography of the Buddha by Dada Keluskar, the author and a family friend.[9][10]

    -

    As Ambedkar was educated by the Princely State of Baroda, he was bound to serve it. He was appointed Military Secretary to the Gaikwad but had to quit in a short time. He described the incident in his autobiography, Waiting for a Visa.[16] Thereafter, he tried to find ways to make a living for his growing family. He worked as a private tutor, as an accountant, and established an investment consulting business, but it failed when his clients learned that he was an untouchable.[17] In 1918, he became Professor of Political Economy in the Sydenham College of Commerce and Economics in Mumbai. Although he was successful with the students, other professors objected to his sharing a drinking-water jug with them.[18]

    -

    Bhimayana: Experiences of Untouchability is a graphic biography of Ambedkar created by Pardhan-Gond artists Durgabai Vyam and Subhash Vyam, and writers Srividya Natarajan and S. Anand. The book depicts the experiences of untouchability faced by Ambedkar from childhood to adulthood. CNN named it one of the top 5 political comic books.[137]

    -

    Babasaheb Ambedkar was a prolific and eminent writer who wrote more than any of his contemporary politicians.[151] He wrote a total of 32 books (10 of them incomplete), 10 memoranda, evidence and statements, 10 research documents, reviews of articles and books, and 10 prefaces and predictions.[152] Apart from this, he was also the author of the Indian Constitution. The Buddha and His Dhamma, his last book, serves as the scripture for followers of Navayana Buddhism.[153] Waiting for a Visa is his autobiography; the book is used as a textbook at Columbia University.[154][155] He also compiled a Pali-to-English dictionary. He was known to have knowledge of eleven languages, including Marathi (his mother tongue), English, Hindi, Pali, Sanskrit, Gujarati, German, Persian, French, Kannada and Bengali.[156] Although nearly all his writings are in English, he published his journals (weeklies and fortnightlies) in Marathi, the native language of Maharashtra.

    -

    -

    \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/base.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/base.py deleted file mode 100644 index 58c513987601d6a442ca8f066f82f1af46e28939..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/base.py +++ /dev/null @@ -1,80 +0,0 @@ -import abc -from typing import Tuple, List - -import torch -import torch.nn as nn - -from annotator.lama.saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv -from annotator.lama.saicinpainting.training.modules.multidilated_conv import MultidilatedConv - - -class BaseDiscriminator(nn.Module): - @abc.abstractmethod - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: - """ - Predict scores and get intermediate activations. Useful for feature matching loss - :return tuple (scores, list of intermediate activations) - """ - raise NotImplemented() - - -def get_conv_block_ctor(kind='default'): - if not isinstance(kind, str): - return kind - if kind == 'default': - return nn.Conv2d - if kind == 'depthwise': - return DepthWiseSeperableConv - if kind == 'multidilated': - return MultidilatedConv - raise ValueError(f'Unknown convolutional block kind {kind}') - - -def get_norm_layer(kind='bn'): - if not isinstance(kind, str): - return kind - if kind == 'bn': - return nn.BatchNorm2d - if kind == 'in': - return nn.InstanceNorm2d - raise ValueError(f'Unknown norm block kind {kind}') - - -def get_activation(kind='tanh'): - if kind == 'tanh': - return nn.Tanh() - if kind == 'sigmoid': - return nn.Sigmoid() - if kind is False: - return nn.Identity() - raise ValueError(f'Unknown activation kind {kind}') - - -class SimpleMultiStepGenerator(nn.Module): - def __init__(self, steps: List[nn.Module]): - super().__init__() - self.steps = nn.ModuleList(steps) - - def forward(self, x): - cur_in = x - outs = [] - for step in self.steps: - cur_out = step(cur_in) - outs.append(cur_out) - cur_in = torch.cat((cur_in, cur_out), dim=1) - return torch.cat(outs[::-1], dim=1) - -def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features): - if kind == 'convtranspose': - return [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - norm_layer(min(max_features, int(ngf * mult / 2))), activation] - elif kind == 'bilinear': - return [nn.Upsample(scale_factor=2, mode='bilinear'), - DepthWiseSeperableConv(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=1, padding=1), - norm_layer(min(max_features, int(ngf * mult / 2))), activation] - else: - raise Exception(f"Invalid deconv kind: {kind}") \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/coco_panoptic.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/coco_panoptic.py deleted file mode 100644 index a7180df512c29665222b1a90323ccfa7e7623137..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/datasets/coco_panoptic.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import copy -import json -import os - -from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog -from annotator.oneformer.detectron2.utils.file_io import PathManager - -from .coco import load_coco_json, load_sem_seg - -__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"] - - -def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta): - """ - Args: - image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". - gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". - json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". - - Returns: - list[dict]: a list of dicts in Detectron2 standard format. (See - `Using Custom Datasets `_ ) - """ - - def _convert_category_id(segment_info, meta): - if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: - segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = True - else: - segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ - segment_info["category_id"] - ] - segment_info["isthing"] = False - return segment_info - - with PathManager.open(json_file) as f: - json_info = json.load(f) - - ret = [] - for ann in json_info["annotations"]: - image_id = int(ann["image_id"]) - # TODO: currently we assume image and label has the same filename but - # different extension, and images have extension ".jpg" for COCO. Need - # to make image extension a user-provided argument if we extend this - # function to support other COCO-like datasets. - image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") - label_file = os.path.join(gt_dir, ann["file_name"]) - segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] - ret.append( - { - "file_name": image_file, - "image_id": image_id, - "pan_seg_file_name": label_file, - "segments_info": segments_info, - } - ) - assert len(ret), f"No images found in {image_dir}!" - assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] - assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] - return ret - - -def register_coco_panoptic( - name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None -): - """ - Register a "standard" version of COCO panoptic segmentation dataset named `name`. - The dictionaries in this registered dataset follows detectron2's standard format. - Hence it's called "standard". - - Args: - name (str): the name that identifies a dataset, - e.g. "coco_2017_train_panoptic" - metadata (dict): extra metadata associated with this dataset. - image_root (str): directory which contains all the images - panoptic_root (str): directory which contains panoptic annotation images in COCO format - panoptic_json (str): path to the json panoptic annotation file in COCO format - sem_seg_root (none): not used, to be consistent with - `register_coco_panoptic_separated`. 
- instances_json (str): path to the json instance annotation file - """ - panoptic_name = name - DatasetCatalog.register( - panoptic_name, - lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata), - ) - MetadataCatalog.get(panoptic_name).set( - panoptic_root=panoptic_root, - image_root=image_root, - panoptic_json=panoptic_json, - json_file=instances_json, - evaluator_type="coco_panoptic_seg", - ignore_label=255, - label_divisor=1000, - **metadata, - ) - - -def register_coco_panoptic_separated( - name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json -): - """ - Register a "separated" version of COCO panoptic segmentation dataset named `name`. - The annotations in this registered dataset will contain both instance annotations and - semantic annotations, each with its own contiguous ids. Hence it's called "separated". - - It follows the setting used by the PanopticFPN paper: - - 1. The instance annotations directly come from polygons in the COCO - instances annotation task, rather than from the masks in the COCO panoptic annotations. - - The two format have small differences: - Polygons in the instance annotations may have overlaps. - The mask annotations are produced by labeling the overlapped polygons - with depth ordering. - - 2. The semantic annotations are converted from panoptic annotations, where - all "things" are assigned a semantic id of 0. - All semantic categories will therefore have ids in contiguous - range [1, #stuff_categories]. - - This function will also register a pure semantic segmentation dataset - named ``name + '_stuffonly'``. - - Args: - name (str): the name that identifies a dataset, - e.g. "coco_2017_train_panoptic" - metadata (dict): extra metadata associated with this dataset. - image_root (str): directory which contains all the images - panoptic_root (str): directory which contains panoptic annotation images - panoptic_json (str): path to the json panoptic annotation file - sem_seg_root (str): directory which contains all the ground truth segmentation annotations. - instances_json (str): path to the json instance annotation file - """ - panoptic_name = name + "_separated" - DatasetCatalog.register( - panoptic_name, - lambda: merge_to_panoptic( - load_coco_json(instances_json, image_root, panoptic_name), - load_sem_seg(sem_seg_root, image_root), - ), - ) - MetadataCatalog.get(panoptic_name).set( - panoptic_root=panoptic_root, - image_root=image_root, - panoptic_json=panoptic_json, - sem_seg_root=sem_seg_root, - json_file=instances_json, # TODO rename - evaluator_type="coco_panoptic_seg", - ignore_label=255, - **metadata, - ) - - semantic_name = name + "_stuffonly" - DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) - MetadataCatalog.get(semantic_name).set( - sem_seg_root=sem_seg_root, - image_root=image_root, - evaluator_type="sem_seg", - ignore_label=255, - **metadata, - ) - - -def merge_to_panoptic(detection_dicts, sem_seg_dicts): - """ - Create dataset dicts for panoptic segmentation, by - merging two dicts using "file_name" field to match their entries. - - Args: - detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation. - sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation. - - Returns: - list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in - both detection_dicts and sem_seg_dicts that correspond to the same image. 
- The function assumes that the same key in different dicts has the same value. - """ - results = [] - sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts} - assert len(sem_seg_file_to_entry) > 0 - - for det_dict in detection_dicts: - dic = copy.copy(det_dict) - dic.update(sem_seg_file_to_entry[dic["file_name"]]) - results.append(dic) - return results - - -if __name__ == "__main__": - """ - Test the COCO panoptic dataset loader. - - Usage: - python -m detectron2.data.datasets.coco_panoptic \ - path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10 - - "dataset_name" can be "coco_2017_train_panoptic", or other - pre-registered ones - """ - from annotator.oneformer.detectron2.utils.logger import setup_logger - from annotator.oneformer.detectron2.utils.visualizer import Visualizer - import annotator.oneformer.detectron2.data.datasets # noqa # add pre-defined metadata - import sys - from PIL import Image - import numpy as np - - logger = setup_logger(name=__name__) - assert sys.argv[4] in DatasetCatalog.list() - meta = MetadataCatalog.get(sys.argv[4]) - - dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict()) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "coco-data-vis" - os.makedirs(dirname, exist_ok=True) - num_imgs_to_vis = int(sys.argv[5]) - for i, d in enumerate(dicts): - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) - if i + 1 >= num_imgs_to_vis: - break diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/three_nn.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/three_nn.py deleted file mode 100644 index 2b01047a129989cd5545a0a86f23a487f4a13ce1..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/three_nn.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing import Tuple - -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['three_nn_forward']) - - -class ThreeNN(Function): - """Find the top-3 nearest neighbors of the target set from the source set. - - Please refer to `Paper of PointNet++ `_ - for more details. - """ - - @staticmethod - def forward(ctx, target: torch.Tensor, - source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - target (Tensor): shape (B, N, 3), points set that needs to - find the nearest neighbors. - source (Tensor): shape (B, M, 3), points set that is used - to find the nearest neighbors of points in target set. - - Returns: - Tensor: shape (B, N, 3), L2 distance of each point in target - set to their corresponding nearest neighbors. 
- """ - target = target.contiguous() - source = source.contiguous() - - B, N, _ = target.size() - m = source.size(1) - dist2 = torch.cuda.FloatTensor(B, N, 3) - idx = torch.cuda.IntTensor(B, N, 3) - - ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(idx) - - return torch.sqrt(dist2), idx - - @staticmethod - def backward(ctx, a=None, b=None): - return None, None - - -three_nn = ThreeNN.apply diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/vkitti.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/vkitti.py deleted file mode 100644 index 72a2e5a8346f6e630ede0e28d6959725af8d7e72..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/vkitti.py +++ /dev/null @@ -1,151 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -# File author: Shariq Farooq Bhat - -import torch -from torch.utils.data import Dataset, DataLoader -from torchvision import transforms -import os - -from PIL import Image -import numpy as np -import cv2 - - -class ToTensor(object): - def __init__(self): - self.normalize = transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - # self.resize = transforms.Resize((375, 1242)) - - def __call__(self, sample): - image, depth = sample['image'], sample['depth'] - - image = self.to_tensor(image) - image = self.normalize(image) - depth = self.to_tensor(depth) - - # image = self.resize(image) - - return {'image': image, 'depth': depth, 'dataset': "vkitti"} - - def to_tensor(self, pic): - - if isinstance(pic, np.ndarray): - img = torch.from_numpy(pic.transpose((2, 0, 1))) - return img - - # # handle PIL Image - if pic.mode == 'I': - img = torch.from_numpy(np.array(pic, np.int32, copy=False)) - elif pic.mode == 'I;16': - img = torch.from_numpy(np.array(pic, np.int16, copy=False)) - else: - img = torch.ByteTensor( - torch.ByteStorage.from_buffer(pic.tobytes())) - # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK - if pic.mode == 'YCbCr': - nchannel = 3 - elif pic.mode == 'I;16': - nchannel = 1 - else: - nchannel = len(pic.mode) - img = img.view(pic.size[1], pic.size[0], nchannel) - - img = img.transpose(0, 1).transpose(0, 2).contiguous() - if isinstance(img, torch.ByteTensor): - return img.float() - else: - return img - - -class VKITTI(Dataset): - def __init__(self, data_dir_root, do_kb_crop=True): - import glob - # image paths are of the form /{HR, LR}//{color, depth_filled}/*.png - self.image_files = glob.glob(os.path.join( - data_dir_root, "test_color", '*.png')) - self.depth_files = [r.replace("test_color", "test_depth") - for r in self.image_files] - self.do_kb_crop = True - self.transform = ToTensor() - - def __getitem__(self, idx): - image_path = self.image_files[idx] - depth_path = self.depth_files[idx] - - image = Image.open(image_path) - depth = Image.open(depth_path) - depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | - cv2.IMREAD_ANYDEPTH) - print("dpeth min max", depth.min(), depth.max()) - - # print(np.shape(image)) - # print(np.shape(depth)) - - # depth[depth > 8] = -1 - - if self.do_kb_crop and False: - height = image.height - width = image.width - top_margin = int(height - 352) - left_margin = int((width - 1216) / 2) - depth = depth.crop( - (left_margin, top_margin, left_margin + 1216, top_margin + 352)) - image = image.crop( - (left_margin, top_margin, left_margin + 1216, top_margin + 352)) - # uv = uv[:, top_margin:top_margin + 352, left_margin:left_margin + 1216] - - image = np.asarray(image, dtype=np.float32) / 255.0 - # depth = np.asarray(depth, dtype=np.uint16) /1. 
- depth = depth[..., None] - sample = dict(image=image, depth=depth) - - # return sample - sample = self.transform(sample) - - if idx == 0: - print(sample["image"].shape) - - return sample - - def __len__(self): - return len(self.image_files) - - -def get_vkitti_loader(data_dir_root, batch_size=1, **kwargs): - dataset = VKITTI(data_dir_root) - return DataLoader(dataset, batch_size, **kwargs) - - -if __name__ == "__main__": - loader = get_vkitti_loader( - data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti_test") - print("Total files", len(loader.dataset)) - for i, sample in enumerate(loader): - print(sample["image"].shape) - print(sample["depth"].shape) - print(sample["dataset"]) - print(sample['depth'].min(), sample['depth'].max()) - if i > 5: - break diff --git a/spaces/cuiltheory/stable-diffusion-2-base/app.py b/spaces/cuiltheory/stable-diffusion-2-base/app.py deleted file mode 100644 index 2f517b58da01a9274b9bd43ff5daad4af65d365b..0000000000000000000000000000000000000000 --- a/spaces/cuiltheory/stable-diffusion-2-base/app.py +++ /dev/null @@ -1,154 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'stabilityai/stable-diffusion-2-base' -prefix = '' - -scheduler = DPMSolverMultistepScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - trained_betas=None, - predict_epsilon=True, - thresholding=False, - algorithm_type="dpmsolver++", - solver_type="midpoint", - lower_order_final=True, -) - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=True): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return replace_nsfw_images(result) - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return replace_nsfw_images(result) - -def replace_nsfw_images(results): - - for i 
in range(len(results.images)): - if results.nsfw_content_detected[i]: - results.images[i] = Image.open("nsfw.png") - return results.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
-              <div class="main-div">
-                <div>
-                  <h1>Stable Diffusion 2 Base</h1>
-                </div>
-                <p>
-                  Demo for the Stable Diffusion 2 Base Stable Diffusion model.
-                  Add the following tokens to your prompts for the model to work properly: .
-                </p>
-                Running on {"GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"}
-              </div>
    - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=True) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
-        <div>
-          <p>This space was created using SD Space Creator.</p>
-        </div>
    - """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/README.md b/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/README.md deleted file mode 100644 index a9201506656ae860ac607aadc3d04b40e263a542..0000000000000000000000000000000000000000 --- a/spaces/cyberoleg/b2719240e190e2a649150d94db50be82838efeb0/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Stable Diffusion ControlNet WebUI -emoji: ⚡ -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.19.0 -app_file: app.py -pinned: false -license: apache-2.0 -tags: -- making-demos -duplicated_from: ArtGAN/Stable-Diffusion-ControlNet-WebUI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/nn/nonlinearity_factory.py b/spaces/cymic/Talking_Head_Anime_3/tha3/nn/nonlinearity_factory.py deleted file mode 100644 index db8af392a8315beaac8a565697f6ca458b02d7b9..0000000000000000000000000000000000000000 --- a/spaces/cymic/Talking_Head_Anime_3/tha3/nn/nonlinearity_factory.py +++ /dev/null @@ -1,72 +0,0 @@ -from typing import Optional - -from torch.nn import Module, ReLU, LeakyReLU, ELU, ReLU6, Hardswish, SiLU, Tanh, Sigmoid - -from tha3.module.module_factory import ModuleFactory - - -class ReLUFactory(ModuleFactory): - def __init__(self, inplace: bool = False): - self.inplace = inplace - - def create(self) -> Module: - return ReLU(self.inplace) - - -class LeakyReLUFactory(ModuleFactory): - def __init__(self, inplace: bool = False, negative_slope: float = 1e-2): - self.negative_slope = negative_slope - self.inplace = inplace - - def create(self) -> Module: - return LeakyReLU(inplace=self.inplace, negative_slope=self.negative_slope) - - -class ELUFactory(ModuleFactory): - def __init__(self, inplace: bool = False, alpha: float = 1.0): - self.alpha = alpha - self.inplace = inplace - - def create(self) -> Module: - return ELU(inplace=self.inplace, alpha=self.alpha) - - -class ReLU6Factory(ModuleFactory): - def __init__(self, inplace: bool = False): - self.inplace = inplace - - def create(self) -> Module: - return ReLU6(inplace=self.inplace) - - -class SiLUFactory(ModuleFactory): - def __init__(self, inplace: bool = False): - self.inplace = inplace - - def create(self) -> Module: - return SiLU(inplace=self.inplace) - - -class HardswishFactory(ModuleFactory): - def __init__(self, inplace: bool = False): - self.inplace = inplace - - def create(self) -> Module: - return Hardswish(inplace=self.inplace) - - -class TanhFactory(ModuleFactory): - def create(self) -> Module: - return Tanh() - - -class SigmoidFactory(ModuleFactory): - def create(self) -> Module: - return Sigmoid() - - -def resolve_nonlinearity_factory(nonlinearity_fatory: Optional[ModuleFactory]) -> ModuleFactory: - if nonlinearity_fatory is None: - return ReLUFactory(inplace=False) - else: - return nonlinearity_fatory diff --git a/spaces/datasciencedojo/Brain_Stroke_Prediction/app.py b/spaces/datasciencedojo/Brain_Stroke_Prediction/app.py deleted file mode 100644 index 79f73993bc8f5913ec3b205fbcbba467854b0703..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Brain_Stroke_Prediction/app.py +++ /dev/null @@ -1,131 +0,0 @@ -import gradio as gr -import pickle -from sklearn import preprocessing -import pandas as pd - -filename = 'knn_model.sav' - -loaded_model = pickle.load(open(filename, 'rb')) - - - -def hptension(hp): - if hp == 'yes': - return 1 - else: - 
return 0 - -def ht_dis(ht): - if ht == 'yes': - return 1 - else: - return 0 - - -def gender_select(gen): - if gen == 'male': - return 1 - else: - return 0 - -def age_group_selector(age_grp): - if age_grp == '0-16': return 0 - elif age_grp =='17-32': return 1 - elif age_grp =='33-48': return 2 - elif age_grp =='49-64': return 3 - else: return 4 - - -def smoker_cat(smoke): - if smoke == 'formerly smoked': return 0 - elif smoke =='never smoked': return 1 - elif smoke =='smokes': return 2 - else: return 3 - - -def predict_insurance(input_gender,input_age_group,input_hypertension,input_heart_disease,input_avg_glucose_level,input_bmi,input_smoking_status): - - input_gender,input_age_group,input_hypertension,input_heart_disease,input_avg_glucose_level,input_bmi,input_smoking_status = input_gender,input_age_group,input_hypertension,input_heart_disease,input_avg_glucose_level,input_bmi,input_smoking_status - - series = {'gender': [gender_select(input_gender)], - 'age_band': [age_group_selector(input_age_group)], - 'hypertension': [hptension(input_hypertension)], - 'heart_disease': [ht_dis(input_heart_disease)], - 'avg_glucose_level': [input_avg_glucose_level /272], - 'bmi': [input_bmi/49], - 'smoking_status': [smoker_cat(input_smoking_status)], - } - - vector = pd.DataFrame(series) - - result = loaded_model.predict(vector) - if result[0] == 1: - return "Risk of having stroke is high" - else: - return "Risk of having stroke is low" - -css = """ -footer {display:none !important} -.output-markdown{display:none !important} -footer {visibility: hidden} - -.gr-button-lg { - z-index: 14; - width: 113px; - height: 30px; - left: 0px; - top: 0px; - padding: 0px; - cursor: pointer !important; - background: none rgb(17, 20, 45) !important; - border: none !important; - text-align: center !important; - font-size: 14px !important; - font-weight: 500 !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: none !important; -} -.gr-button-lg:hover{ - z-index: 14; - width: 113px; - height: 30px; - left: 0px; - top: 0px; - padding: 0px; - cursor: pointer !important; - background: none rgb(37, 56, 133) !important; - border: none !important; - text-align: center !important; - font-size: 14px !important; - font-weight: 500 !important; - color: rgb(255, 255, 255) !important; - line-height: 1 !important; - border-radius: 6px !important; - transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important; - box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important; -} - -""" - -with gr.Blocks(title="Brain Stroke Prediction | Data Science Dojo", css = css) as demo: - with gr.Row(): - input_gender = gr.Radio(["male", "female"],label="Gender") - input_hypertension = gr.Radio(["yes", "no"],label="Hypertension") - input_heart_disease = gr.Radio(["yes", "no"],label="Heart disease") - with gr.Row(): - input_age_group = gr.Dropdown(['0-16','17-32','33-48','49-64','64+'],label='Age Group') - input_smoking_status = gr.Dropdown(['formerly smoked', 'never smoked', 'smokes', 'Prefer not to say'],label='Smoker') - with gr.Row(): - input_avg_glucose_level = gr.Slider(0, 270,label='Average Glucose Level') - with gr.Row(): - input_bmi = gr.Slider(0, 45,label='BMI Range') - with gr.Row(): - stroke = gr.Textbox(label='Chances of stroke') - btn_ins = gr.Button(value="Submit") - btn_ins.click(fn=predict_insurance, inputs=[input_gender,input_age_group,input_hypertension,input_heart_disease, - 
input_avg_glucose_level,input_bmi,input_smoking_status], outputs=[stroke]) - -demo.launch() \ No newline at end of file diff --git a/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/archs/arcface_arch.py b/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/archs/arcface_arch.py deleted file mode 100644 index fe5afb7bd2b359e0c2b7efdf628ab10b63964d87..0000000000000000000000000000000000000000 --- a/spaces/dawood17/SayBot_Enchancer/CodeFormer/basicsr/archs/arcface_arch.py +++ /dev/null @@ -1,245 +0,0 @@ -import torch.nn as nn -from basicsr.utils.registry import ARCH_REGISTRY - - -def conv3x3(inplanes, outplanes, stride=1): - """A simple wrapper for 3x3 convolution with padding. - - Args: - inplanes (int): Channel number of inputs. - outplanes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - """ - return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False) - - -class BasicBlock(nn.Module): - """Basic residual block used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - downsample (nn.Module): The downsample module. Default: None. - """ - expansion = 1 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class IRBlock(nn.Module): - """Improved residual block (IR Block) used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - downsample (nn.Module): The downsample module. Default: None. - use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. - """ - expansion = 1 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): - super(IRBlock, self).__init__() - self.bn0 = nn.BatchNorm2d(inplanes) - self.conv1 = conv3x3(inplanes, inplanes) - self.bn1 = nn.BatchNorm2d(inplanes) - self.prelu = nn.PReLU() - self.conv2 = conv3x3(inplanes, planes, stride) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.use_se = use_se - if self.use_se: - self.se = SEBlock(planes) - - def forward(self, x): - residual = x - out = self.bn0(x) - out = self.conv1(out) - out = self.bn1(out) - out = self.prelu(out) - - out = self.conv2(out) - out = self.bn2(out) - if self.use_se: - out = self.se(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.prelu(out) - - return out - - -class Bottleneck(nn.Module): - """Bottleneck block used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - downsample (nn.Module): The downsample module. Default: None. 
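To make the channel contract of this block concrete, here is a minimal shape check (an illustrative sketch, not part of the deleted file; the tensor sizes are assumptions):

    import torch
    import torch.nn as nn

    # stride 1 and inplanes == planes * expansion: the identity residual works
    block = Bottleneck(inplanes=256, planes=64)
    assert block(torch.randn(1, 256, 32, 32)).shape == (1, 256, 32, 32)

    # otherwise a downsample module must map the residual to the new shape,
    # exactly as ResNetArcFace._make_layer builds it below
    down = nn.Sequential(
        nn.Conv2d(256, 128 * Bottleneck.expansion, kernel_size=1, stride=2, bias=False),
        nn.BatchNorm2d(128 * Bottleneck.expansion),
    )
    block2 = Bottleneck(inplanes=256, planes=128, stride=2, downsample=down)
    assert block2(torch.randn(1, 256, 32, 32)).shape == (1, 512, 16, 16)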
- """ - expansion = 4 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class SEBlock(nn.Module): - """The squeeze-and-excitation block (SEBlock) used in the IRBlock. - - Args: - channel (int): Channel number of inputs. - reduction (int): Channel reduction ration. Default: 16. - """ - - def __init__(self, channel, reduction=16): - super(SEBlock, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), - nn.Sigmoid()) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - return x * y - - -@ARCH_REGISTRY.register() -class ResNetArcFace(nn.Module): - """ArcFace with ResNet architectures. - - Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition. - - Args: - block (str): Block used in the ArcFace architecture. - layers (tuple(int)): Block numbers in each layer. - use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. 
- """ - - def __init__(self, block, layers, use_se=True): - if block == 'IRBlock': - block = IRBlock - self.inplanes = 64 - self.use_se = use_se - super(ResNetArcFace, self).__init__() - - self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.prelu = nn.PReLU() - self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.bn4 = nn.BatchNorm2d(512) - self.dropout = nn.Dropout() - self.fc5 = nn.Linear(512 * 8 * 8, 512) - self.bn5 = nn.BatchNorm1d(512) - - # initialization - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.xavier_normal_(m.weight) - elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.xavier_normal_(m.weight) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, planes, num_blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) - self.inplanes = planes - for _ in range(1, num_blocks): - layers.append(block(self.inplanes, planes, use_se=self.use_se)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.bn4(x) - x = self.dropout(x) - x = x.view(x.size(0), -1) - x = self.fc5(x) - x = self.bn5(x) - - return x \ No newline at end of file diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py deleted file mode 100644 index 8371795eb2f2d2c233ec1725b8a2c21453170f23..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_V_A_R_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_M_V_A_R_(BaseTTXConverter): - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt5cairo.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt5cairo.py deleted file mode 100644 index a4263f5971191a26d849a741a24cf40f7ea8b9ac..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/matplotlib/backends/backend_qt5cairo.py +++ /dev/null @@ -1,11 +0,0 @@ -from .. 
import backends - -backends._QT_FORCE_QT5_BINDING = True -from .backend_qtcairo import ( # noqa: F401, E402 # pylint: disable=W0611 - _BackendQTCairo, FigureCanvasQTCairo, FigureCanvasCairo, FigureCanvasQT -) - - -@_BackendQTCairo.export -class _BackendQT5Cairo(_BackendQTCairo): - pass diff --git a/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Yqcloud.py b/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Yqcloud.py deleted file mode 100644 index ad5c3a4326c68ceb7ee012fbf5bc072da72a7e40..0000000000000000000000000000000000000000 --- a/spaces/dcq/freegpt-webui/g4f/Provider/Providers/Yqcloud.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import time -import requests - -from ...typing import sha256, Dict, get_type_hints -url = 'https://chat9.yqcloud.top/' -model = [ - 'gpt-3.5-turbo', -] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs): - - headers = { - 'authority': 'api.aichatos.cloud', - 'origin': 'https://chat9.yqcloud.top', - 'referer': 'https://chat9.yqcloud.top/', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', - } - - json_data = { - 'prompt': str(messages), - 'userId': f'#/chat/{chatId}', - 'network': True, - 'apikey': '', - 'system': '', - 'withoutContext': False, - } - response = requests.post('https://api.aichatos.cloud/api/generateStream', - headers=headers, json=json_data, stream=True) - for token in response.iter_content(chunk_size=2046): - yield (token.decode('utf-8')) - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pndm/pipeline_pndm.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pndm/pipeline_pndm.py deleted file mode 100644 index 56fb72d3f4ff9827da4b35e2a1ef9095fa741f01..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/pndm/pipeline_pndm.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import List, Optional, Tuple, Union - -import torch - -from ...models import UNet2DModel -from ...schedulers import PNDMScheduler -from ...utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput - - -class PNDMPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - unet (`UNet2DModel`): U-Net architecture to denoise the encoded image latents. 
- scheduler ([`SchedulerMixin`]): - The `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. - """ - - unet: UNet2DModel - scheduler: PNDMScheduler - - def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler): - super().__init__() - - scheduler = PNDMScheduler.from_config(scheduler.config) - - self.register_modules(unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - num_inference_steps: int = 50, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[ImagePipelineOutput, Tuple]: - r""" - Args: - batch_size (`int`, `optional`, defaults to 1): The number of images to generate. - num_inference_steps (`int`, `optional`, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - generator (`torch.Generator`, `optional`): A [torch - generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - output_type (`str`, `optional`, defaults to `"pil"`): The output format of the generate image. Choose - between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, `optional`, defaults to `True`): Whether or not to return a - [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is - True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. - """ - # For more information on the sampling method you can take a look at Algorithm 2 of - # the official paper: https://arxiv.org/pdf/2202.09778.pdf - - # Sample gaussian noise to begin loop - image = randn_tensor( - (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size), - generator=generator, - device=self.device, - ) - - self.scheduler.set_timesteps(num_inference_steps) - for t in self.progress_bar(self.scheduler.timesteps): - model_output = self.unet(image, t).sample - - image = self.scheduler.step(model_output, t, image).prev_sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py b/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py deleted file mode 100644 index 45371121e66b8ffdcecb5cc86a91758e436b2955..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/schedulers/test_scheduler_kdpm2_ancestral.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch - -from diffusers import KDPM2AncestralDiscreteScheduler -from diffusers.utils import torch_device - -from .test_schedulers import SchedulerCommonTest - - -class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest): - scheduler_classes = (KDPM2AncestralDiscreteScheduler,) - num_inference_steps = 10 - - def get_scheduler_config(self, **kwargs): - config = { - "num_train_timesteps": 1100, - "beta_start": 0.0001, - "beta_end": 0.02, - "beta_schedule": "linear", - } - - config.update(**kwargs) - return config - - def test_timesteps(self): - for timesteps in [10, 50, 100, 1000]: - 
self.check_over_configs(num_train_timesteps=timesteps) - - def test_betas(self): - for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): - self.check_over_configs(beta_start=beta_start, beta_end=beta_end) - - def test_schedules(self): - for schedule in ["linear", "scaled_linear"]: - self.check_over_configs(beta_schedule=schedule) - - def test_full_loop_no_noise(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps) - - generator = torch.manual_seed(0) - - model = self.dummy_model() - sample = self.dummy_sample_deter * scheduler.init_noise_sigma - sample = sample.to(torch_device) - - for i, t in enumerate(scheduler.timesteps): - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 13849.3877) < 1e-2 - assert abs(result_mean.item() - 18.0331) < 5e-3 - - def test_prediction_type(self): - for prediction_type in ["epsilon", "v_prediction"]: - self.check_over_configs(prediction_type=prediction_type) - - def test_full_loop_with_v_prediction(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps) - - model = self.dummy_model() - sample = self.dummy_sample_deter * scheduler.init_noise_sigma - sample = sample.to(torch_device) - - generator = torch.manual_seed(0) - - for i, t in enumerate(scheduler.timesteps): - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 328.9970) < 1e-2 - assert abs(result_mean.item() - 0.4284) < 1e-3 - - def test_full_loop_device(self): - if torch_device == "mps": - return - scheduler_class = self.scheduler_classes[0] - scheduler_config = self.get_scheduler_config() - scheduler = scheduler_class(**scheduler_config) - - scheduler.set_timesteps(self.num_inference_steps, device=torch_device) - generator = torch.manual_seed(0) - - model = self.dummy_model() - sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma - - for t in scheduler.timesteps: - sample = scheduler.scale_model_input(sample, t) - - model_output = model(sample, t) - - output = scheduler.step(model_output, t, sample, generator=generator) - sample = output.prev_sample - - result_sum = torch.sum(torch.abs(sample)) - result_mean = torch.mean(torch.abs(sample)) - - assert abs(result_sum.item() - 13849.3818) < 1e-1 - assert abs(result_mean.item() - 18.0331) < 1e-3 diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/base_model.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/base_model.py deleted file mode 100644 index cfe64a7f739ad8f8cfbf3073a2bf49e1468127fd..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/base_model.py +++ /dev/null 
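The deleted tests above all drive the same generic k-diffusion sampling loop: set_timesteps, then scale_model_input, model call, and step for each timestep. A minimal standalone sketch of that loop, with a dummy denoiser standing in for the UNet (the toy model and tensor shapes are assumptions):

    import torch
    from diffusers import KDPM2AncestralDiscreteScheduler

    def dummy_model(x, t):
        # stand-in for the UNet the tests build with self.dummy_model()
        return 0.1 * x

    scheduler = KDPM2AncestralDiscreteScheduler(
        num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
    )
    scheduler.set_timesteps(10)
    generator = torch.manual_seed(0)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)  # k-diffusion sigma scaling
        noise_pred = dummy_model(model_input, t)
        # the ancestral sampler draws fresh noise inside step(), hence the generator
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample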
@@ -1,316 +0,0 @@
-"""This script defines the base network model for Deep3DFaceRecon_pytorch
-"""
-
-import os
-import numpy as np
-import torch
-from collections import OrderedDict
-from abc import ABC, abstractmethod
-from . import networks
-
-
-class BaseModel(ABC):
-    """This class is an abstract base class (ABC) for models.
-    To create a subclass, you need to implement the following five functions:
-        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-        -- <set_input>: unpack data from dataset and apply preprocessing.
-        -- <forward>: produce intermediate results.
-        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
-        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
-    """
-
-    def __init__(self, opt):
-        """Initialize the BaseModel class.
-
-        Parameters:
-            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
-
-        When creating your custom class, you need to implement your own initialization.
-        In this function, you should first call <BaseModel.__init__(self, opt)>.
-        Then, you need to define four lists:
-            -- self.loss_names (str list): specify the training losses that you want to plot and save.
-            -- self.model_names (str list): define networks used in our training.
-            -- self.visual_names (str list): specify the images that you want to display and save.
-            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
-        """
-        self.opt = opt
-        self.isTrain = False
-        self.device = torch.device('cpu')
-        self.save_dir = " "  # os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
-        self.loss_names = []
-        self.model_names = []
-        self.visual_names = []
-        self.parallel_names = []
-        self.optimizers = []
-        self.image_paths = []
-        self.metric = 0  # used for learning rate policy 'plateau'
-
-    @staticmethod
-    def dict_grad_hook_factory(add_func=lambda x: x):
-        saved_dict = dict()
-
-        def hook_gen(name):
-            def grad_hook(grad):
-                saved_vals = add_func(grad)
-                saved_dict[name] = saved_vals
-            return grad_hook
-        return hook_gen, saved_dict
-
-    @staticmethod
-    def modify_commandline_options(parser, is_train):
-        """Add new model-specific options, and rewrite default values for existing options.
-
-        Parameters:
-            parser          -- original option parser
-            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
-        Returns:
-            the modified parser.
-        """
-        return parser
-
-    @abstractmethod
-    def set_input(self, input):
-        """Unpack input data from the dataloader and perform necessary pre-processing steps.
-
-        Parameters:
-            input (dict): includes the data itself and its metadata information.
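To make the subclassing contract described above concrete, here is a minimal illustrative subclass (a sketch only; every name beyond the documented attributes is hypothetical):

    import torch

    class ToyModel(BaseModel):
        def __init__(self, opt):
            BaseModel.__init__(self, opt)
            self.loss_names = ['l1']      # read back by get_current_losses() as self.loss_l1
            self.model_names = ['net']    # train()/eval()/save_networks() look these up via getattr
            self.visual_names = ['output']
            self.net = torch.nn.Linear(4, 4).to(self.device)
            self.optimizers = [torch.optim.Adam(self.net.parameters())]

        def set_input(self, input):
            self.data = input['A'].to(self.device)
            self.target = input['B'].to(self.device)

        def forward(self):
            self.output = self.net(self.data)

        def optimize_parameters(self):
            self.forward()
            self.loss_l1 = torch.nn.functional.l1_loss(self.output, self.target)
            self.optimizers[0].zero_grad()
            self.loss_l1.backward()
            self.optimizers[0].step()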
- """ - pass - - @abstractmethod - def forward(self): - """Run forward pass; called by both functions and .""" - pass - - @abstractmethod - def optimize_parameters(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - pass - - def setup(self, opt): - """Load and print networks; create schedulers - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - if self.isTrain: - self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] - - if not self.isTrain or opt.continue_train: - load_suffix = opt.epoch - self.load_networks(load_suffix) - - - # self.print_networks(opt.verbose) - - def parallelize(self, convert_sync_batchnorm=True): - if not self.opt.use_ddp: - for name in self.parallel_names: - if isinstance(name, str): - module = getattr(self, name) - setattr(self, name, module.to(self.device)) - else: - for name in self.model_names: - if isinstance(name, str): - module = getattr(self, name) - if convert_sync_batchnorm: - module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module) - setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device), - device_ids=[self.device.index], - find_unused_parameters=True, broadcast_buffers=True)) - - # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient. - for name in self.parallel_names: - if isinstance(name, str) and name not in self.model_names: - module = getattr(self, name) - setattr(self, name, module.to(self.device)) - - # put state_dict of optimizer to gpu device - if self.opt.phase != 'test': - if self.opt.continue_train: - for optim in self.optimizers: - for state in optim.state.values(): - for k, v in state.items(): - if isinstance(v, torch.Tensor): - state[k] = v.to(self.device) - - def data_dependent_initialize(self, data): - pass - - def train(self): - """Make models train mode""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - net.train() - - def eval(self): - """Make models eval mode""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - net.eval() - - def test(self): - """Forward function used in test time. - - This function wraps function in no_grad() so we don't save intermediate steps for backprop - It also calls to produce additional visualization results - """ - with torch.no_grad(): - self.forward() - self.compute_visuals() - - def compute_visuals(self): - """Calculate additional output images for visdom and HTML visualization""" - pass - - def get_image_paths(self, name='A'): - """ Return image paths that are used to load current data""" - return self.image_paths if name =='A' else self.image_paths_B - - def update_learning_rate(self): - """Update learning rates for all the networks; called at the end of every epoch""" - for scheduler in self.schedulers: - if self.opt.lr_policy == 'plateau': - scheduler.step(self.metric) - else: - scheduler.step() - - lr = self.optimizers[0].param_groups[0]['lr'] - print('learning rate = %.7f' % lr) - - def get_current_visuals(self): - """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" - visual_ret = OrderedDict() - for name in self.visual_names: - if isinstance(name, str): - visual_ret[name] = getattr(self, name)[:, :3, ...] - return visual_ret - - def get_current_losses(self): - """Return traning losses / errors. 
train.py will print out these errors on console, and save them to a file""" - errors_ret = OrderedDict() - for name in self.loss_names: - if isinstance(name, str): - errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number - return errors_ret - - def save_networks(self, epoch): - """Save all the networks to the disk. - - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - if not os.path.isdir(self.save_dir): - os.makedirs(self.save_dir) - - save_filename = 'epoch_%s.pth' % (epoch) - save_path = os.path.join(self.save_dir, save_filename) - - save_dict = {} - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - if isinstance(net, torch.nn.DataParallel) or isinstance(net, - torch.nn.parallel.DistributedDataParallel): - net = net.module - save_dict[name] = net.state_dict() - - - for i, optim in enumerate(self.optimizers): - save_dict['opt_%02d'%i] = optim.state_dict() - - for i, sched in enumerate(self.schedulers): - save_dict['sched_%02d'%i] = sched.state_dict() - - torch.save(save_dict, save_path) - - def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): - """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" - key = keys[i] - if i + 1 == len(keys): # at the end, pointing to a parameter/buffer - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'running_mean' or key == 'running_var'): - if getattr(module, key) is None: - state_dict.pop('.'.join(keys)) - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'num_batches_tracked'): - state_dict.pop('.'.join(keys)) - else: - self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) - - def load_networks(self, epoch): - """Load all the networks from the disk. 
- - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - if self.opt.isTrain and self.opt.pretrained_name is not None: - load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name) - else: - load_dir = self.save_dir - load_filename = 'epoch_%s.pth' % (epoch) - load_path = os.path.join(load_dir, load_filename) - state_dict = torch.load(load_path, map_location=self.device) - print('loading the model from %s' % load_path) - - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - if isinstance(net, torch.nn.DataParallel): - net = net.module - net.load_state_dict(state_dict[name]) - - if self.opt.phase != 'test': - if self.opt.continue_train: - print('loading the optim from %s' % load_path) - for i, optim in enumerate(self.optimizers): - optim.load_state_dict(state_dict['opt_%02d'%i]) - - try: - print('loading the sched from %s' % load_path) - for i, sched in enumerate(self.schedulers): - sched.load_state_dict(state_dict['sched_%02d'%i]) - except: - print('Failed to load schedulers, set schedulers according to epoch count manually') - for i, sched in enumerate(self.schedulers): - sched.last_epoch = self.opt.epoch_count - 1 - - - - - def print_networks(self, verbose): - """Print the total number of parameters in the network and (if verbose) network architecture - - Parameters: - verbose (bool) -- if verbose: print the network architecture - """ - print('---------- Networks initialized -------------') - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - if verbose: - print(net) - print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) - print('-----------------------------------------------') - - def set_requires_grad(self, nets, requires_grad=False): - """Set requies_grad=Fasle for all the networks to avoid unnecessary computations - Parameters: - nets (network list) -- a list of networks - requires_grad (bool) -- whether the networks require gradients or not - """ - if not isinstance(nets, list): - nets = [nets] - for net in nets: - if net is not None: - for param in net.parameters(): - param.requires_grad = requires_grad - - def generate_visuals_for_evaluation(self, data, mode): - return {} diff --git a/spaces/descript/vampnet/vampnet/__init__.py b/spaces/descript/vampnet/vampnet/__init__.py deleted file mode 100644 index 2a9dd073cacc51dd2996e856cf6c0eb87dd93ce0..0000000000000000000000000000000000000000 --- a/spaces/descript/vampnet/vampnet/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ - -from . import modules -from . 
import scheduler -from .interface import Interface - -__version__ = "0.0.1" diff --git a/spaces/devisionx/auto-annotation-segmentation/README.md b/spaces/devisionx/auto-annotation-segmentation/README.md deleted file mode 100644 index 58b83e7dbd33675d67642b6cd178929074028c06..0000000000000000000000000000000000000000 --- a/spaces/devisionx/auto-annotation-segmentation/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Auto Annotation Segmentation -emoji: 💻 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.50.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dhof/shapetest/style.css b/spaces/dhof/shapetest/style.css deleted file mode 100644 index 47478fba3cc8260705dccaa72ecce0cb9ea79f73..0000000000000000000000000000000000000000 --- a/spaces/dhof/shapetest/style.css +++ /dev/null @@ -1,13 +0,0 @@ -h1 { - text-align: center; -} - -#component-0 { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} - -#prompt-container { - gap: 0; -} diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/provenance.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/provenance.py deleted file mode 100644 index 114644871e762d9884a7024426b4afdc9ac1907b..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/colbert/infra/provenance.py +++ /dev/null @@ -1,43 +0,0 @@ -import sys -import traceback -import inspect - - -class Provenance: - def __init__(self) -> None: - self.initial_stacktrace = self.stacktrace() - - def stacktrace(self): - trace = inspect.stack() - output = [] - - for frame in trace[2:-1]: - try: - frame = f'{frame.filename}:{frame.lineno}:{frame.function}: {frame.code_context[0].strip()}' - output.append(frame) - except: - output.append(None) - - return output - - def toDict(self): # for ujson - self.serialization_stacktrace = self.stacktrace() - return dict(self.__dict__) - - -if __name__ == '__main__': - p = Provenance() - print(p.toDict().keys()) - - import ujson - print(ujson.dumps(p, indent=4)) - - - class X: - def __init__(self) -> None: - pass - - def toDict(self): - return {'key': 1} - - print(ujson.dumps(X())) \ No newline at end of file diff --git a/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/setup.py b/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Azusa-Bert-VITS2/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/app.py b/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/app.py deleted file mode 100644 index fd1077616212badcf2cc37885e94edbf14f9cb1a..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/app.py +++ /dev/null @@ -1,165 +0,0 @@ -import sys, os - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | 
%(message)s") - -logger = logging.getLogger(__name__) - -import torch -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser - - -net_g = None - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language -import soundfile as sf -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid): - global net_g - bert, phones, tones, lang_ids = get_text(text, "ZH", hps) - with torch.no_grad(): - x_tst=phones.to(device).unsqueeze(0) - tones=tones.to(device).unsqueeze(0) - lang_ids=lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - sf.write("tmp.wav", audio, 44100) - return audio -def convert_wav_to_ogg(wav_file): - os.makedirs('out', exist_ok=True) - filename = os.path.splitext(os.path.basename(wav_file.name))[0] - output_path_ogg = os.path.join('out', f"out.ogg") - - renamed_input_path = os.path.join('in', f"in.wav") - os.makedirs('in', exist_ok=True) - os.rename(wav_file.name, renamed_input_path) - command = ["ffmpeg", "-i", renamed_input_path, "-acodec", "libopus", "-y", output_path_ogg] - os.system(" ".join(command)) - return output_path_ogg -def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale): - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker) - with open('tmp.wav', 'rb') as wav_file: - newogg = convert_wav_to_ogg(wav_file) - return "Success", (hps.data.sampling_rate, audio),newogg - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--model_dir", default="./logs/bfy/bfy_c.pth", help="path of your model") - parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file") - parser.add_argument("--share", default=False, help="make link public") - parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log") - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - hps = utils.get_hparams_from_file(args.config_dir) - device = "cuda:0" if torch.cuda.is_available() else "cpu" - ''' - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - ''' - net_g = 
SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True) - - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - with gr.Blocks() as app: - with gr.Row(): - with gr.Column(): - - - gr.Markdown(value=""" - 步非烟 Ver.c Bert-Vits2在线语音生成\n - 1、模型作者:数字星瞳企划 https://t.me/xingtong25680 \n - 2、原项目地址:https://github.com/Stardust-minus/Bert-VITS2\n - 3、使用此模型进行二创请注明AI生成,以及原项目地址\n - 4、素材来自散文朗读比赛,严禁将此项目用于一切违反《中华人民共和国宪法》,《中华人民共和国刑法》,《中华人民共和国治安管理处罚法》和《中华人民共和国民法典》之用途。严禁用于任何政治相关用途。 \n - """) - text = gr.TextArea(label="Text", placeholder="Input Text Here", - value="这里是数字星瞳企画,请在电报搜索星瞳全拼加二五六八零,获取最新更新进展。") - speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker') - sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label='语调变化') - noise_scale = gr.Slider(minimum=0.1, maximum=1.5, value=0.6, step=0.01, label='感情变化') - noise_scale_w = gr.Slider(minimum=0.1, maximum=1.4, value=0.8, step=0.01, label='音节发音长度变化') - length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='语速') - btn = gr.Button("开启AI语音之旅吧!", variant="primary") - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio") - ogg_output = gr.File(label="Converted OGG file") - gr.Markdown(value=""" - 模型汇总:\n - 星瞳整合 https://huggingface.co/spaces/digitalxingtong/Xingtong-All-in-One\n - 步非烟 Ver.a https://huggingface.co/spaces/digitalxingtong/Bufeiyan-a-Bert-VITS2 \n - 步非烟 Ver.b https://huggingface.co/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2 \n - 步非烟 Ver.c https://huggingface.co/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2 \n - 男声朗读 https://huggingface.co/spaces/digitalxingtong/Kanghui-Read-Bert-VITS2 \n - 男声朗读(长文本) https://huggingface.co/spaces/digitalxingtong/Kanghui-Longread-Bert-VITS2\n - IGN 中国 https://huggingface.co/spaces/digitalxingtong/Ign-Read-Bert-VITS2 \n - IGN 中国(长文本)https://huggingface.co/spaces/digitalxingtong/Ign-Longread-Bert-VITS2 \n - """) - btn.click(tts_fn, - inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale], - outputs=[text_output, audio_output,ogg_output]) - - - app.launch(show_error=True) diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/monotonic_align/core.c b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/monotonic_align/core.c deleted file mode 100644 index 5f8af54d32474f821e9d1f4d2679d78128722596..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/monotonic_align/core.c +++ /dev/null @@ -1,26530 +0,0 @@ -/* Generated by Cython 3.0.0 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "monotonic_align.core", - "sources": [ - "core.pyx" - ] - }, - "module_name": "monotonic_align.core" -} -END: Cython Metadata */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#if defined(CYTHON_LIMITED_API) && 0 - #ifndef Py_LIMITED_API - #if CYTHON_LIMITED_API+0 > 0x03030000 - #define Py_LIMITED_API CYTHON_LIMITED_API - #else - #define Py_LIMITED_API 0x03030000 - #endif - #endif -#endif - -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.7+ or Python 3.3+. 
-#else -#define CYTHON_ABI "3_0_0" -#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI -#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." -#define CYTHON_HEX_VERSION 0x030000F0 -#define CYTHON_FUTURE_DIVISION 1 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #define HAVE_LONG_LONG -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#if defined(GRAALVM_PYTHON) - /* For very preliminary testing purposes. Most variables are set the same as PyPy. - The existence of this section does not imply that anything works or is even tested */ - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 1 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(PYPY_VERSION) - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define 
CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #if PY_VERSION_HEX < 0x03090000 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00) - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(CYTHON_LIMITED_API) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 1 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #undef CYTHON_CLINE_IN_TRACEBACK - #define CYTHON_CLINE_IN_TRACEBACK 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 1 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #endif - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 1 - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 - #endif -#elif defined(PY_NOGIL) - #define CYTHON_COMPILING_IN_PYPY 0 - #define 
CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #ifndef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #define CYTHON_COMPILING_IN_NOGIL 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #ifndef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #endif - #ifndef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #ifndef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6) - #endif - #ifndef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #endif - #if PY_VERSION_HEX < 0x030400a1 - #undef 
CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #elif !defined(CYTHON_USE_TP_FINALIZE) - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #if PY_VERSION_HEX < 0x030600B1 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #elif !defined(CYTHON_USE_DICT_VERSIONS) - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5) - #endif - #if PY_VERSION_HEX < 0x030700A3 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #elif !defined(CYTHON_USE_EXC_INFO_STACK) - #define CYTHON_USE_EXC_INFO_STACK 1 - #endif - #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC - #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if !defined(CYTHON_VECTORCALL) -#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) -#endif -#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_MAJOR_VERSION < 3 - #include "longintrepr.h" - #endif - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED - #if defined(__cplusplus) - /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17 - * but leads to warnings with -pedantic, since it is a C++17 feature */ - #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) - #if __has_cpp_attribute(maybe_unused) - #define CYTHON_UNUSED [[maybe_unused]] - #endif - #endif - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_UNUSED_VAR -# if defined(__cplusplus) - template<typename T> void CYTHON_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR - #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - #endif - #endif - #if _MSC_VER < 1300 - #ifdef _WIN64 - typedef unsigned long long __pyx_uintptr_t; - #else - typedef unsigned int __pyx_uintptr_t; - #endif - #else - #ifdef _WIN64 - typedef unsigned __int64 __pyx_uintptr_t; - #else
- typedef unsigned __int32 __pyx_uintptr_t; - #endif - #endif -#else - #include <stdint.h> - typedef uintptr_t __pyx_uintptr_t; -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) - /* for clang __has_cpp_attribute(fallthrough) is true even before C++17 - * but leads to warnings with -pedantic, since it is a C++17 feature */ - #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif -#ifdef __cplusplus - template<typename T> - struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);}; - #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value) -#else - #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0) -#endif -#if CYTHON_COMPILING_IN_PYPY == 1 - #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000) -#else - #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000) -#endif -#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer)) - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_DefaultClassType PyClass_Type - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" - #define __Pyx_DefaultClassType PyType_Type -#if PY_VERSION_HEX >= 0x030B00A1 - static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, - PyObject *code, PyObject *c, PyObject* n, PyObject *v, - PyObject *fv, PyObject *cell, PyObject* fn, - PyObject *name, int fline, PyObject *lnos) { - PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; - PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *empty=NULL; - const char *fn_cstr=NULL; - const char *name_cstr=NULL; - PyCodeObject *co=NULL, *result=NULL; - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (!(kwds=PyDict_New())) goto end; - if (!(argcount=PyLong_FromLong(a))) goto end; - if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; - if (!(posonlyargcount=PyLong_FromLong(p))) goto end; - if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; - if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; - if
(PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; - if (!(nlocals=PyLong_FromLong(l))) goto end; - if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; - if (!(stacksize=PyLong_FromLong(s))) goto end; - if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; - if (!(flags=PyLong_FromLong(f))) goto end; - if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; - if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; - if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; - if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; - if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto end; - if (!(empty = PyTuple_New(0))) goto end; - result = (PyCodeObject*) PyObject_Call(replace, empty, kwds); - end: - Py_XDECREF((PyObject*) co); - Py_XDECREF(kwds); - Py_XDECREF(argcount); - Py_XDECREF(posonlyargcount); - Py_XDECREF(kwonlyargcount); - Py_XDECREF(nlocals); - Py_XDECREF(stacksize); - Py_XDECREF(replace); - Py_XDECREF(empty); - if (type) { - PyErr_Restore(type, value, traceback); - } - return result; - } -#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif -#endif -#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) - #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) -#else - #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is) - #define __Pyx_Py_Is(x, y) Py_Is(x, y) -#else - #define __Pyx_Py_Is(x, y) ((x) == (y)) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone) - #define __Pyx_Py_IsNone(ob) Py_IsNone(ob) -#else - #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue) - #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob) -#else - #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True) -#endif -#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse) - #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob) -#else - #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False) -#endif -#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? 
NULL : (obj)) -#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o) -#else - #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o) -#endif -#ifndef CO_COROUTINE - #define CO_COROUTINE 0x80 -#endif -#ifndef CO_ASYNC_GENERATOR - #define CO_ASYNC_GENERATOR 0x200 -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef Py_TPFLAGS_SEQUENCE - #define Py_TPFLAGS_SEQUENCE 0 -#endif -#ifndef Py_TPFLAGS_MAPPING - #define Py_TPFLAGS_MAPPING 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_METH_FASTCALL - #define __Pyx_METH_FASTCALL METH_FASTCALL - #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast - #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords -#else - #define __Pyx_METH_FASTCALL METH_VARARGS - #define __Pyx_PyCFunction_FastCall PyCFunction - #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords -#endif -#if CYTHON_VECTORCALL - #define __pyx_vectorcallfunc vectorcallfunc - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET - #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) -#elif CYTHON_BACKPORT_VECTORCALL - typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, - size_t nargsf, PyObject *kwnames); - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) -#else - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) -#endif -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) - typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); -#else - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) - #define __Pyx_PyCMethod PyCMethod -#endif -#ifndef METH_METHOD - #define METH_METHOD 0x200 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyThreadState_Current PyThreadState_Get() -#elif !CYTHON_FAST_THREAD_STATE - #define __Pyx_PyThreadState_Current 
PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) -{ - void *result; - result = PyModule_GetState(op); - if (!result) - Py_FatalError("Couldn't find the module state"); - return result; -} -#endif -#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) -#else - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if PY_MAJOR_VERSION < 3 - #if CYTHON_COMPILING_IN_PYPY - #if PYPY_VERSION_NUM < 0x07030600 - #if defined(__cplusplus) && __cplusplus >= 201402L - [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] - #elif defined(__GNUC__) || defined(__clang__) - __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) - #elif defined(_MSC_VER) - __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) - #endif - static CYTHON_INLINE int PyGILState_Check(void) { - return 0; - } - #else // PYPY_VERSION_NUM < 0x07030600 - #endif // PYPY_VERSION_NUM < 0x07030600 - #else - static CYTHON_INLINE int PyGILState_Check(void) { - PyThreadState * tstate = _PyThreadState_Current; - return tstate && (tstate == PyGILState_GetThisThreadState()); - } - #endif -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { - PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); - if (res == NULL) PyErr_Clear(); - return res; -} -#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) -#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#else -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { -#if CYTHON_COMPILING_IN_PYPY - return PyDict_GetItem(dict, name); -#else - PyDictEntry *ep; - PyDictObject *mp = (PyDictObject*) dict; - long hash = ((PyStringObject *) name)->ob_shash; - assert(hash != -1); - ep = (mp->ma_lookup)(mp, name, hash); - if (ep == NULL) { - return NULL; - } - return ep->me_value; -#endif -} -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#endif -#if CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) - #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) - #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) -#else - #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) - #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) - #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next -#endif -#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 -#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ - PyTypeObject *type = Py_TYPE(obj);\ - assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\ - PyObject_GC_Del(obj);\ - Py_DECREF(type);\ -} -#else -#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U) - #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) - #define __Pyx_PyUnicode_DATA(u) ((void*)u) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) -#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if PY_VERSION_HEX >= 0x030C0000 - #define __Pyx_PyUnicode_READY(op) (0) - #else - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) - #define __Pyx_PyUnicode_DATA(u) 
PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch) - #if PY_VERSION_HEX >= 0x030C0000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #else - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535U : 1114111U) - #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #if !defined(PyUnicode_DecodeUnicodeEscape) - #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) - #endif - #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) - #undef PyUnicode_Contains - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) - #endif - #if !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) - #endif - #if !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) - #endif -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#if CYTHON_COMPILING_IN_CPYTHON - #define __Pyx_PySequence_ListKeepNew(obj)\ - (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj)) -#else - #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define __Pyx_Py3Int_Check(op) PyLong_Check(op) - #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#else - #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op)) - #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op)) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) 
((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - #if !defined(_USE_MATH_DEFINES) - #define _USE_MATH_DEFINES - #endif -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifdef CYTHON_EXTERN_C - #undef __PYX_EXTERN_C - #define __PYX_EXTERN_C CYTHON_EXTERN_C -#elif defined(__PYX_EXTERN_C) - #ifdef _MSC_VER - #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") - #else - #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. - #endif -#else - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__monotonic_align__core -#define __PYX_HAVE_API__monotonic_align__core -/* Early includes */ -#include "pythread.h" -#include <string.h> -#include <stdlib.h> -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) -
#define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) -{ - const wchar_t *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#else -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) -{ - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#endif -#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o) -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_VERSION_HEX >= 0x030C00A7 - #ifndef _PyLong_SIGN_MASK - #define _PyLong_SIGN_MASK 3 - #endif - #ifndef _PyLong_NON_SIZE_BITS - #define _PyLong_NON_SIZE_BITS 3 - #endif - #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK) - #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0) - #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x)) - #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1) - #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0) - #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0]) - #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS)) - #define __Pyx_PyLong_SignedDigitCount(x)\ - ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x)) - #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue) - #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x) - #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x) - #else - #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS)) - #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0]) - #endif - typedef Py_ssize_t __Pyx_compact_pylong; - typedef size_t __Pyx_compact_upylong; - #else // Py < 3.12 - #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0) - #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0) - #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0) - #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0) - #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0]) - #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x)) - #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x) - #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1) - #define __Pyx_PyLong_CompactValue(x)\ - ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? 
-(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0])) - typedef sdigit __Pyx_compact_pylong; - typedef digit __Pyx_compact_upylong; - #endif - #if PY_VERSION_HEX >= 0x030C00A5 - #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit) - #else - #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit) - #endif -#endif -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = (char) c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -#if !CYTHON_USE_MODULE_STATE -static PyObject 
*__pyx_m = NULL; -#endif -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm = __FILE__; -static const char *__pyx_filename; - -/* #### Code section: filename_table ### */ - -static const char *__pyx_f[] = { - "core.pyx", - "<stringsource>", -}; -/* #### Code section: utility_code_proto_before_types ### */ -/* ForceInitThreads.proto */ -#ifndef __PYX_FORCE_INIT_THREADS - #define __PYX_FORCE_INIT_THREADS 0 -#endif - -/* NoFastGil.proto */ -#define __Pyx_PyGILState_Ensure PyGILState_Ensure -#define __Pyx_PyGILState_Release PyGILState_Release -#define __Pyx_FastGIL_Remember() -#define __Pyx_FastGIL_Forget() -#define __Pyx_FastGilFuncInit() - -/* BufferFormatStructs.proto */ -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - -/* Atomics.proto */ -#include <pythread.h> -#ifndef CYTHON_ATOMICS - #define CYTHON_ATOMICS 1 -#endif -#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS -#define __pyx_atomic_int_type int -#define __pyx_nonatomic_int_type int -#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\ - (__STDC_VERSION__ >= 201112L) &&\ - !defined(__STDC_NO_ATOMICS__)) - #include <stdatomic.h> -#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\ - (__cplusplus >= 201103L) ||\ - (defined(_MSC_VER) && _MSC_VER >= 1700))) - #include <atomic> -#endif -#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\ - (__STDC_VERSION__ >= 201112L) &&\ - !defined(__STDC_NO_ATOMICS__) &&\ - ATOMIC_INT_LOCK_FREE == 2) - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type atomic_int - #define __pyx_atomic_incr_aligned(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed) - #define __pyx_atomic_decr_aligned(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel) - #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER) - #pragma message ("Using standard C atomics") - #elif defined(__PYX_DEBUG_ATOMICS) - #warning "Using standard C atomics" - #endif -#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\ - (__cplusplus >= 201103L) ||\ -\ - (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\ - ATOMIC_INT_LOCK_FREE == 2) - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type std::atomic_int - #define __pyx_atomic_incr_aligned(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed) - #define __pyx_atomic_decr_aligned(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel) - #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER) - #pragma message ("Using standard C++ atomics") - #elif defined(__PYX_DEBUG_ATOMICS) - #warning "Using standard C++ atomics" - #endif -#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\ - (__GNUC_MINOR__ > 1 ||\ - (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)))) - #define __pyx_atomic_incr_aligned(value) __sync_fetch_and_add(value, 1) - #define __pyx_atomic_decr_aligned(value) __sync_fetch_and_sub(value, 1) - #ifdef
__PYX_DEBUG_ATOMICS - #warning "Using GNU atomics" - #endif -#elif CYTHON_ATOMICS && defined(_MSC_VER) - #include <intrin.h> - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type long - #define __pyx_nonatomic_int_type long - #pragma intrinsic (_InterlockedExchangeAdd) - #define __pyx_atomic_incr_aligned(value) _InterlockedExchangeAdd(value, 1) - #define __pyx_atomic_decr_aligned(value) _InterlockedExchangeAdd(value, -1) - #ifdef __PYX_DEBUG_ATOMICS - #pragma message ("Using MSVC atomics") - #endif -#else - #undef CYTHON_ATOMICS - #define CYTHON_ATOMICS 0 - #ifdef __PYX_DEBUG_ATOMICS - #warning "Not using atomics" - #endif -#endif -#if CYTHON_ATOMICS - #define __pyx_add_acquisition_count(memview)\ - __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview)) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview)) -#else - #define __pyx_add_acquisition_count(memview)\ - __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) -#endif - -/* MemviewSliceStruct.proto */ -struct __pyx_memoryview_obj; -typedef struct { - struct __pyx_memoryview_obj *memview; - char *data; - Py_ssize_t shape[8]; - Py_ssize_t strides[8]; - Py_ssize_t suboffsets[8]; -} __Pyx_memviewslice; -#define __Pyx_MemoryView_Len(m) (m.shape[0]) - -/* #### Code section: numeric_typedefs ### */ -/* #### Code section: complex_type_declarations ### */ -/* #### Code section: type_declarations ### */ - -/*--- Type declarations ---*/ -struct __pyx_array_obj; -struct __pyx_MemviewEnum_obj; -struct __pyx_memoryview_obj; -struct __pyx_memoryviewslice_obj; -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { - int __pyx_n; - float max_neg_val; -}; - -/* "View.MemoryView":114 - * @cython.collection_type("sequence") - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ -struct __pyx_array_obj { - PyObject_HEAD - struct __pyx_vtabstruct_array *__pyx_vtab; - char *data; - Py_ssize_t len; - char *format; - int ndim; - Py_ssize_t *_shape; - Py_ssize_t *_strides; - Py_ssize_t itemsize; - PyObject *mode; - PyObject *_format; - void (*callback_free_data)(void *); - int free_data; - int dtype_is_object; -}; - - -/* "View.MemoryView":302 - * - * @cname('__pyx_MemviewEnum') - * cdef class Enum(object): # <<<<<<<<<<<<<< - * cdef object name - * def __init__(self, name): - */ -struct __pyx_MemviewEnum_obj { - PyObject_HEAD - PyObject *name; -}; - - -/* "View.MemoryView":337 - * - * @cname('__pyx_memoryview') - * cdef class memoryview: # <<<<<<<<<<<<<< - * - * cdef object obj - */ -struct __pyx_memoryview_obj { - PyObject_HEAD - struct __pyx_vtabstruct_memoryview *__pyx_vtab; - PyObject *obj; - PyObject *_size; - PyObject *_array_interface; - PyThread_type_lock lock; - __pyx_atomic_int_type acquisition_count; - Py_buffer view; - int flags; - int dtype_is_object; - __Pyx_TypeInfo *typeinfo; -}; - - -/* "View.MemoryView":952 - * @cython.collection_type("sequence") - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): #
<<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ -struct __pyx_memoryviewslice_obj { - struct __pyx_memoryview_obj __pyx_base; - __Pyx_memviewslice from_slice; - PyObject *from_object; - PyObject *(*to_object_func)(char *); - int (*to_dtype_func)(char *, PyObject *); -}; - - - -/* "View.MemoryView":114 - * @cython.collection_type("sequence") - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ - -struct __pyx_vtabstruct_array { - PyObject *(*get_memview)(struct __pyx_array_obj *); -}; -static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; - - -/* "View.MemoryView":337 - * - * @cname('__pyx_memoryview') - * cdef class memoryview: # <<<<<<<<<<<<<< - * - * cdef object obj - */ - -struct __pyx_vtabstruct_memoryview { - char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); - PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); - PyObject *(*_get_base)(struct __pyx_memoryview_obj *); -}; -static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; - - -/* "View.MemoryView":952 - * @cython.collection_type("sequence") - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ - -struct __pyx_vtabstruct__memoryviewslice { - struct __pyx_vtabstruct_memoryview __pyx_base; -}; -static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; -/* #### Code section: utility_code_proto ### */ - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, Py_ssize_t); - void (*DECREF)(void*, PyObject*, Py_ssize_t); - void (*GOTREF)(void*, PyObject*, Py_ssize_t); - void (*GIVEREF)(void*, PyObject*, Py_ssize_t); - void* (*SetupContext)(const char*, Py_ssize_t, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - } - #define __Pyx_RefNannyFinishContextNogil() {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __Pyx_RefNannyFinishContext();\ - PyGILState_Release(__pyx_gilstate_save);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)) - #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext() -#endif - #define __Pyx_RefNannyFinishContextNogil() {\ - 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __Pyx_RefNannyFinishContext();\ - PyGILState_Release(__pyx_gilstate_save);\ - } - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContextNogil() - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_Py_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; Py_XDECREF(tmp);\ - } while (0) -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#if PY_VERSION_HEX >= 0x030C00A6 -#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL) -#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? 
(PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL) -#else -#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL) -#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type) -#endif -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL) -#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6 -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* TupleAndListFromArray.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); -static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); -#endif - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* fastcall.proto */ -#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) -#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) -#define __Pyx_KwValues_VARARGS(args, nargs) NULL -#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) -#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) -#if CYTHON_METH_FASTCALL - #define __Pyx_Arg_FASTCALL(args, i) args[i] - #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) - #define
__Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) - static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); - #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) -#else - #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS - #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS - #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS - #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS - #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS -#endif -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) -#else -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) -#endif - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, - const char* function_name); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#if !CYTHON_VECTORCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif -#if !CYTHON_VECTORCALL -#if PY_VERSION_HEX >= 0x03080000 - #include "frameobject.h" -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif - #define __Pxy_PyFrame_Initialize_Offsets() - #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) -#else - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif -#endif -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, 
PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectFastCall.proto */ -#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); - -/* RaiseUnexpectedTypeError.proto */ -static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj); - -/* GCCDiagnostics.proto */ -#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* BuildPyUnicode.proto */ -static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, - int prepend_sign, char padding_char); - -/* CIntToPyUnicode.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char); - -/* CIntToPyUnicode.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char); - -/* JoinPyUnicode.proto */ -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - Py_UCS4 max_char); - -/* StrEquals.proto */ -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - -/* PyObjectFormatSimple.proto */ -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - PyObject_Format(s, f)) -#elif PY_MAJOR_VERSION < 3 - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") :\ - PyObject_Format(s, f)) -#elif CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\ - likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\ - PyObject_Format(s, f)) -#else - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - PyObject_Format(s, f)) -#endif - -CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* KeywordStringCheck.proto */ -static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed); - -/* DivInt[Py_ssize_t].proto */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); - -/* UnaryNegOverflows.proto */ -#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x)\ - (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) do {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == 
__PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} while(0) -#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} while(0) -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* AssertionsEnabled.proto */ -#define __Pyx_init_assertions_enabled() -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define __pyx_assertions_enabled() (1) -#elif PY_VERSION_HEX < 0x03080000 || CYTHON_COMPILING_IN_PYPY || defined(Py_LIMITED_API) - #define __pyx_assertions_enabled() (!Py_OptimizeFlag) -#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030900A6 - static int __pyx_assertions_enabled_flag; - #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag) - #undef __Pyx_init_assertions_enabled - static void __Pyx_init_assertions_enabled(void) { - __pyx_assertions_enabled_flag = ! _PyInterpreterState_GetConfig(__Pyx_PyThreadState_Current->interp)->optimization_level; - } -#else - #define __pyx_assertions_enabled() (!Py_OptimizeFlag) -#endif - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void 
__Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportDottedModule.proto */ -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); -#endif - -/* ssize_strlen.proto */ -static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2) -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PySequenceMultiply.proto */ -#define __Pyx_PySequence_Multiply_Left(mul, seq) __Pyx_PySequence_Multiply(seq, mul) -static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul); - -/* SetItemInt.proto */ -#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ - __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) -static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, - int is_list, int wraparound, int boundscheck); - -/* RaiseUnboundLocalError.proto */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -/* DivInt[long].proto */ -static CYTHON_INLINE long __Pyx_div_long(long, long); - -/* PySequenceContains.proto */ -static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { - int result = PySequence_Contains(seq, item); - return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); -} - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* ErrOccurredWithGIL.proto */ -static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* PyObject_GenericGetAttr.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/* IncludeStructmemberH.proto */ -#include - -/* FixUpExtensionType.proto */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); -#endif - -/* PyObjectCallNoArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); - -/* PyObjectGetMethod.proto */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); - -/* PyObjectCallMethod0.proto */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); - -/* ValidateBasesTuple.proto */ -#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS -static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases); -#endif - -/* PyType_Ready.proto */ -CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t); - -/* SetVTable.proto */ -static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable); - -/* GetVTable.proto */ -static void* __Pyx_GetVtable(PyTypeObject *type); - -/* MergeVTables.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_MergeVtables(PyTypeObject *type); -#endif - -/* SetupReduce.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_setup_reduce(PyObject* type_obj); -#endif - -/* FetchSharedCythonModule.proto */ -static PyObject *__Pyx_FetchSharedCythonABIModule(void); - -/* FetchCommonType.proto */ -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); -#else -static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); -#endif - -/* PyMethodNew.proto */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { - CYTHON_UNUSED_VAR(typ); - if (!self) - return __Pyx_NewRef(func); - return PyMethod_New(func, 
self); -} -#else - #define __Pyx_PyMethod_New PyMethod_New -#endif - -/* PyVectorcallFastCallDict.proto */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); -#endif - -/* CythonFunctionShared.proto */ -#define __Pyx_CyFunction_USED -#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 -#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 -#define __Pyx_CYFUNCTION_CCLASS 0x04 -#define __Pyx_CYFUNCTION_COROUTINE 0x08 -#define __Pyx_CyFunction_GetClosure(f)\ - (((__pyx_CyFunctionObject *) (f))->func_closure) -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_CyFunction_GetClassObj(f)\ - (((__pyx_CyFunctionObject *) (f))->func_classobj) -#else - #define __Pyx_CyFunction_GetClassObj(f)\ - ((PyObject*) ((PyCMethodObject *) (f))->mm_class) -#endif -#define __Pyx_CyFunction_SetClassObj(f, classobj)\ - __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) -#define __Pyx_CyFunction_Defaults(type, f)\ - ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) -#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ - ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) -typedef struct { -#if PY_VERSION_HEX < 0x030900B1 - PyCFunctionObject func; -#else - PyCMethodObject func; -#endif -#if CYTHON_BACKPORT_VECTORCALL - __pyx_vectorcallfunc func_vectorcall; -#endif -#if PY_VERSION_HEX < 0x030500A0 - PyObject *func_weakreflist; -#endif - PyObject *func_dict; - PyObject *func_name; - PyObject *func_qualname; - PyObject *func_doc; - PyObject *func_globals; - PyObject *func_code; - PyObject *func_closure; -#if PY_VERSION_HEX < 0x030900B1 - PyObject *func_classobj; -#endif - void *defaults; - int defaults_pyobjects; - size_t defaults_size; // used by FusedFunction for copying defaults - int flags; - PyObject *defaults_tuple; - PyObject *defaults_kwdict; - PyObject *(*defaults_getter)(PyObject *); - PyObject *func_annotations; - PyObject *func_is_coroutine; -} __pyx_CyFunctionObject; -#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) -#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) -#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, - size_t size, - int pyobjects); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, - PyObject *tuple); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, - PyObject *dict); -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, - PyObject *dict); -static int __pyx_CyFunction_init(PyObject *module); -#if CYTHON_METH_FASTCALL -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, 
size_t nargsf, PyObject *kwnames); -#if CYTHON_BACKPORT_VECTORCALL -#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) -#else -#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) -#endif -#endif - -/* CythonFunction.proto */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); -#endif - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -/* MemviewSliceIsContig.proto */ -static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); - -/* OverlappingSlices.proto */ -static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* TypeInfoCompare.proto */ -static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); - -/* MemviewSliceValidateAndInit.proto */ -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); - -/* MemviewSliceCopyTemplate.proto */ -static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const 
__Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object); - -/* MemviewSliceInit.proto */ -#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d -#define __Pyx_MEMVIEW_DIRECT 1 -#define __Pyx_MEMVIEW_PTR 2 -#define __Pyx_MEMVIEW_FULL 4 -#define __Pyx_MEMVIEW_CONTIG 8 -#define __Pyx_MEMVIEW_STRIDED 16 -#define __Pyx_MEMVIEW_FOLLOW 32 -#define __Pyx_IS_C_CONTIG 1 -#define __Pyx_IS_F_CONTIG 2 -static int __Pyx_init_memviewslice( - struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference); -static CYTHON_INLINE int __pyx_add_acquisition_count_locked( - __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); -static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( - __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); -#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count) -#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) -#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__) -static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); -static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *, int, int); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); - -/* FormatTypeName.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -typedef PyObject *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%U" -static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); -#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) -#else -typedef const char *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%.200s" -#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) -#define __Pyx_DECREF_TypeName(obj) -#endif - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -/* #### Code section: module_declarations ### */ -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject 
*__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self); /* proto*/ -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto*/ - -/* Module declarations from "cython.view" */ - -/* Module declarations from "cython.dataclasses" */ - -/* Module declarations from "cython" */ - -/* Module declarations from "monotonic_align.core" */ -static PyObject *__pyx_collections_abc_Sequence = 0; -static PyObject *generic = 0; -static PyObject *strided = 0; -static PyObject *indirect = 0; -static PyObject *contiguous = 0; -static PyObject *indirect_contiguous = 0; -static int __pyx_memoryview_thread_locks_used; -static PyThread_type_lock __pyx_memoryview_thread_locks[8]; -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ -static int __pyx_array_allocate_buffer(struct __pyx_array_obj *); /*proto*/ -static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ -static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ -static PyObject *_unellipsify(PyObject *, int); /*proto*/ -static int assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ -static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ -static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ -static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ -static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ -static void 
*__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ -static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memoryview_err_dim(PyObject *, PyObject *, int); /*proto*/ -static int __pyx_memoryview_err(PyObject *, PyObject *); /*proto*/ -static int __pyx_memoryview_err_no_memory(void); /*proto*/ -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ -static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ -/* #### Code section: typeinfo ### */ -static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, __PYX_IS_UNSIGNED(int) ? 'U' : 'I', __PYX_IS_UNSIGNED(int), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -/* #### Code section: before_global_var ### */ -#define __Pyx_MODULE_NAME "monotonic_align.core" -extern int __pyx_module_is_main_monotonic_align__core; -int __pyx_module_is_main_monotonic_align__core = 0; - -/* Implementation of "monotonic_align.core" */ -/* #### Code section: global_var ### */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin___import__; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_MemoryError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_AssertionError; -static PyObject *__pyx_builtin_Ellipsis; -static PyObject *__pyx_builtin_id; -static PyObject *__pyx_builtin_IndexError; -/* #### Code section: string_decls ### */ -static const char __pyx_k_[] = ": "; -static const char __pyx_k_O[] = "O"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k__2[] = "."; -static const char __pyx_k__3[] = "*"; -static const char __pyx_k__6[] = "'"; -static const char __pyx_k__7[] = ")"; -static const char __pyx_k_gc[] = "gc"; -static const char __pyx_k_id[] = "id"; -static const char __pyx_k__23[] = "?"; -static const char __pyx_k_abc[] = "abc"; -static const char __pyx_k_and[] = " and "; -static const char __pyx_k_got[] = " (got "; -static const char __pyx_k_new[] = "__new__"; -static const char __pyx_k_obj[] = "obj"; -static const char __pyx_k_sys[] = "sys"; -static const char __pyx_k_base[] = "base"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ndim[] = "ndim"; -static const char __pyx_k_pack[] = "pack"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_spec[] = "__spec__"; -static const char __pyx_k_step[] = "step"; -static const char __pyx_k_stop[] = "stop"; -static const char __pyx_k_t_xs[] = "t_xs"; -static const char __pyx_k_t_ys[] = "t_ys"; -static const 
char __pyx_k_test[] = "__test__"; -static const char __pyx_k_ASCII[] = "ASCII"; -static const char __pyx_k_class[] = "__class__"; -static const char __pyx_k_count[] = "count"; -static const char __pyx_k_error[] = "error"; -static const char __pyx_k_flags[] = "flags"; -static const char __pyx_k_index[] = "index"; -static const char __pyx_k_paths[] = "paths"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_enable[] = "enable"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_pickle[] = "pickle"; -static const char __pyx_k_reduce[] = "__reduce__"; -static const char __pyx_k_struct[] = "struct"; -static const char __pyx_k_unpack[] = "unpack"; -static const char __pyx_k_update[] = "update"; -static const char __pyx_k_values[] = "values"; -static const char __pyx_k_disable[] = "disable"; -static const char __pyx_k_fortran[] = "fortran"; -static const char __pyx_k_memview[] = "memview"; -static const char __pyx_k_Ellipsis[] = "Ellipsis"; -static const char __pyx_k_Sequence[] = "Sequence"; -static const char __pyx_k_core_pyx[] = "core.pyx"; -static const char __pyx_k_getstate[] = "__getstate__"; -static const char __pyx_k_itemsize[] = "itemsize"; -static const char __pyx_k_pyx_type[] = "__pyx_type"; -static const char __pyx_k_register[] = "register"; -static const char __pyx_k_setstate[] = "__setstate__"; -static const char __pyx_k_TypeError[] = "TypeError"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_isenabled[] = "isenabled"; -static const char __pyx_k_pyx_state[] = "__pyx_state"; -static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; -static const char __pyx_k_IndexError[] = "IndexError"; -static const char __pyx_k_ValueError[] = "ValueError"; -static const char __pyx_k_pyx_result[] = "__pyx_result"; -static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; -static const char __pyx_k_MemoryError[] = "MemoryError"; -static const char __pyx_k_PickleError[] = "PickleError"; -static const char __pyx_k_collections[] = "collections"; -static const char __pyx_k_initializing[] = "_initializing"; -static const char __pyx_k_is_coroutine[] = "_is_coroutine"; -static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; -static const char __pyx_k_stringsource[] = "<stringsource>"; -static const char __pyx_k_version_info[] = "version_info"; -static const char __pyx_k_class_getitem[] = "__class_getitem__"; -static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; -static const char __pyx_k_AssertionError[] = "AssertionError"; -static const char __pyx_k_maximum_path_c[] = "maximum_path_c"; -static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; -static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; -static const char __pyx_k_collections_abc[] = "collections.abc"; -static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; -static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; -static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; -static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; -static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; -static const char 
__pyx_k_monotonic_align_core[] = "monotonic_align.core"; -static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; -static const char __pyx_k_Invalid_shape_in_axis[] = "Invalid shape in axis "; -static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; -static const char __pyx_k_Cannot_index_with_type[] = "Cannot index with type '"; -static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; -static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; -static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; -static const char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; -static const char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; -static const char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; -static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; -static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; -static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; -static const char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; -static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; -static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; -static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; -static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; -static const char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; -static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; -static const char __pyx_k_Incompatible_checksums_0x_x_vs_0[] = "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))"; -static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; -static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got "; -static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis "; -static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; -static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension "; -static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; -static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; -/* #### Code section: decls ### */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj 
*__pyx_v_self); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static Py_ssize_t 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -/* #### Code section: late_includes ### */ -/* #### Code section: module_state ### */ -typedef struct { - PyObject *__pyx_d; - PyObject *__pyx_b; - PyObject *__pyx_cython_runtime; - PyObject *__pyx_empty_tuple; - PyObject *__pyx_empty_bytes; - PyObject *__pyx_empty_unicode; - #ifdef __Pyx_CyFunction_USED - PyTypeObject *__pyx_CyFunctionType; - #endif - #ifdef __Pyx_FusedFunction_USED - PyTypeObject *__pyx_FusedFunctionType; - #endif - #ifdef __Pyx_Generator_USED - PyTypeObject *__pyx_GeneratorType; - #endif - #ifdef __Pyx_IterableCoroutine_USED - PyTypeObject *__pyx_IterableCoroutineType; - #endif - #ifdef __Pyx_Coroutine_USED - PyTypeObject *__pyx_CoroutineAwaitType; - #endif - #ifdef __Pyx_Coroutine_USED - PyTypeObject *__pyx_CoroutineType; - #endif - #if CYTHON_USE_MODULE_STATE - #endif - #if CYTHON_USE_MODULE_STATE - #endif - #if CYTHON_USE_MODULE_STATE - #endif - #if 
CYTHON_USE_MODULE_STATE - PyObject *__pyx_type___pyx_array; - PyObject *__pyx_type___pyx_MemviewEnum; - PyObject *__pyx_type___pyx_memoryview; - PyObject *__pyx_type___pyx_memoryviewslice; - #endif - PyTypeObject *__pyx_array_type; - PyTypeObject *__pyx_MemviewEnum_type; - PyTypeObject *__pyx_memoryview_type; - PyTypeObject *__pyx_memoryviewslice_type; - PyObject *__pyx_kp_u_; - PyObject *__pyx_n_s_ASCII; - PyObject *__pyx_kp_s_All_dimensions_preceding_dimensi; - PyObject *__pyx_n_s_AssertionError; - PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; - PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; - PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; - PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; - PyObject *__pyx_kp_u_Cannot_index_with_type; - PyObject *__pyx_kp_s_Cannot_transpose_memoryview_with; - PyObject *__pyx_kp_s_Dimension_d_is_not_direct; - PyObject *__pyx_n_s_Ellipsis; - PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; - PyObject *__pyx_kp_s_Incompatible_checksums_0x_x_vs_0; - PyObject *__pyx_n_s_IndexError; - PyObject *__pyx_kp_s_Index_out_of_bounds_axis_d; - PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; - PyObject *__pyx_kp_u_Invalid_mode_expected_c_or_fortr; - PyObject *__pyx_kp_u_Invalid_shape_in_axis; - PyObject *__pyx_n_s_MemoryError; - PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; - PyObject *__pyx_kp_s_MemoryView_of_r_object; - PyObject *__pyx_n_b_O; - PyObject *__pyx_kp_u_Out_of_bounds_on_buffer_access_a; - PyObject *__pyx_n_s_PickleError; - PyObject *__pyx_n_s_Sequence; - PyObject *__pyx_kp_s_Step_may_not_be_zero_axis_d; - PyObject *__pyx_n_s_TypeError; - PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; - PyObject *__pyx_n_s_ValueError; - PyObject *__pyx_n_s_View_MemoryView; - PyObject *__pyx_kp_u__2; - PyObject *__pyx_n_s__23; - PyObject *__pyx_n_s__3; - PyObject *__pyx_kp_u__6; - PyObject *__pyx_kp_u__7; - PyObject *__pyx_n_s_abc; - PyObject *__pyx_n_s_allocate_buffer; - PyObject *__pyx_kp_u_and; - PyObject *__pyx_n_s_asyncio_coroutines; - PyObject *__pyx_n_s_base; - PyObject *__pyx_n_s_c; - PyObject *__pyx_n_u_c; - PyObject *__pyx_n_s_class; - PyObject *__pyx_n_s_class_getitem; - PyObject *__pyx_n_s_cline_in_traceback; - PyObject *__pyx_n_s_collections; - PyObject *__pyx_kp_s_collections_abc; - PyObject *__pyx_kp_s_contiguous_and_direct; - PyObject *__pyx_kp_s_contiguous_and_indirect; - PyObject *__pyx_kp_s_core_pyx; - PyObject *__pyx_n_s_count; - PyObject *__pyx_n_s_dict; - PyObject *__pyx_kp_u_disable; - PyObject *__pyx_n_s_dtype_is_object; - PyObject *__pyx_kp_u_enable; - PyObject *__pyx_n_s_encode; - PyObject *__pyx_n_s_enumerate; - PyObject *__pyx_n_s_error; - PyObject *__pyx_n_s_flags; - PyObject *__pyx_n_s_format; - PyObject *__pyx_n_s_fortran; - PyObject *__pyx_n_u_fortran; - PyObject *__pyx_kp_u_gc; - PyObject *__pyx_n_s_getstate; - PyObject *__pyx_kp_u_got; - PyObject *__pyx_kp_u_got_differing_extents_in_dimensi; - PyObject *__pyx_n_s_id; - PyObject *__pyx_n_s_import; - PyObject *__pyx_n_s_index; - PyObject *__pyx_n_s_initializing; - PyObject *__pyx_n_s_is_coroutine; - PyObject *__pyx_kp_u_isenabled; - PyObject *__pyx_n_s_itemsize; - PyObject *__pyx_kp_s_itemsize_0_for_cython_array; - PyObject *__pyx_n_s_main; - PyObject *__pyx_n_s_maximum_path_c; - PyObject *__pyx_n_s_memview; - PyObject *__pyx_n_s_mode; - PyObject *__pyx_n_s_monotonic_align_core; - PyObject *__pyx_n_s_name; - PyObject *__pyx_n_s_name_2; - PyObject *__pyx_n_s_ndim; - PyObject *__pyx_n_s_new; - PyObject 
*__pyx_kp_s_no_default___reduce___due_to_non; - PyObject *__pyx_n_s_obj; - PyObject *__pyx_n_s_pack; - PyObject *__pyx_n_s_paths; - PyObject *__pyx_n_s_pickle; - PyObject *__pyx_n_s_pyx_PickleError; - PyObject *__pyx_n_s_pyx_checksum; - PyObject *__pyx_n_s_pyx_result; - PyObject *__pyx_n_s_pyx_state; - PyObject *__pyx_n_s_pyx_type; - PyObject *__pyx_n_s_pyx_unpickle_Enum; - PyObject *__pyx_n_s_pyx_vtable; - PyObject *__pyx_n_s_range; - PyObject *__pyx_n_s_reduce; - PyObject *__pyx_n_s_reduce_cython; - PyObject *__pyx_n_s_reduce_ex; - PyObject *__pyx_n_s_register; - PyObject *__pyx_n_s_setstate; - PyObject *__pyx_n_s_setstate_cython; - PyObject *__pyx_n_s_shape; - PyObject *__pyx_n_s_size; - PyObject *__pyx_n_s_spec; - PyObject *__pyx_n_s_start; - PyObject *__pyx_n_s_step; - PyObject *__pyx_n_s_stop; - PyObject *__pyx_kp_s_strided_and_direct; - PyObject *__pyx_kp_s_strided_and_direct_or_indirect; - PyObject *__pyx_kp_s_strided_and_indirect; - PyObject *__pyx_kp_s_stringsource; - PyObject *__pyx_n_s_struct; - PyObject *__pyx_n_s_sys; - PyObject *__pyx_n_s_t_xs; - PyObject *__pyx_n_s_t_ys; - PyObject *__pyx_n_s_test; - PyObject *__pyx_kp_s_unable_to_allocate_array_data; - PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; - PyObject *__pyx_n_s_unpack; - PyObject *__pyx_n_s_update; - PyObject *__pyx_n_s_values; - PyObject *__pyx_n_s_version_info; - PyObject *__pyx_int_0; - PyObject *__pyx_int_1; - PyObject *__pyx_int_3; - PyObject *__pyx_int_112105877; - PyObject *__pyx_int_136983863; - PyObject *__pyx_int_184977713; - PyObject *__pyx_int_neg_1; - float __pyx_k__9; - PyObject *__pyx_slice__5; - PyObject *__pyx_tuple__4; - PyObject *__pyx_tuple__8; - PyObject *__pyx_tuple__10; - PyObject *__pyx_tuple__11; - PyObject *__pyx_tuple__12; - PyObject *__pyx_tuple__13; - PyObject *__pyx_tuple__14; - PyObject *__pyx_tuple__15; - PyObject *__pyx_tuple__16; - PyObject *__pyx_tuple__17; - PyObject *__pyx_tuple__18; - PyObject *__pyx_tuple__19; - PyObject *__pyx_tuple__21; - PyObject *__pyx_codeobj__20; - PyObject *__pyx_codeobj__22; -} __pyx_mstate; - -#if CYTHON_USE_MODULE_STATE -#ifdef __cplusplus -namespace { - extern struct PyModuleDef __pyx_moduledef; -} /* anonymous namespace */ -#else -static struct PyModuleDef __pyx_moduledef; -#endif - -#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) - -#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) - -#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) -#else -static __pyx_mstate __pyx_mstate_global_static = -#ifdef __cplusplus - {}; -#else - {0}; -#endif -static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static; -#endif -/* #### Code section: module_state_clear ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_clear(PyObject *m) { - __pyx_mstate *clear_module_state = __pyx_mstate(m); - if (!clear_module_state) return 0; - Py_CLEAR(clear_module_state->__pyx_d); - Py_CLEAR(clear_module_state->__pyx_b); - Py_CLEAR(clear_module_state->__pyx_cython_runtime); - Py_CLEAR(clear_module_state->__pyx_empty_tuple); - Py_CLEAR(clear_module_state->__pyx_empty_bytes); - Py_CLEAR(clear_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_CLEAR(clear_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); - #endif - Py_CLEAR(clear_module_state->__pyx_array_type); - Py_CLEAR(clear_module_state->__pyx_type___pyx_array); - Py_CLEAR(clear_module_state->__pyx_MemviewEnum_type); - 
Py_CLEAR(clear_module_state->__pyx_type___pyx_MemviewEnum); - Py_CLEAR(clear_module_state->__pyx_memoryview_type); - Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryview); - Py_CLEAR(clear_module_state->__pyx_memoryviewslice_type); - Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryviewslice); - Py_CLEAR(clear_module_state->__pyx_kp_u_); - Py_CLEAR(clear_module_state->__pyx_n_s_ASCII); - Py_CLEAR(clear_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi); - Py_CLEAR(clear_module_state->__pyx_n_s_AssertionError); - Py_CLEAR(clear_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri); - Py_CLEAR(clear_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is); - Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor); - Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi); - Py_CLEAR(clear_module_state->__pyx_kp_u_Cannot_index_with_type); - Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with); - Py_CLEAR(clear_module_state->__pyx_kp_s_Dimension_d_is_not_direct); - Py_CLEAR(clear_module_state->__pyx_n_s_Ellipsis); - Py_CLEAR(clear_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr); - Py_CLEAR(clear_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0); - Py_CLEAR(clear_module_state->__pyx_n_s_IndexError); - Py_CLEAR(clear_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d); - Py_CLEAR(clear_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte); - Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr); - Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_shape_in_axis); - Py_CLEAR(clear_module_state->__pyx_n_s_MemoryError); - Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x); - Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_object); - Py_CLEAR(clear_module_state->__pyx_n_b_O); - Py_CLEAR(clear_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - Py_CLEAR(clear_module_state->__pyx_n_s_PickleError); - Py_CLEAR(clear_module_state->__pyx_n_s_Sequence); - Py_CLEAR(clear_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d); - Py_CLEAR(clear_module_state->__pyx_n_s_TypeError); - Py_CLEAR(clear_module_state->__pyx_kp_s_Unable_to_convert_item_to_object); - Py_CLEAR(clear_module_state->__pyx_n_s_ValueError); - Py_CLEAR(clear_module_state->__pyx_n_s_View_MemoryView); - Py_CLEAR(clear_module_state->__pyx_kp_u__2); - Py_CLEAR(clear_module_state->__pyx_n_s__23); - Py_CLEAR(clear_module_state->__pyx_n_s__3); - Py_CLEAR(clear_module_state->__pyx_kp_u__6); - Py_CLEAR(clear_module_state->__pyx_kp_u__7); - Py_CLEAR(clear_module_state->__pyx_n_s_abc); - Py_CLEAR(clear_module_state->__pyx_n_s_allocate_buffer); - Py_CLEAR(clear_module_state->__pyx_kp_u_and); - Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); - Py_CLEAR(clear_module_state->__pyx_n_s_base); - Py_CLEAR(clear_module_state->__pyx_n_s_c); - Py_CLEAR(clear_module_state->__pyx_n_u_c); - Py_CLEAR(clear_module_state->__pyx_n_s_class); - Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); - Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); - Py_CLEAR(clear_module_state->__pyx_n_s_collections); - Py_CLEAR(clear_module_state->__pyx_kp_s_collections_abc); - Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_direct); - Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_indirect); - Py_CLEAR(clear_module_state->__pyx_kp_s_core_pyx); - Py_CLEAR(clear_module_state->__pyx_n_s_count); - Py_CLEAR(clear_module_state->__pyx_n_s_dict); - Py_CLEAR(clear_module_state->__pyx_kp_u_disable); - 
Py_CLEAR(clear_module_state->__pyx_n_s_dtype_is_object); - Py_CLEAR(clear_module_state->__pyx_kp_u_enable); - Py_CLEAR(clear_module_state->__pyx_n_s_encode); - Py_CLEAR(clear_module_state->__pyx_n_s_enumerate); - Py_CLEAR(clear_module_state->__pyx_n_s_error); - Py_CLEAR(clear_module_state->__pyx_n_s_flags); - Py_CLEAR(clear_module_state->__pyx_n_s_format); - Py_CLEAR(clear_module_state->__pyx_n_s_fortran); - Py_CLEAR(clear_module_state->__pyx_n_u_fortran); - Py_CLEAR(clear_module_state->__pyx_kp_u_gc); - Py_CLEAR(clear_module_state->__pyx_n_s_getstate); - Py_CLEAR(clear_module_state->__pyx_kp_u_got); - Py_CLEAR(clear_module_state->__pyx_kp_u_got_differing_extents_in_dimensi); - Py_CLEAR(clear_module_state->__pyx_n_s_id); - Py_CLEAR(clear_module_state->__pyx_n_s_import); - Py_CLEAR(clear_module_state->__pyx_n_s_index); - Py_CLEAR(clear_module_state->__pyx_n_s_initializing); - Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); - Py_CLEAR(clear_module_state->__pyx_kp_u_isenabled); - Py_CLEAR(clear_module_state->__pyx_n_s_itemsize); - Py_CLEAR(clear_module_state->__pyx_kp_s_itemsize_0_for_cython_array); - Py_CLEAR(clear_module_state->__pyx_n_s_main); - Py_CLEAR(clear_module_state->__pyx_n_s_maximum_path_c); - Py_CLEAR(clear_module_state->__pyx_n_s_memview); - Py_CLEAR(clear_module_state->__pyx_n_s_mode); - Py_CLEAR(clear_module_state->__pyx_n_s_monotonic_align_core); - Py_CLEAR(clear_module_state->__pyx_n_s_name); - Py_CLEAR(clear_module_state->__pyx_n_s_name_2); - Py_CLEAR(clear_module_state->__pyx_n_s_ndim); - Py_CLEAR(clear_module_state->__pyx_n_s_new); - Py_CLEAR(clear_module_state->__pyx_kp_s_no_default___reduce___due_to_non); - Py_CLEAR(clear_module_state->__pyx_n_s_obj); - Py_CLEAR(clear_module_state->__pyx_n_s_pack); - Py_CLEAR(clear_module_state->__pyx_n_s_paths); - Py_CLEAR(clear_module_state->__pyx_n_s_pickle); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_PickleError); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_checksum); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_result); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_state); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_type); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_unpickle_Enum); - Py_CLEAR(clear_module_state->__pyx_n_s_pyx_vtable); - Py_CLEAR(clear_module_state->__pyx_n_s_range); - Py_CLEAR(clear_module_state->__pyx_n_s_reduce); - Py_CLEAR(clear_module_state->__pyx_n_s_reduce_cython); - Py_CLEAR(clear_module_state->__pyx_n_s_reduce_ex); - Py_CLEAR(clear_module_state->__pyx_n_s_register); - Py_CLEAR(clear_module_state->__pyx_n_s_setstate); - Py_CLEAR(clear_module_state->__pyx_n_s_setstate_cython); - Py_CLEAR(clear_module_state->__pyx_n_s_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_size); - Py_CLEAR(clear_module_state->__pyx_n_s_spec); - Py_CLEAR(clear_module_state->__pyx_n_s_start); - Py_CLEAR(clear_module_state->__pyx_n_s_step); - Py_CLEAR(clear_module_state->__pyx_n_s_stop); - Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct); - Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct_or_indirect); - Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_indirect); - Py_CLEAR(clear_module_state->__pyx_kp_s_stringsource); - Py_CLEAR(clear_module_state->__pyx_n_s_struct); - Py_CLEAR(clear_module_state->__pyx_n_s_sys); - Py_CLEAR(clear_module_state->__pyx_n_s_t_xs); - Py_CLEAR(clear_module_state->__pyx_n_s_t_ys); - Py_CLEAR(clear_module_state->__pyx_n_s_test); - Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_array_data); - 
Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str); - Py_CLEAR(clear_module_state->__pyx_n_s_unpack); - Py_CLEAR(clear_module_state->__pyx_n_s_update); - Py_CLEAR(clear_module_state->__pyx_n_s_values); - Py_CLEAR(clear_module_state->__pyx_n_s_version_info); - Py_CLEAR(clear_module_state->__pyx_int_0); - Py_CLEAR(clear_module_state->__pyx_int_1); - Py_CLEAR(clear_module_state->__pyx_int_3); - Py_CLEAR(clear_module_state->__pyx_int_112105877); - Py_CLEAR(clear_module_state->__pyx_int_136983863); - Py_CLEAR(clear_module_state->__pyx_int_184977713); - Py_CLEAR(clear_module_state->__pyx_int_neg_1); - Py_CLEAR(clear_module_state->__pyx_slice__5); - Py_CLEAR(clear_module_state->__pyx_tuple__4); - Py_CLEAR(clear_module_state->__pyx_tuple__8); - Py_CLEAR(clear_module_state->__pyx_tuple__10); - Py_CLEAR(clear_module_state->__pyx_tuple__11); - Py_CLEAR(clear_module_state->__pyx_tuple__12); - Py_CLEAR(clear_module_state->__pyx_tuple__13); - Py_CLEAR(clear_module_state->__pyx_tuple__14); - Py_CLEAR(clear_module_state->__pyx_tuple__15); - Py_CLEAR(clear_module_state->__pyx_tuple__16); - Py_CLEAR(clear_module_state->__pyx_tuple__17); - Py_CLEAR(clear_module_state->__pyx_tuple__18); - Py_CLEAR(clear_module_state->__pyx_tuple__19); - Py_CLEAR(clear_module_state->__pyx_tuple__21); - Py_CLEAR(clear_module_state->__pyx_codeobj__20); - Py_CLEAR(clear_module_state->__pyx_codeobj__22); - return 0; -} -#endif -/* #### Code section: module_state_traverse ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { - __pyx_mstate *traverse_module_state = __pyx_mstate(m); - if (!traverse_module_state) return 0; - Py_VISIT(traverse_module_state->__pyx_d); - Py_VISIT(traverse_module_state->__pyx_b); - Py_VISIT(traverse_module_state->__pyx_cython_runtime); - Py_VISIT(traverse_module_state->__pyx_empty_tuple); - Py_VISIT(traverse_module_state->__pyx_empty_bytes); - Py_VISIT(traverse_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_VISIT(traverse_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); - #endif - Py_VISIT(traverse_module_state->__pyx_array_type); - Py_VISIT(traverse_module_state->__pyx_type___pyx_array); - Py_VISIT(traverse_module_state->__pyx_MemviewEnum_type); - Py_VISIT(traverse_module_state->__pyx_type___pyx_MemviewEnum); - Py_VISIT(traverse_module_state->__pyx_memoryview_type); - Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryview); - Py_VISIT(traverse_module_state->__pyx_memoryviewslice_type); - Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryviewslice); - Py_VISIT(traverse_module_state->__pyx_kp_u_); - Py_VISIT(traverse_module_state->__pyx_n_s_ASCII); - Py_VISIT(traverse_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi); - Py_VISIT(traverse_module_state->__pyx_n_s_AssertionError); - Py_VISIT(traverse_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri); - Py_VISIT(traverse_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is); - Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor); - Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi); - Py_VISIT(traverse_module_state->__pyx_kp_u_Cannot_index_with_type); - Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with); - Py_VISIT(traverse_module_state->__pyx_kp_s_Dimension_d_is_not_direct); - Py_VISIT(traverse_module_state->__pyx_n_s_Ellipsis); - 
Py_VISIT(traverse_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr); - Py_VISIT(traverse_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0); - Py_VISIT(traverse_module_state->__pyx_n_s_IndexError); - Py_VISIT(traverse_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d); - Py_VISIT(traverse_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte); - Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr); - Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_shape_in_axis); - Py_VISIT(traverse_module_state->__pyx_n_s_MemoryError); - Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x); - Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_object); - Py_VISIT(traverse_module_state->__pyx_n_b_O); - Py_VISIT(traverse_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - Py_VISIT(traverse_module_state->__pyx_n_s_PickleError); - Py_VISIT(traverse_module_state->__pyx_n_s_Sequence); - Py_VISIT(traverse_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d); - Py_VISIT(traverse_module_state->__pyx_n_s_TypeError); - Py_VISIT(traverse_module_state->__pyx_kp_s_Unable_to_convert_item_to_object); - Py_VISIT(traverse_module_state->__pyx_n_s_ValueError); - Py_VISIT(traverse_module_state->__pyx_n_s_View_MemoryView); - Py_VISIT(traverse_module_state->__pyx_kp_u__2); - Py_VISIT(traverse_module_state->__pyx_n_s__23); - Py_VISIT(traverse_module_state->__pyx_n_s__3); - Py_VISIT(traverse_module_state->__pyx_kp_u__6); - Py_VISIT(traverse_module_state->__pyx_kp_u__7); - Py_VISIT(traverse_module_state->__pyx_n_s_abc); - Py_VISIT(traverse_module_state->__pyx_n_s_allocate_buffer); - Py_VISIT(traverse_module_state->__pyx_kp_u_and); - Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); - Py_VISIT(traverse_module_state->__pyx_n_s_base); - Py_VISIT(traverse_module_state->__pyx_n_s_c); - Py_VISIT(traverse_module_state->__pyx_n_u_c); - Py_VISIT(traverse_module_state->__pyx_n_s_class); - Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); - Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); - Py_VISIT(traverse_module_state->__pyx_n_s_collections); - Py_VISIT(traverse_module_state->__pyx_kp_s_collections_abc); - Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_direct); - Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_indirect); - Py_VISIT(traverse_module_state->__pyx_kp_s_core_pyx); - Py_VISIT(traverse_module_state->__pyx_n_s_count); - Py_VISIT(traverse_module_state->__pyx_n_s_dict); - Py_VISIT(traverse_module_state->__pyx_kp_u_disable); - Py_VISIT(traverse_module_state->__pyx_n_s_dtype_is_object); - Py_VISIT(traverse_module_state->__pyx_kp_u_enable); - Py_VISIT(traverse_module_state->__pyx_n_s_encode); - Py_VISIT(traverse_module_state->__pyx_n_s_enumerate); - Py_VISIT(traverse_module_state->__pyx_n_s_error); - Py_VISIT(traverse_module_state->__pyx_n_s_flags); - Py_VISIT(traverse_module_state->__pyx_n_s_format); - Py_VISIT(traverse_module_state->__pyx_n_s_fortran); - Py_VISIT(traverse_module_state->__pyx_n_u_fortran); - Py_VISIT(traverse_module_state->__pyx_kp_u_gc); - Py_VISIT(traverse_module_state->__pyx_n_s_getstate); - Py_VISIT(traverse_module_state->__pyx_kp_u_got); - Py_VISIT(traverse_module_state->__pyx_kp_u_got_differing_extents_in_dimensi); - Py_VISIT(traverse_module_state->__pyx_n_s_id); - Py_VISIT(traverse_module_state->__pyx_n_s_import); - Py_VISIT(traverse_module_state->__pyx_n_s_index); - Py_VISIT(traverse_module_state->__pyx_n_s_initializing); - 
Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); - Py_VISIT(traverse_module_state->__pyx_kp_u_isenabled); - Py_VISIT(traverse_module_state->__pyx_n_s_itemsize); - Py_VISIT(traverse_module_state->__pyx_kp_s_itemsize_0_for_cython_array); - Py_VISIT(traverse_module_state->__pyx_n_s_main); - Py_VISIT(traverse_module_state->__pyx_n_s_maximum_path_c); - Py_VISIT(traverse_module_state->__pyx_n_s_memview); - Py_VISIT(traverse_module_state->__pyx_n_s_mode); - Py_VISIT(traverse_module_state->__pyx_n_s_monotonic_align_core); - Py_VISIT(traverse_module_state->__pyx_n_s_name); - Py_VISIT(traverse_module_state->__pyx_n_s_name_2); - Py_VISIT(traverse_module_state->__pyx_n_s_ndim); - Py_VISIT(traverse_module_state->__pyx_n_s_new); - Py_VISIT(traverse_module_state->__pyx_kp_s_no_default___reduce___due_to_non); - Py_VISIT(traverse_module_state->__pyx_n_s_obj); - Py_VISIT(traverse_module_state->__pyx_n_s_pack); - Py_VISIT(traverse_module_state->__pyx_n_s_paths); - Py_VISIT(traverse_module_state->__pyx_n_s_pickle); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_PickleError); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_checksum); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_result); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_state); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_type); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_unpickle_Enum); - Py_VISIT(traverse_module_state->__pyx_n_s_pyx_vtable); - Py_VISIT(traverse_module_state->__pyx_n_s_range); - Py_VISIT(traverse_module_state->__pyx_n_s_reduce); - Py_VISIT(traverse_module_state->__pyx_n_s_reduce_cython); - Py_VISIT(traverse_module_state->__pyx_n_s_reduce_ex); - Py_VISIT(traverse_module_state->__pyx_n_s_register); - Py_VISIT(traverse_module_state->__pyx_n_s_setstate); - Py_VISIT(traverse_module_state->__pyx_n_s_setstate_cython); - Py_VISIT(traverse_module_state->__pyx_n_s_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_size); - Py_VISIT(traverse_module_state->__pyx_n_s_spec); - Py_VISIT(traverse_module_state->__pyx_n_s_start); - Py_VISIT(traverse_module_state->__pyx_n_s_step); - Py_VISIT(traverse_module_state->__pyx_n_s_stop); - Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct); - Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct_or_indirect); - Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_indirect); - Py_VISIT(traverse_module_state->__pyx_kp_s_stringsource); - Py_VISIT(traverse_module_state->__pyx_n_s_struct); - Py_VISIT(traverse_module_state->__pyx_n_s_sys); - Py_VISIT(traverse_module_state->__pyx_n_s_t_xs); - Py_VISIT(traverse_module_state->__pyx_n_s_t_ys); - Py_VISIT(traverse_module_state->__pyx_n_s_test); - Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_array_data); - Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str); - Py_VISIT(traverse_module_state->__pyx_n_s_unpack); - Py_VISIT(traverse_module_state->__pyx_n_s_update); - Py_VISIT(traverse_module_state->__pyx_n_s_values); - Py_VISIT(traverse_module_state->__pyx_n_s_version_info); - Py_VISIT(traverse_module_state->__pyx_int_0); - Py_VISIT(traverse_module_state->__pyx_int_1); - Py_VISIT(traverse_module_state->__pyx_int_3); - Py_VISIT(traverse_module_state->__pyx_int_112105877); - Py_VISIT(traverse_module_state->__pyx_int_136983863); - Py_VISIT(traverse_module_state->__pyx_int_184977713); - Py_VISIT(traverse_module_state->__pyx_int_neg_1); - Py_VISIT(traverse_module_state->__pyx_slice__5); - Py_VISIT(traverse_module_state->__pyx_tuple__4); - Py_VISIT(traverse_module_state->__pyx_tuple__8); - 
Py_VISIT(traverse_module_state->__pyx_tuple__10); - Py_VISIT(traverse_module_state->__pyx_tuple__11); - Py_VISIT(traverse_module_state->__pyx_tuple__12); - Py_VISIT(traverse_module_state->__pyx_tuple__13); - Py_VISIT(traverse_module_state->__pyx_tuple__14); - Py_VISIT(traverse_module_state->__pyx_tuple__15); - Py_VISIT(traverse_module_state->__pyx_tuple__16); - Py_VISIT(traverse_module_state->__pyx_tuple__17); - Py_VISIT(traverse_module_state->__pyx_tuple__18); - Py_VISIT(traverse_module_state->__pyx_tuple__19); - Py_VISIT(traverse_module_state->__pyx_tuple__21); - Py_VISIT(traverse_module_state->__pyx_codeobj__20); - Py_VISIT(traverse_module_state->__pyx_codeobj__22); - return 0; -} -#endif -/* #### Code section: module_state_defines ### */ -#define __pyx_d __pyx_mstate_global->__pyx_d -#define __pyx_b __pyx_mstate_global->__pyx_b -#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime -#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple -#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes -#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode -#ifdef __Pyx_CyFunction_USED -#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType -#endif -#ifdef __Pyx_FusedFunction_USED -#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType -#endif -#ifdef __Pyx_Generator_USED -#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType -#endif -#ifdef __Pyx_IterableCoroutine_USED -#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType -#endif -#ifdef __Pyx_Coroutine_USED -#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType -#endif -#ifdef __Pyx_Coroutine_USED -#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType -#endif -#if CYTHON_USE_MODULE_STATE -#endif -#if CYTHON_USE_MODULE_STATE -#endif -#if CYTHON_USE_MODULE_STATE -#endif -#if CYTHON_USE_MODULE_STATE -#define __pyx_type___pyx_array __pyx_mstate_global->__pyx_type___pyx_array -#define __pyx_type___pyx_MemviewEnum __pyx_mstate_global->__pyx_type___pyx_MemviewEnum -#define __pyx_type___pyx_memoryview __pyx_mstate_global->__pyx_type___pyx_memoryview -#define __pyx_type___pyx_memoryviewslice __pyx_mstate_global->__pyx_type___pyx_memoryviewslice -#endif -#define __pyx_array_type __pyx_mstate_global->__pyx_array_type -#define __pyx_MemviewEnum_type __pyx_mstate_global->__pyx_MemviewEnum_type -#define __pyx_memoryview_type __pyx_mstate_global->__pyx_memoryview_type -#define __pyx_memoryviewslice_type __pyx_mstate_global->__pyx_memoryviewslice_type -#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_ -#define __pyx_n_s_ASCII __pyx_mstate_global->__pyx_n_s_ASCII -#define __pyx_kp_s_All_dimensions_preceding_dimensi __pyx_mstate_global->__pyx_kp_s_All_dimensions_preceding_dimensi -#define __pyx_n_s_AssertionError __pyx_mstate_global->__pyx_n_s_AssertionError -#define __pyx_kp_s_Buffer_view_does_not_expose_stri __pyx_mstate_global->__pyx_kp_s_Buffer_view_does_not_expose_stri -#define __pyx_kp_s_Can_only_create_a_buffer_that_is __pyx_mstate_global->__pyx_kp_s_Can_only_create_a_buffer_that_is -#define __pyx_kp_s_Cannot_assign_to_read_only_memor __pyx_mstate_global->__pyx_kp_s_Cannot_assign_to_read_only_memor -#define __pyx_kp_s_Cannot_create_writable_memory_vi __pyx_mstate_global->__pyx_kp_s_Cannot_create_writable_memory_vi -#define __pyx_kp_u_Cannot_index_with_type __pyx_mstate_global->__pyx_kp_u_Cannot_index_with_type -#define __pyx_kp_s_Cannot_transpose_memoryview_with 
__pyx_mstate_global->__pyx_kp_s_Cannot_transpose_memoryview_with -#define __pyx_kp_s_Dimension_d_is_not_direct __pyx_mstate_global->__pyx_kp_s_Dimension_d_is_not_direct -#define __pyx_n_s_Ellipsis __pyx_mstate_global->__pyx_n_s_Ellipsis -#define __pyx_kp_s_Empty_shape_tuple_for_cython_arr __pyx_mstate_global->__pyx_kp_s_Empty_shape_tuple_for_cython_arr -#define __pyx_kp_s_Incompatible_checksums_0x_x_vs_0 __pyx_mstate_global->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0 -#define __pyx_n_s_IndexError __pyx_mstate_global->__pyx_n_s_IndexError -#define __pyx_kp_s_Index_out_of_bounds_axis_d __pyx_mstate_global->__pyx_kp_s_Index_out_of_bounds_axis_d -#define __pyx_kp_s_Indirect_dimensions_not_supporte __pyx_mstate_global->__pyx_kp_s_Indirect_dimensions_not_supporte -#define __pyx_kp_u_Invalid_mode_expected_c_or_fortr __pyx_mstate_global->__pyx_kp_u_Invalid_mode_expected_c_or_fortr -#define __pyx_kp_u_Invalid_shape_in_axis __pyx_mstate_global->__pyx_kp_u_Invalid_shape_in_axis -#define __pyx_n_s_MemoryError __pyx_mstate_global->__pyx_n_s_MemoryError -#define __pyx_kp_s_MemoryView_of_r_at_0x_x __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_at_0x_x -#define __pyx_kp_s_MemoryView_of_r_object __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_object -#define __pyx_n_b_O __pyx_mstate_global->__pyx_n_b_O -#define __pyx_kp_u_Out_of_bounds_on_buffer_access_a __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a -#define __pyx_n_s_PickleError __pyx_mstate_global->__pyx_n_s_PickleError -#define __pyx_n_s_Sequence __pyx_mstate_global->__pyx_n_s_Sequence -#define __pyx_kp_s_Step_may_not_be_zero_axis_d __pyx_mstate_global->__pyx_kp_s_Step_may_not_be_zero_axis_d -#define __pyx_n_s_TypeError __pyx_mstate_global->__pyx_n_s_TypeError -#define __pyx_kp_s_Unable_to_convert_item_to_object __pyx_mstate_global->__pyx_kp_s_Unable_to_convert_item_to_object -#define __pyx_n_s_ValueError __pyx_mstate_global->__pyx_n_s_ValueError -#define __pyx_n_s_View_MemoryView __pyx_mstate_global->__pyx_n_s_View_MemoryView -#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2 -#define __pyx_n_s__23 __pyx_mstate_global->__pyx_n_s__23 -#define __pyx_n_s__3 __pyx_mstate_global->__pyx_n_s__3 -#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6 -#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7 -#define __pyx_n_s_abc __pyx_mstate_global->__pyx_n_s_abc -#define __pyx_n_s_allocate_buffer __pyx_mstate_global->__pyx_n_s_allocate_buffer -#define __pyx_kp_u_and __pyx_mstate_global->__pyx_kp_u_and -#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines -#define __pyx_n_s_base __pyx_mstate_global->__pyx_n_s_base -#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c -#define __pyx_n_u_c __pyx_mstate_global->__pyx_n_u_c -#define __pyx_n_s_class __pyx_mstate_global->__pyx_n_s_class -#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem -#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback -#define __pyx_n_s_collections __pyx_mstate_global->__pyx_n_s_collections -#define __pyx_kp_s_collections_abc __pyx_mstate_global->__pyx_kp_s_collections_abc -#define __pyx_kp_s_contiguous_and_direct __pyx_mstate_global->__pyx_kp_s_contiguous_and_direct -#define __pyx_kp_s_contiguous_and_indirect __pyx_mstate_global->__pyx_kp_s_contiguous_and_indirect -#define __pyx_kp_s_core_pyx __pyx_mstate_global->__pyx_kp_s_core_pyx -#define __pyx_n_s_count __pyx_mstate_global->__pyx_n_s_count -#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict -#define 
__pyx_kp_u_disable __pyx_mstate_global->__pyx_kp_u_disable -#define __pyx_n_s_dtype_is_object __pyx_mstate_global->__pyx_n_s_dtype_is_object -#define __pyx_kp_u_enable __pyx_mstate_global->__pyx_kp_u_enable -#define __pyx_n_s_encode __pyx_mstate_global->__pyx_n_s_encode -#define __pyx_n_s_enumerate __pyx_mstate_global->__pyx_n_s_enumerate -#define __pyx_n_s_error __pyx_mstate_global->__pyx_n_s_error -#define __pyx_n_s_flags __pyx_mstate_global->__pyx_n_s_flags -#define __pyx_n_s_format __pyx_mstate_global->__pyx_n_s_format -#define __pyx_n_s_fortran __pyx_mstate_global->__pyx_n_s_fortran -#define __pyx_n_u_fortran __pyx_mstate_global->__pyx_n_u_fortran -#define __pyx_kp_u_gc __pyx_mstate_global->__pyx_kp_u_gc -#define __pyx_n_s_getstate __pyx_mstate_global->__pyx_n_s_getstate -#define __pyx_kp_u_got __pyx_mstate_global->__pyx_kp_u_got -#define __pyx_kp_u_got_differing_extents_in_dimensi __pyx_mstate_global->__pyx_kp_u_got_differing_extents_in_dimensi -#define __pyx_n_s_id __pyx_mstate_global->__pyx_n_s_id -#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import -#define __pyx_n_s_index __pyx_mstate_global->__pyx_n_s_index -#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing -#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine -#define __pyx_kp_u_isenabled __pyx_mstate_global->__pyx_kp_u_isenabled -#define __pyx_n_s_itemsize __pyx_mstate_global->__pyx_n_s_itemsize -#define __pyx_kp_s_itemsize_0_for_cython_array __pyx_mstate_global->__pyx_kp_s_itemsize_0_for_cython_array -#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main -#define __pyx_n_s_maximum_path_c __pyx_mstate_global->__pyx_n_s_maximum_path_c -#define __pyx_n_s_memview __pyx_mstate_global->__pyx_n_s_memview -#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode -#define __pyx_n_s_monotonic_align_core __pyx_mstate_global->__pyx_n_s_monotonic_align_core -#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name -#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2 -#define __pyx_n_s_ndim __pyx_mstate_global->__pyx_n_s_ndim -#define __pyx_n_s_new __pyx_mstate_global->__pyx_n_s_new -#define __pyx_kp_s_no_default___reduce___due_to_non __pyx_mstate_global->__pyx_kp_s_no_default___reduce___due_to_non -#define __pyx_n_s_obj __pyx_mstate_global->__pyx_n_s_obj -#define __pyx_n_s_pack __pyx_mstate_global->__pyx_n_s_pack -#define __pyx_n_s_paths __pyx_mstate_global->__pyx_n_s_paths -#define __pyx_n_s_pickle __pyx_mstate_global->__pyx_n_s_pickle -#define __pyx_n_s_pyx_PickleError __pyx_mstate_global->__pyx_n_s_pyx_PickleError -#define __pyx_n_s_pyx_checksum __pyx_mstate_global->__pyx_n_s_pyx_checksum -#define __pyx_n_s_pyx_result __pyx_mstate_global->__pyx_n_s_pyx_result -#define __pyx_n_s_pyx_state __pyx_mstate_global->__pyx_n_s_pyx_state -#define __pyx_n_s_pyx_type __pyx_mstate_global->__pyx_n_s_pyx_type -#define __pyx_n_s_pyx_unpickle_Enum __pyx_mstate_global->__pyx_n_s_pyx_unpickle_Enum -#define __pyx_n_s_pyx_vtable __pyx_mstate_global->__pyx_n_s_pyx_vtable -#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range -#define __pyx_n_s_reduce __pyx_mstate_global->__pyx_n_s_reduce -#define __pyx_n_s_reduce_cython __pyx_mstate_global->__pyx_n_s_reduce_cython -#define __pyx_n_s_reduce_ex __pyx_mstate_global->__pyx_n_s_reduce_ex -#define __pyx_n_s_register __pyx_mstate_global->__pyx_n_s_register -#define __pyx_n_s_setstate __pyx_mstate_global->__pyx_n_s_setstate -#define __pyx_n_s_setstate_cython __pyx_mstate_global->__pyx_n_s_setstate_cython -#define __pyx_n_s_shape 
__pyx_mstate_global->__pyx_n_s_shape -#define __pyx_n_s_size __pyx_mstate_global->__pyx_n_s_size -#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec -#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start -#define __pyx_n_s_step __pyx_mstate_global->__pyx_n_s_step -#define __pyx_n_s_stop __pyx_mstate_global->__pyx_n_s_stop -#define __pyx_kp_s_strided_and_direct __pyx_mstate_global->__pyx_kp_s_strided_and_direct -#define __pyx_kp_s_strided_and_direct_or_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_direct_or_indirect -#define __pyx_kp_s_strided_and_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_indirect -#define __pyx_kp_s_stringsource __pyx_mstate_global->__pyx_kp_s_stringsource -#define __pyx_n_s_struct __pyx_mstate_global->__pyx_n_s_struct -#define __pyx_n_s_sys __pyx_mstate_global->__pyx_n_s_sys -#define __pyx_n_s_t_xs __pyx_mstate_global->__pyx_n_s_t_xs -#define __pyx_n_s_t_ys __pyx_mstate_global->__pyx_n_s_t_ys -#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test -#define __pyx_kp_s_unable_to_allocate_array_data __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_array_data -#define __pyx_kp_s_unable_to_allocate_shape_and_str __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_shape_and_str -#define __pyx_n_s_unpack __pyx_mstate_global->__pyx_n_s_unpack -#define __pyx_n_s_update __pyx_mstate_global->__pyx_n_s_update -#define __pyx_n_s_values __pyx_mstate_global->__pyx_n_s_values -#define __pyx_n_s_version_info __pyx_mstate_global->__pyx_n_s_version_info -#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 -#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 -#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3 -#define __pyx_int_112105877 __pyx_mstate_global->__pyx_int_112105877 -#define __pyx_int_136983863 __pyx_mstate_global->__pyx_int_136983863 -#define __pyx_int_184977713 __pyx_mstate_global->__pyx_int_184977713 -#define __pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1 -#define __pyx_k__9 __pyx_mstate_global->__pyx_k__9 -#define __pyx_slice__5 __pyx_mstate_global->__pyx_slice__5 -#define __pyx_tuple__4 __pyx_mstate_global->__pyx_tuple__4 -#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8 -#define __pyx_tuple__10 __pyx_mstate_global->__pyx_tuple__10 -#define __pyx_tuple__11 __pyx_mstate_global->__pyx_tuple__11 -#define __pyx_tuple__12 __pyx_mstate_global->__pyx_tuple__12 -#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13 -#define __pyx_tuple__14 __pyx_mstate_global->__pyx_tuple__14 -#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15 -#define __pyx_tuple__16 __pyx_mstate_global->__pyx_tuple__16 -#define __pyx_tuple__17 __pyx_mstate_global->__pyx_tuple__17 -#define __pyx_tuple__18 __pyx_mstate_global->__pyx_tuple__18 -#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19 -#define __pyx_tuple__21 __pyx_mstate_global->__pyx_tuple__21 -#define __pyx_codeobj__20 __pyx_mstate_global->__pyx_codeobj__20 -#define __pyx_codeobj__22 __pyx_mstate_global->__pyx_codeobj__22 -/* #### Code section: module_code ### */ - -/* "View.MemoryView":131 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - -/* Python wrapper */ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_shape = 0; - Py_ssize_t __pyx_v_itemsize; - 
PyObject *__pyx_v_format = 0; - PyObject *__pyx_v_mode = 0; - int __pyx_v_allocate_buffer; - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; - PyObject* values[5] = {0,0,0,0,0}; - values[3] = ((PyObject *)__pyx_n_s_c); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_shape)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_itemsize)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 131, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_format)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 131, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_mode); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_allocate_buffer); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 131, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); - values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); - values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_shape = ((PyObject*)values[0]); - __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 131, 
__pyx_L3_error) - __pyx_v_format = values[2]; - __pyx_v_mode = values[3]; - if (values[4]) { - __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 132, __pyx_L3_error) - } else { - - /* "View.MemoryView":132 - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, - * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< - * - * cdef int idx - */ - __pyx_v_allocate_buffer = ((int)1); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, __pyx_nargs); __PYX_ERR(1, 131, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 131, __pyx_L1_error) - if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { - PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 131, __pyx_L1_error) - } - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); - - /* "View.MemoryView":131 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { - int __pyx_v_idx; - Py_ssize_t __pyx_v_dim; - char __pyx_v_order; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - char *__pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_UCS4 __pyx_t_10; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __Pyx_INCREF(__pyx_v_format); - - /* "View.MemoryView":137 - * cdef Py_ssize_t dim - * - * self.ndim = len(shape) # <<<<<<<<<<<<<< - * self.itemsize = itemsize - * - */ - if (unlikely(__pyx_v_shape == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 137, __pyx_L1_error) - } - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 137, __pyx_L1_error) - __pyx_v_self->ndim = ((int)__pyx_t_1); - - /* "View.MemoryView":138 - * - * self.ndim = len(shape) - * self.itemsize = itemsize # <<<<<<<<<<<<<< - * - * if not self.ndim: - */ - __pyx_v_self->itemsize = __pyx_v_itemsize; - - /* "View.MemoryView":140 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError, "Empty shape tuple for cython.array" - * - */ - __pyx_t_2 = (!(__pyx_v_self->ndim != 0)); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":141 - * - * if not self.ndim: - * raise ValueError, "Empty shape tuple for cython.array" # <<<<<<<<<<<<<< - * - * if 
itemsize <= 0: - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Empty_shape_tuple_for_cython_arr, 0, 0); - __PYX_ERR(1, 141, __pyx_L1_error) - - /* "View.MemoryView":140 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError, "Empty shape tuple for cython.array" - * - */ - } - - /* "View.MemoryView":143 - * raise ValueError, "Empty shape tuple for cython.array" - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError, "itemsize <= 0 for cython.array" - * - */ - __pyx_t_2 = (__pyx_v_itemsize <= 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":144 - * - * if itemsize <= 0: - * raise ValueError, "itemsize <= 0 for cython.array" # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_itemsize_0_for_cython_array, 0, 0); - __PYX_ERR(1, 144, __pyx_L1_error) - - /* "View.MemoryView":143 - * raise ValueError, "Empty shape tuple for cython.array" - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError, "itemsize <= 0 for cython.array" - * - */ - } - - /* "View.MemoryView":146 - * raise ValueError, "itemsize <= 0 for cython.array" - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - __pyx_t_2 = PyBytes_Check(__pyx_v_format); - __pyx_t_3 = (!__pyx_t_2); - if (__pyx_t_3) { - - /* "View.MemoryView":147 - * - * if not isinstance(format, bytes): - * format = format.encode('ASCII') # <<<<<<<<<<<<<< - * self._format = format # keep a reference to the byte string - * self.format = self._format - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_n_s_ASCII}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":146 - * raise ValueError, "itemsize <= 0 for cython.array" - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - } - - /* "View.MemoryView":148 - * if not isinstance(format, bytes): - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< - * self.format = self._format - * - */ - if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_v_format))) __PYX_ERR(1, 148, __pyx_L1_error) - __pyx_t_4 = __pyx_v_format; - __Pyx_INCREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __Pyx_GOTREF(__pyx_v_self->_format); - __Pyx_DECREF(__pyx_v_self->_format); - __pyx_v_self->_format = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":149 - * format = format.encode('ASCII') - * self._format = format # keep a reference to 
the byte string - * self.format = self._format # <<<<<<<<<<<<<< - * - * - */ - if (unlikely(__pyx_v_self->_format == Py_None)) { - PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); - __PYX_ERR(1, 149, __pyx_L1_error) - } - __pyx_t_8 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) - __pyx_v_self->format = __pyx_t_8; - - /* "View.MemoryView":152 - * - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< - * self._strides = self._shape + self.ndim - * - */ - __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); - - /* "View.MemoryView":153 - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) - * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< - * - * if not self._shape: - */ - __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); - - /* "View.MemoryView":155 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError, "unable to allocate shape and strides." - * - */ - __pyx_t_3 = (!(__pyx_v_self->_shape != 0)); - if (unlikely(__pyx_t_3)) { - - /* "View.MemoryView":156 - * - * if not self._shape: - * raise MemoryError, "unable to allocate shape and strides." # <<<<<<<<<<<<<< - * - * - */ - __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_shape_and_str, 0, 0); - __PYX_ERR(1, 156, __pyx_L1_error) - - /* "View.MemoryView":155 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError, "unable to allocate shape and strides." - * - */ - } - - /* "View.MemoryView":159 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." - */ - __pyx_t_7 = 0; - __pyx_t_4 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_4); __pyx_t_1 = 0; - for (;;) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(1, 159, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 159, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_9; - __pyx_v_idx = __pyx_t_7; - __pyx_t_7 = (__pyx_t_7 + 1); - - /* "View.MemoryView":160 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." - * self._shape[idx] = dim - */ - __pyx_t_3 = (__pyx_v_dim <= 0); - if (unlikely(__pyx_t_3)) { - - /* "View.MemoryView":161 - * for idx, dim in enumerate(shape): - * if dim <= 0: - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." 
# <<<<<<<<<<<<<< - * self._shape[idx] = dim - * - */ - __pyx_t_5 = PyTuple_New(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = 0; - __pyx_t_10 = 127; - __Pyx_INCREF(__pyx_kp_u_Invalid_shape_in_axis); - __pyx_t_9 += 22; - __Pyx_GIVEREF(__pyx_kp_u_Invalid_shape_in_axis); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Invalid_shape_in_axis); - __pyx_t_6 = __Pyx_PyUnicode_From_int(__pyx_v_idx, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6); - __pyx_t_6 = 0; - __Pyx_INCREF(__pyx_kp_u_); - __pyx_t_9 += 2; - __Pyx_GIVEREF(__pyx_kp_u_); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_); - __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_6); - __pyx_t_6 = 0; - __Pyx_INCREF(__pyx_kp_u__2); - __pyx_t_9 += 1; - __Pyx_GIVEREF(__pyx_kp_u__2); - PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_kp_u__2); - __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_5, 5, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __PYX_ERR(1, 161, __pyx_L1_error) - - /* "View.MemoryView":160 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." - * self._shape[idx] = dim - */ - } - - /* "View.MemoryView":162 - * if dim <= 0: - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." - * self._shape[idx] = dim # <<<<<<<<<<<<<< - * - * cdef char order - */ - (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; - - /* "View.MemoryView":159 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError, f"Invalid shape in axis {idx}: {dim}." 
- */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":165 - * - * cdef char order - * if mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 165, __pyx_L1_error) - if (__pyx_t_3) { - - /* "View.MemoryView":166 - * cdef char order - * if mode == 'c': - * order = b'C' # <<<<<<<<<<<<<< - * self.mode = u'c' - * elif mode == 'fortran': - */ - __pyx_v_order = 'C'; - - /* "View.MemoryView":167 - * if mode == 'c': - * order = b'C' - * self.mode = u'c' # <<<<<<<<<<<<<< - * elif mode == 'fortran': - * order = b'F' - */ - __Pyx_INCREF(__pyx_n_u_c); - __Pyx_GIVEREF(__pyx_n_u_c); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_c; - - /* "View.MemoryView":165 - * - * cdef char order - * if mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - goto __pyx_L11; - } - - /* "View.MemoryView":168 - * order = b'C' - * self.mode = u'c' - * elif mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 168, __pyx_L1_error) - if (likely(__pyx_t_3)) { - - /* "View.MemoryView":169 - * self.mode = u'c' - * elif mode == 'fortran': - * order = b'F' # <<<<<<<<<<<<<< - * self.mode = u'fortran' - * else: - */ - __pyx_v_order = 'F'; - - /* "View.MemoryView":170 - * elif mode == 'fortran': - * order = b'F' - * self.mode = u'fortran' # <<<<<<<<<<<<<< - * else: - * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" - */ - __Pyx_INCREF(__pyx_n_u_fortran); - __Pyx_GIVEREF(__pyx_n_u_fortran); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_fortran; - - /* "View.MemoryView":168 - * order = b'C' - * self.mode = u'c' - * elif mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - goto __pyx_L11; - } - - /* "View.MemoryView":172 - * self.mode = u'fortran' - * else: - * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" # <<<<<<<<<<<<<< - * - * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_mode, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 172, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 172, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __PYX_ERR(1, 172, __pyx_L1_error) - } - __pyx_L11:; - - /* "View.MemoryView":174 - * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" - * - * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) # <<<<<<<<<<<<<< - * - * self.free_data = allocate_buffer - */ - __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); - - /* "View.MemoryView":176 - * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) - * - * self.free_data = allocate_buffer # <<<<<<<<<<<<<< - * self.dtype_is_object = format == b'O' 
- * - */ - __pyx_v_self->free_data = __pyx_v_allocate_buffer; - - /* "View.MemoryView":177 - * - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< - * - * if allocate_buffer: - */ - __pyx_t_6 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 177, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 177, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_v_self->dtype_is_object = __pyx_t_3; - - /* "View.MemoryView":179 - * self.dtype_is_object = format == b'O' - * - * if allocate_buffer: # <<<<<<<<<<<<<< - * _allocate_buffer(self) - * - */ - if (__pyx_v_allocate_buffer) { - - /* "View.MemoryView":180 - * - * if allocate_buffer: - * _allocate_buffer(self) # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - __pyx_t_7 = __pyx_array_allocate_buffer(__pyx_v_self); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 180, __pyx_L1_error) - - /* "View.MemoryView":179 - * self.dtype_is_object = format == b'O' - * - * if allocate_buffer: # <<<<<<<<<<<<<< - * _allocate_buffer(self) - * - */ - } - - /* "View.MemoryView":131 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_format); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":182 - * _allocate_buffer(self) - * - * @cname('getbuffer') # <<<<<<<<<<<<<< - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - */ - -/* Python wrapper */ -CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_bufmode; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - char *__pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - Py_ssize_t *__pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (unlikely(__pyx_v_info == NULL)) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":184 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int 
bufmode = -1 # <<<<<<<<<<<<<< - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): - * if self.mode == u"c": - */ - __pyx_v_bufmode = -1; - - /* "View.MemoryView":185 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_t_1 = ((__pyx_v_flags & ((PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS) | PyBUF_ANY_CONTIGUOUS)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":186 - * cdef int bufmode = -1 - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 186, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":187 - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":186 - * cdef int bufmode = -1 - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - goto __pyx_L4; - } - - /* "View.MemoryView":188 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 188, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":189 - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * if not (flags & bufmode): - * raise ValueError, "Can only create a buffer that is contiguous in memory." - */ - __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":188 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - } - __pyx_L4:; - - /* "View.MemoryView":190 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError, "Can only create a buffer that is contiguous in memory." - * info.buf = self.data - */ - __pyx_t_1 = (!((__pyx_v_flags & __pyx_v_bufmode) != 0)); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":191 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError, "Can only create a buffer that is contiguous in memory." 
# <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Can_only_create_a_buffer_that_is, 0, 0); - __PYX_ERR(1, 191, __pyx_L1_error) - - /* "View.MemoryView":190 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError, "Can only create a buffer that is contiguous in memory." - * info.buf = self.data - */ - } - - /* "View.MemoryView":185 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - } - - /* "View.MemoryView":192 - * if not (flags & bufmode): - * raise ValueError, "Can only create a buffer that is contiguous in memory." - * info.buf = self.data # <<<<<<<<<<<<<< - * info.len = self.len - * - */ - __pyx_t_2 = __pyx_v_self->data; - __pyx_v_info->buf = __pyx_t_2; - - /* "View.MemoryView":193 - * raise ValueError, "Can only create a buffer that is contiguous in memory." - * info.buf = self.data - * info.len = self.len # <<<<<<<<<<<<<< - * - * if flags & PyBUF_STRIDES: - */ - __pyx_t_3 = __pyx_v_self->len; - __pyx_v_info->len = __pyx_t_3; - - /* "View.MemoryView":195 - * info.len = self.len - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":196 - * - * if flags & PyBUF_STRIDES: - * info.ndim = self.ndim # <<<<<<<<<<<<<< - * info.shape = self._shape - * info.strides = self._strides - */ - __pyx_t_4 = __pyx_v_self->ndim; - __pyx_v_info->ndim = __pyx_t_4; - - /* "View.MemoryView":197 - * if flags & PyBUF_STRIDES: - * info.ndim = self.ndim - * info.shape = self._shape # <<<<<<<<<<<<<< - * info.strides = self._strides - * else: - */ - __pyx_t_5 = __pyx_v_self->_shape; - __pyx_v_info->shape = __pyx_t_5; - - /* "View.MemoryView":198 - * info.ndim = self.ndim - * info.shape = self._shape - * info.strides = self._strides # <<<<<<<<<<<<<< - * else: - * info.ndim = 1 - */ - __pyx_t_5 = __pyx_v_self->_strides; - __pyx_v_info->strides = __pyx_t_5; - - /* "View.MemoryView":195 - * info.len = self.len - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - goto __pyx_L6; - } - - /* "View.MemoryView":200 - * info.strides = self._strides - * else: - * info.ndim = 1 # <<<<<<<<<<<<<< - * info.shape = &self.len if flags & PyBUF_ND else NULL - * info.strides = NULL - */ - /*else*/ { - __pyx_v_info->ndim = 1; - - /* "View.MemoryView":201 - * else: - * info.ndim = 1 - * info.shape = &self.len if flags & PyBUF_ND else NULL # <<<<<<<<<<<<<< - * info.strides = NULL - * - */ - if (((__pyx_v_flags & PyBUF_ND) != 0)) { - __pyx_t_5 = (&__pyx_v_self->len); - } else { - __pyx_t_5 = NULL; - } - __pyx_v_info->shape = __pyx_t_5; - - /* "View.MemoryView":202 - * info.ndim = 1 - * info.shape = &self.len if flags & PyBUF_ND else NULL - * info.strides = NULL # <<<<<<<<<<<<<< - * - * info.suboffsets = NULL - */ - __pyx_v_info->strides = NULL; - } - __pyx_L6:; - - /* "View.MemoryView":204 - * info.strides = NULL - * - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = self.itemsize - * info.readonly = 0 - */ - __pyx_v_info->suboffsets = NULL; - - /* "View.MemoryView":205 - * - * info.suboffsets = NULL - * info.itemsize = 
self.itemsize # <<<<<<<<<<<<<< - * info.readonly = 0 - * info.format = self.format if flags & PyBUF_FORMAT else NULL - */ - __pyx_t_3 = __pyx_v_self->itemsize; - __pyx_v_info->itemsize = __pyx_t_3; - - /* "View.MemoryView":206 - * info.suboffsets = NULL - * info.itemsize = self.itemsize - * info.readonly = 0 # <<<<<<<<<<<<<< - * info.format = self.format if flags & PyBUF_FORMAT else NULL - * info.obj = self - */ - __pyx_v_info->readonly = 0; - - /* "View.MemoryView":207 - * info.itemsize = self.itemsize - * info.readonly = 0 - * info.format = self.format if flags & PyBUF_FORMAT else NULL # <<<<<<<<<<<<<< - * info.obj = self - * - */ - if (((__pyx_v_flags & PyBUF_FORMAT) != 0)) { - __pyx_t_2 = __pyx_v_self->format; - } else { - __pyx_t_2 = NULL; - } - __pyx_v_info->format = __pyx_t_2; - - /* "View.MemoryView":208 - * info.readonly = 0 - * info.format = self.format if flags & PyBUF_FORMAT else NULL - * info.obj = self # <<<<<<<<<<<<<< - * - * def __dealloc__(array self): - */ - __Pyx_INCREF((PyObject *)__pyx_v_self); - __Pyx_GIVEREF((PyObject *)__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":182 - * _allocate_buffer(self) - * - * @cname('getbuffer') # <<<<<<<<<<<<<< - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":210 - * info.obj = self - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - -/* Python wrapper */ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":211 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: - */ - __pyx_t_1 = (__pyx_v_self->callback_free_data != NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":212 - * def __dealloc__(array self): - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) # <<<<<<<<<<<<<< - * elif self.free_data and self.data is not NULL: - * if self.dtype_is_object: - */ - __pyx_v_self->callback_free_data(__pyx_v_self->data); - - /* "View.MemoryView":211 - * - * def __dealloc__(array self): - * if 
self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":213 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - */ - if (__pyx_v_self->free_data) { - } else { - __pyx_t_1 = __pyx_v_self->free_data; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_self->data != NULL); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":214 - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - * free(self.data) - */ - if (__pyx_v_self->dtype_is_object) { - - /* "View.MemoryView":215 - * elif self.free_data and self.data is not NULL: - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) # <<<<<<<<<<<<<< - * free(self.data) - * PyObject_Free(self._shape) - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); - - /* "View.MemoryView":214 - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - * free(self.data) - */ - } - - /* "View.MemoryView":216 - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - * free(self.data) # <<<<<<<<<<<<<< - * PyObject_Free(self._shape) - * - */ - free(__pyx_v_self->data); - - /* "View.MemoryView":213 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - */ - } - __pyx_L3:; - - /* "View.MemoryView":217 - * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) - * free(self.data) - * PyObject_Free(self._shape) # <<<<<<<<<<<<<< - * - * @property - */ - PyObject_Free(__pyx_v_self->_shape); - - /* "View.MemoryView":210 - * info.obj = self - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":219 - * PyObject_Free(self._shape) - * - * @property # <<<<<<<<<<<<<< - * def memview(self): - * return self.get_memview() - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); 
- return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":221 - * @property - * def memview(self): - * return self.get_memview() # <<<<<<<<<<<<<< - * - * @cname('get_memview') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":219 - * PyObject_Free(self._shape) - * - * @property # <<<<<<<<<<<<<< - * def memview(self): - * return self.get_memview() - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":224 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_memview", 0); - - /* "View.MemoryView":225 - * @cname('get_memview') - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< - * return memoryview(self, flags, self.dtype_is_object) - * - */ - __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); - - /* "View.MemoryView":226 - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF((PyObject *)__pyx_v_self); - __Pyx_GIVEREF((PyObject *)__pyx_v_self); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":224 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = 
PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":228 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":229 - * - * def __len__(self): - * return self._shape[0] # <<<<<<<<<<<<<< - * - * def __getattr__(self, attr): - */ - __pyx_r = (__pyx_v_self->_shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":228 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":231 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getattr__", 0); - - /* "View.MemoryView":232 - * - * def __getattr__(self, attr): - * return getattr(self.memview, attr) # <<<<<<<<<<<<<< - * - * def __getitem__(self, item): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":231 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":234 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":235 - * - * def __getitem__(self, item): - * return self.memview[item] # <<<<<<<<<<<<<< - * - * def __setitem__(self, item, value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":234 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":237 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - -/* Python wrapper */ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - - /* "View.MemoryView":238 - * - * def __setitem__(self, item, value): - * self.memview[item] = value # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely((PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0))) __PYX_ERR(1, 238, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":237 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; - __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - * def __setstate_cython__(self, 
__pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v___pyx_state = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v___pyx_state); - - /* function exit code */ 
- __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":248 - * - * @cname("__pyx_array_allocate_buffer") - * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<< - * - * - */ - -static int __pyx_array_allocate_buffer(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_i; - PyObject **__pyx_v_p; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_allocate_buffer", 0); - - /* "View.MemoryView":254 - * cdef PyObject **p - * - * self.free_data = True # <<<<<<<<<<<<<< - * self.data = malloc(self.len) - * if not self.data: - */ - __pyx_v_self->free_data = 1; - - /* "View.MemoryView":255 - * - * self.free_data = True - * self.data = malloc(self.len) # <<<<<<<<<<<<<< - * if not self.data: - * raise MemoryError, "unable to allocate array data." - */ - __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); - - /* "View.MemoryView":256 - * self.free_data = True - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError, "unable to allocate array data." - * - */ - __pyx_t_1 = (!(__pyx_v_self->data != 0)); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":257 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError, "unable to allocate array data." # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_array_data, 0, 0); - __PYX_ERR(1, 257, __pyx_L1_error) - - /* "View.MemoryView":256 - * self.free_data = True - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError, "unable to allocate array data." - * - */ - } - - /* "View.MemoryView":259 - * raise MemoryError, "unable to allocate array data." 
- * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len // self.itemsize): - */ - if (__pyx_v_self->dtype_is_object) { - - /* "View.MemoryView":260 - * - * if self.dtype_is_object: - * p = self.data # <<<<<<<<<<<<<< - * for i in range(self.len // self.itemsize): - * p[i] = Py_None - */ - __pyx_v_p = ((PyObject **)__pyx_v_self->data); - - /* "View.MemoryView":261 - * if self.dtype_is_object: - * p = self.data - * for i in range(self.len // self.itemsize): # <<<<<<<<<<<<<< - * p[i] = Py_None - * Py_INCREF(Py_None) - */ - if (unlikely(__pyx_v_self->itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 261, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_self->itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 261, __pyx_L1_error) - } - __pyx_t_2 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_self->itemsize); - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":262 - * p = self.data - * for i in range(self.len // self.itemsize): - * p[i] = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * return 0 - */ - (__pyx_v_p[__pyx_v_i]) = Py_None; - - /* "View.MemoryView":263 - * for i in range(self.len // self.itemsize): - * p[i] = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * return 0 - * - */ - Py_INCREF(Py_None); - } - - /* "View.MemoryView":259 - * raise MemoryError, "unable to allocate array data." - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len // self.itemsize): - */ - } - - /* "View.MemoryView":264 - * p[i] = Py_None - * Py_INCREF(Py_None) - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":248 - * - * @cname("__pyx_array_allocate_buffer") - * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<< - * - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView._allocate_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":268 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<< - * cdef array result - * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. - */ - -static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_c_mode, char *__pyx_v_buf) { - struct __pyx_array_obj *__pyx_v_result = 0; - PyObject *__pyx_v_mode = 0; - struct __pyx_array_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_cwrapper", 0); - - /* "View.MemoryView":270 - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): - * cdef array result - * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
# <<<<<<<<<<<<<< - * - * if buf is NULL: - */ - if (((__pyx_v_c_mode[0]) == 'f')) { - __Pyx_INCREF(__pyx_n_s_fortran); - __pyx_t_1 = __pyx_n_s_fortran; - } else { - __Pyx_INCREF(__pyx_n_s_c); - __pyx_t_1 = __pyx_n_s_c; - } - __pyx_v_mode = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":272 - * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. - * - * if buf is NULL: # <<<<<<<<<<<<<< - * result = array.__new__(array, shape, itemsize, format, mode) - * else: - */ - __pyx_t_2 = (__pyx_v_buf == NULL); - if (__pyx_t_2) { - - /* "View.MemoryView":273 - * - * if buf is NULL: - * result = array.__new__(array, shape, itemsize, format, mode) # <<<<<<<<<<<<<< - * else: - * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) - */ - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); - __Pyx_INCREF(__pyx_v_mode); - __Pyx_GIVEREF(__pyx_v_mode); - PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_mode); - __pyx_t_1 = 0; - __pyx_t_3 = 0; - __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error) - __Pyx_GOTREF((PyObject *)__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":272 - * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
- * - * if buf is NULL: # <<<<<<<<<<<<<< - * result = array.__new__(array, shape, itemsize, format, mode) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":275 - * result = array.__new__(array, shape, itemsize, format, mode) - * else: - * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) # <<<<<<<<<<<<<< - * result.data = buf - * - */ - /*else*/ { - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 275, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4); - __Pyx_INCREF(__pyx_v_mode); - __Pyx_GIVEREF(__pyx_v_mode); - PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_mode); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 275, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_1, __pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error) - __Pyx_GOTREF((PyObject *)__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":276 - * else: - * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) - * result.data = buf # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->data = __pyx_v_buf; - } - __pyx_L3:; - - /* "View.MemoryView":278 - * result.data = buf - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF((PyObject *)__pyx_r); - __Pyx_INCREF((PyObject *)__pyx_v_result); - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":268 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<< - * cdef array result - * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
- */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_mode); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":304 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - -/* Python wrapper */ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_name = 0; - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 304, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(1, 304, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - } - __pyx_v_name = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 304, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__", 0); - - /* "View.MemoryView":305 - * cdef object name - * def __init__(self, name): - * self.name = name # <<<<<<<<<<<<<< - * def __repr__(self): - * return self.name - */ - __Pyx_INCREF(__pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __Pyx_GOTREF(__pyx_v_self->name); - __Pyx_DECREF(__pyx_v_self->name); - __pyx_v_self->name = __pyx_v_name; - - /* "View.MemoryView":304 - * cdef class Enum(object): - * cdef object name - * def __init__(self, 
name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - - /* function exit code */ - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":306 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - -/* Python wrapper */ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":307 - * self.name = name - * def __repr__(self): - * return self.name # <<<<<<<<<<<<<< - * - * cdef generic = Enum("") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->name); - __pyx_r = __pyx_v_self->name; - goto __pyx_L0; - - /* "View.MemoryView":306 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; - __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_v_state = 0; - PyObject *__pyx_v__dict = 0; - int __pyx_v_use_setstate; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char 
*__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":5 - * cdef object _dict - * cdef bint use_setstate - * state = (self.name,) # <<<<<<<<<<<<<< - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_self->name); - __Pyx_GIVEREF(__pyx_v_self->name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); - __pyx_v_state = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "(tree fragment)":6 - * cdef bint use_setstate - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< - * if _dict is not None: - * state += (_dict,) - */ - __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v__dict = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - __pyx_t_2 = (__pyx_v__dict != Py_None); - if (__pyx_t_2) { - - /* "(tree fragment)":8 - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - * state += (_dict,) # <<<<<<<<<<<<<< - * use_setstate = True - * else: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v__dict); - __Pyx_GIVEREF(__pyx_v__dict); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); - __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_3)); - __pyx_t_3 = 0; - - /* "(tree fragment)":9 - * if _dict is not None: - * state += (_dict,) - * use_setstate = True # <<<<<<<<<<<<<< - * else: - * use_setstate = self.name is not None - */ - __pyx_v_use_setstate = 1; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - goto __pyx_L3; - } - - /* "(tree fragment)":11 - * use_setstate = True - * else: - * use_setstate = self.name is not None # <<<<<<<<<<<<<< - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_self->name != Py_None); - __pyx_v_use_setstate = __pyx_t_2; - } - __pyx_L3:; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state - * else: - */ - if (__pyx_v_use_setstate) { - - /* "(tree fragment)":13 - * use_setstate = self.name is not None - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state # <<<<<<<<<<<<<< - * else: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject 
*)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_136983863); - __Pyx_GIVEREF(__pyx_int_136983863); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_state); - __pyx_t_3 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state - * else: - */ - } - - /* "(tree fragment)":15 - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state - * else: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_136983863); - __Pyx_GIVEREF(__pyx_int_136983863); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_state); - __Pyx_XDECREF(__pyx_v__dict); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject 
*__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v___pyx_state = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 16, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v___pyx_state = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":17 - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 17, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * 
__pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":349 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - -/* Python wrapper */ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_obj = 0; - int __pyx_v_flags; - int __pyx_v_dtype_is_object; - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; - PyObject* values[3] = {0,0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_obj)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_flags)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 349, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dtype_is_object); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 349, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); - values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_obj = values[0]; - __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) - if (values[2]) { - __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object 
== (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) - } else { - __pyx_v_dtype_is_object = ((int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, __pyx_nargs); __PYX_ERR(1, 349, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - Py_intptr_t __pyx_t_4; - size_t __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - - /* "View.MemoryView":350 - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj # <<<<<<<<<<<<<< - * self.flags = flags - * if type(self) is memoryview or obj is not None: - */ - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - __Pyx_GOTREF(__pyx_v_self->obj); - __Pyx_DECREF(__pyx_v_self->obj); - __pyx_v_self->obj = __pyx_v_obj; - - /* "View.MemoryView":351 - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj - * self.flags = flags # <<<<<<<<<<<<<< - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - */ - __pyx_v_self->flags = __pyx_v_flags; - - /* "View.MemoryView":352 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); - if (!__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_obj != Py_None); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":353 - * self.flags = flags - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - */ - __pyx_t_3 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 353, __pyx_L1_error) - - /* "View.MemoryView":354 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_t_1 = (((PyObject *)__pyx_v_self->view.obj) == NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":355 - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; - - /* "View.MemoryView":356 - * if self.view.obj == NULL: - * (<__pyx_buffer *> 
&self.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * if not __PYX_CYTHON_ATOMICS_ENABLED(): - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":354 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - } - - /* "View.MemoryView":352 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - } - - /* "View.MemoryView":358 - * Py_INCREF(Py_None) - * - * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<< - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < 8: - */ - __pyx_t_1 = (!__PYX_CYTHON_ATOMICS_ENABLED()); - if (__pyx_t_1) { - - /* "View.MemoryView":360 - * if not __PYX_CYTHON_ATOMICS_ENABLED(): - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - __pyx_t_1 = (__pyx_memoryview_thread_locks_used < 8); - if (__pyx_t_1) { - - /* "View.MemoryView":361 - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < 8: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - */ - __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - - /* "View.MemoryView":362 - * if __pyx_memoryview_thread_locks_used < 8: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); - - /* "View.MemoryView":360 - * if not __PYX_CYTHON_ATOMICS_ENABLED(): - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - } - - /* "View.MemoryView":363 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - __pyx_t_1 = (__pyx_v_self->lock == NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":364 - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< - * if self.lock is NULL: - * raise MemoryError - */ - __pyx_v_self->lock = PyThread_allocate_lock(); - - /* "View.MemoryView":365 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - __pyx_t_1 = (__pyx_v_self->lock == NULL); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":366 - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - PyErr_NoMemory(); __PYX_ERR(1, 366, __pyx_L1_error) - - /* "View.MemoryView":365 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # 
<<<<<<<<<<<<<< - * raise MemoryError - * - */ - } - - /* "View.MemoryView":363 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - } - - /* "View.MemoryView":358 - * Py_INCREF(Py_None) - * - * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<< - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < 8: - */ - } - - /* "View.MemoryView":368 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":369 - * - * if flags & PyBUF_FORMAT: - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< - * else: - * self.dtype_is_object = dtype_is_object - */ - __pyx_t_2 = ((__pyx_v_self->view.format[0]) == 'O'); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L12_bool_binop_done; - } - __pyx_t_2 = ((__pyx_v_self->view.format[1]) == '\x00'); - __pyx_t_1 = __pyx_t_2; - __pyx_L12_bool_binop_done:; - __pyx_v_self->dtype_is_object = __pyx_t_1; - - /* "View.MemoryView":368 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - goto __pyx_L11; - } - - /* "View.MemoryView":371 - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< - * - * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 - */ - /*else*/ { - __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; - } - __pyx_L11:; - - /* "View.MemoryView":373 - * self.dtype_is_object = dtype_is_object - * - * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 # <<<<<<<<<<<<<< - * self.typeinfo = NULL - * - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(__pyx_assertions_enabled())) { - __pyx_t_4 = ((Py_intptr_t)((void *)(&__pyx_v_self->acquisition_count))); - __pyx_t_5 = (sizeof(__pyx_atomic_int_type)); - if (unlikely(__pyx_t_5 == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 373, __pyx_L1_error) - } - __pyx_t_1 = ((__pyx_t_4 % __pyx_t_5) == 0); - if (unlikely(!__pyx_t_1)) { - __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0); - __PYX_ERR(1, 373, __pyx_L1_error) - } - } - #else - if ((1)); else __PYX_ERR(1, 373, __pyx_L1_error) - #endif - - /* "View.MemoryView":374 - * - * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 - * self.typeinfo = NULL # <<<<<<<<<<<<<< - * - * def __dealloc__(memoryview self): - */ - __pyx_v_self->typeinfo = NULL; - - /* "View.MemoryView":349 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":376 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview 
self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - -/* Python wrapper */ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { - int __pyx_v_i; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - PyThread_type_lock __pyx_t_5; - PyThread_type_lock __pyx_t_6; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":377 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - __pyx_t_1 = (__pyx_v_self->obj != Py_None); - if (__pyx_t_1) { - - /* "View.MemoryView":378 - * def __dealloc__(memoryview self): - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - */ - __Pyx_ReleaseBuffer((&__pyx_v_self->view)); - - /* "View.MemoryView":377 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":379 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - __pyx_t_1 = (((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None); - if (__pyx_t_1) { - - /* "View.MemoryView":381 - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< - * Py_DECREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; - - /* "View.MemoryView":382 - * - * (<__pyx_buffer *> &self.view).obj = NULL - * Py_DECREF(Py_None) # <<<<<<<<<<<<<< - * - * cdef int i - */ - Py_DECREF(Py_None); - - /* "View.MemoryView":379 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - } - __pyx_L3:; - - /* "View.MemoryView":386 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - __pyx_t_1 = (__pyx_v_self->lock != NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":387 - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - */ - __pyx_t_2 = __pyx_memoryview_thread_locks_used; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":388 - * if self.lock != NULL: - * for i in 
range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - __pyx_t_1 = ((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock); - if (__pyx_t_1) { - - /* "View.MemoryView":389 - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); - - /* "View.MemoryView":390 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - __pyx_t_1 = (__pyx_v_i != __pyx_memoryview_thread_locks_used); - if (__pyx_t_1) { - - /* "View.MemoryView":392 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< - * break - * else: - */ - __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); - - /* "View.MemoryView":391 - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break - */ - (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; - (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; - - /* "View.MemoryView":390 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - } - - /* "View.MemoryView":393 - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break # <<<<<<<<<<<<<< - * else: - * PyThread_free_lock(self.lock) - */ - goto __pyx_L6_break; - - /* "View.MemoryView":388 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - } - } - /*else*/ { - - /* "View.MemoryView":395 - * break - * else: - * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - */ - PyThread_free_lock(__pyx_v_self->lock); - } - __pyx_L6_break:; - - /* 
"View.MemoryView":386 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - } - - /* "View.MemoryView":376 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":397 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - Py_ssize_t __pyx_v_dim; - char *__pyx_v_itemp; - PyObject *__pyx_v_idx = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - char *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_item_pointer", 0); - - /* "View.MemoryView":399 - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< - * - * for dim, idx in enumerate(index): - */ - __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); - - /* "View.MemoryView":401 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { - __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 401, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - 
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_1; - __pyx_t_1 = (__pyx_t_1 + 1); - - /* "View.MemoryView":402 - * - * for dim, idx in enumerate(index): - * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< - * - * return itemp - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 402, __pyx_L1_error) - __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 402, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_7; - - /* "View.MemoryView":401 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":404 - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - * return itemp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_itemp; - goto __pyx_L0; - - /* "View.MemoryView":397 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":407 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_indices = NULL; - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - char *__pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":408 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); - if (__pyx_t_1) { - - /* "View.MemoryView":409 - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: - * return self # <<<<<<<<<<<<<< - * - * have_slices, indices = _unellipsify(index, self.view.ndim) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF((PyObject *)__pyx_v_self); - __pyx_r 
= ((PyObject *)__pyx_v_self); - goto __pyx_L0; - - /* "View.MemoryView":408 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - } - - /* "View.MemoryView":411 - * return self - * - * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * cdef char *itemp - */ - __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(__pyx_t_2 != Py_None)) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 411, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_indices = __pyx_t_4; - __pyx_t_4 = 0; - - /* "View.MemoryView":414 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 414, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":415 - * cdef char *itemp - * if have_slices: - * return memview_slice(self, indices) # <<<<<<<<<<<<<< - * else: - * itemp = self.get_item_pointer(indices) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 415, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":414 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - } - - /* "View.MemoryView":417 - * return memview_slice(self, indices) - * else: - * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< - * return self.convert_item_to_object(itemp) - * - */ - /*else*/ { - __pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(1, 417, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_5; - - /* "View.MemoryView":418 - * else: - * itemp = self.get_item_pointer(indices) - * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< - * - * def __setitem__(memoryview self, object index, object value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":407 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - - /* function exit 
code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_indices); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":420 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError, "Cannot assign to read-only memoryview" - */ - -/* Python wrapper */ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_obj = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - __Pyx_INCREF(__pyx_v_index); - - /* "View.MemoryView":421 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError, "Cannot assign to read-only memoryview" - * - */ - if (unlikely(__pyx_v_self->view.readonly)) { - - /* "View.MemoryView":422 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError, "Cannot assign to read-only memoryview" # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_Cannot_assign_to_read_only_memor, 0, 0); - __PYX_ERR(1, 422, __pyx_L1_error) - - /* "View.MemoryView":421 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError, "Cannot assign to read-only memoryview" - * - */ - } - - /* "View.MemoryView":424 - * raise TypeError, "Cannot assign to read-only memoryview" - * - * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * if have_slices: - */ - __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (likely(__pyx_t_1 != Py_None)) { - PyObject* sequence = __pyx_t_1; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 424, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && 
!CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - #else - __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - #endif - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 424, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_2; - __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":426 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 426, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":427 - * - * if have_slices: - * obj = self.is_slice(value) # <<<<<<<<<<<<<< - * if obj: - * self.setitem_slice_assignment(self[index], obj) - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_obj = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":428 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 428, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":429 - * obj = self.is_slice(value) - * if obj: - * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< - * else: - * self.setitem_slice_assign_scalar(self[index], value) - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":428 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":431 - * self.setitem_slice_assignment(self[index], obj) - * else: - * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< - * else: - * self.setitem_indexed(index, value) - */ - /*else*/ { - __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 431, __pyx_L1_error) - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_L5:; - - /* 
"View.MemoryView":426 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":433 - * self.setitem_slice_assign_scalar(self[index], value) - * else: - * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< - * - * cdef is_slice(self, obj): - */ - /*else*/ { - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 433, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_L4:; - - /* "View.MemoryView":420 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError, "Cannot assign to read-only memoryview" - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":435 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_slice", 0); - __Pyx_INCREF(__pyx_v_obj); - - /* "View.MemoryView":436 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); - __pyx_t_2 = (!__pyx_t_1); - if (__pyx_t_2) { - - /* "View.MemoryView":437 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - /*try:*/ { - - /* "View.MemoryView":438 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 438, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":439 - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) # <<<<<<<<<<<<<< - * except TypeError: - * return None - */ - __pyx_t_7 = 
__Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 439, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - - /* "View.MemoryView":438 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 438, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); - __pyx_t_6 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 438, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":437 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L9_try_end; - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":440 - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - * except TypeError: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); - if (__pyx_t_9) { - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 440, __pyx_L6_except_error) - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_6); - - /* "View.MemoryView":441 - * self.dtype_is_object) - * except TypeError: - * return None # <<<<<<<<<<<<<< - * - * return obj - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - - /* "View.MemoryView":437 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - __pyx_L6_except_error:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L1_error; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L0; - __pyx_L9_try_end:; - } - - /* "View.MemoryView":436 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - } - - /* "View.MemoryView":443 - * return None - * - * return obj # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assignment(self, 
dst, src): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - /* "View.MemoryView":435 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":445 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { - __Pyx_memviewslice __pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_src_slice; - __Pyx_memviewslice __pyx_v_msrc; - __Pyx_memviewslice __pyx_v_mdst; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); - - /* "View.MemoryView":448 - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0] # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] - * - */ - if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 448, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 448, __pyx_L1_error) - __pyx_v_msrc = (__pyx_t_1[0]); - - /* "View.MemoryView":449 - * cdef __Pyx_memviewslice src_slice - * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0] - * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] # <<<<<<<<<<<<<< - * - * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) - */ - if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 449, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 449, __pyx_L1_error) - __pyx_v_mdst = (__pyx_t_1[0]); - - /* "View.MemoryView":451 - * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] - * - * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = 
__Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __pyx_memoryview_copy_contents(__pyx_v_msrc, __pyx_v_mdst, __pyx_t_3, __pyx_t_4, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 451, __pyx_L1_error) - - /* "View.MemoryView":445 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":453 - * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { - int __pyx_v_array[0x80]; - void *__pyx_v_tmp; - void *__pyx_v_item; - __Pyx_memviewslice *__pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_tmp_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - char const *__pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); - - /* "View.MemoryView":455 - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - * cdef int array[128] - * cdef void *tmp = NULL # <<<<<<<<<<<<<< - * cdef void *item - * - */ - __pyx_v_tmp = NULL; - - /* "View.MemoryView":460 - * cdef __Pyx_memviewslice *dst_slice - * cdef __Pyx_memviewslice tmp_slice - * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< - * - * if self.view.itemsize > sizeof(array): - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 460, __pyx_L1_error) - __pyx_v_dst_slice = __pyx_t_1; - - /* "View.MemoryView":462 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - __pyx_t_2 = (((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))); - if (__pyx_t_2) { - - /* "View.MemoryView":463 - * - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< - * if tmp == NULL: - * raise MemoryError - */ - __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); - - /* "View.MemoryView":464 - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # 
<<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - __pyx_t_2 = (__pyx_v_tmp == NULL); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":465 - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * item = tmp - * else: - */ - PyErr_NoMemory(); __PYX_ERR(1, 465, __pyx_L1_error) - - /* "View.MemoryView":464 - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - } - - /* "View.MemoryView":466 - * if tmp == NULL: - * raise MemoryError - * item = tmp # <<<<<<<<<<<<<< - * else: - * item = array - */ - __pyx_v_item = __pyx_v_tmp; - - /* "View.MemoryView":462 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":468 - * item = tmp - * else: - * item = array # <<<<<<<<<<<<<< - * - * try: - */ - /*else*/ { - __pyx_v_item = ((void *)__pyx_v_array); - } - __pyx_L3:; - - /* "View.MemoryView":470 - * item = array - * - * try: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * ( item)[0] = value - */ - /*try:*/ { - - /* "View.MemoryView":471 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * ( item)[0] = value - * else: - */ - if (__pyx_v_self->dtype_is_object) { - - /* "View.MemoryView":472 - * try: - * if self.dtype_is_object: - * ( item)[0] = value # <<<<<<<<<<<<<< - * else: - * self.assign_item_from_object( item, value) - */ - (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); - - /* "View.MemoryView":471 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * ( item)[0] = value - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":474 - * ( item)[0] = value - * else: - * self.assign_item_from_object( item, value) # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 474, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":478 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - __pyx_t_2 = (__pyx_v_self->view.suboffsets != NULL); - if (__pyx_t_2) { - - /* "View.MemoryView":479 - * - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - * item, self.dtype_is_object) - */ - __pyx_t_4 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 479, __pyx_L6_error) - - /* "View.MemoryView":478 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - } - - /* "View.MemoryView":480 - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< - * item, self.dtype_is_object) - * finally: - */ - 
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); - } - - /* "View.MemoryView":483 - * item, self.dtype_is_object) - * finally: - * PyMem_Free(tmp) # <<<<<<<<<<<<<< - * - * cdef setitem_indexed(self, index, value): - */ - /*finally:*/ { - /*normal exit:*/{ - PyMem_Free(__pyx_v_tmp); - goto __pyx_L7; - } - __pyx_L6_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; - { - PyMem_Free(__pyx_v_tmp); - } - if (PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_XGIVEREF(__pyx_t_12); - __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); - } - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; - goto __pyx_L1_error; - } - __pyx_L7:; - } - - /* "View.MemoryView":453 - * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":485 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - char *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_indexed", 0); - - /* "View.MemoryView":486 - * - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< - * self.assign_item_from_object(itemp, value) - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 486, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_1; - - /* "View.MemoryView":487 - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) - 
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 487, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":485 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":489 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_v_struct = NULL; - PyObject *__pyx_v_bytesitem = 0; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - int __pyx_t_10; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":492 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef bytes bytesitem - * - */ - __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 492, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":495 - * cdef bytes bytesitem - * - * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< - * try: - * result = struct.unpack(self.view.format, bytesitem) - */ - __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":496 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - /*try:*/ { - - /* "View.MemoryView":497 - * bytesitem = itemp[:self.view.itemsize] - * try: - * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< - * except struct.error: - * raise ValueError, "Unable to convert item to object" - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 
497, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 497, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 497, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_v_result = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":496 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - } - - /* "View.MemoryView":501 - * raise ValueError, "Unable to convert item to object" - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - /*else:*/ { - __pyx_t_9 = __Pyx_ssize_strlen(__pyx_v_self->view.format); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(1, 501, __pyx_L5_except_error) - __pyx_t_10 = (__pyx_t_9 == 1); - if (__pyx_t_10) { - - /* "View.MemoryView":502 - * else: - * if len(self.view.format) == 1: - * return result[0] # <<<<<<<<<<<<<< - * return result - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 502, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L6_except_return; - - /* "View.MemoryView":501 - * raise ValueError, "Unable to convert item to object" - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - } - - /* "View.MemoryView":503 - * if len(self.view.format) == 1: - * return result[0] - * return result # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_result); - __pyx_r = __pyx_v_result; - goto __pyx_L6_except_return; - } - __pyx_L3_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":498 - * try: - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: # <<<<<<<<<<<<<< - * raise ValueError, "Unable to convert item to object" - * else: - */ - __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 498, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_7); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_6); - __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; - if (__pyx_t_8) { - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_5, 
&__pyx_t_1) < 0) __PYX_ERR(1, 498, __pyx_L5_except_error) - __Pyx_XGOTREF(__pyx_t_6); - __Pyx_XGOTREF(__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_1); - - /* "View.MemoryView":499 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError, "Unable to convert item to object" # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Unable_to_convert_item_to_object, 0, 0); - __PYX_ERR(1, 499, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - - /* "View.MemoryView":496 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - __pyx_L5_except_error:; - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L1_error; - __pyx_L6_except_return:; - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L0; - } - - /* "View.MemoryView":489 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesitem); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":505 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_v_struct = NULL; - char __pyx_v_c; - PyObject *__pyx_v_bytesvalue = 0; - Py_ssize_t __pyx_v_i; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - char *__pyx_t_9; - char *__pyx_t_10; - char *__pyx_t_11; - char *__pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":508 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef char c - * cdef bytes bytesvalue - */ - __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":513 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - __pyx_t_2 = PyTuple_Check(__pyx_v_value); - if (__pyx_t_2) { - - /* 
"View.MemoryView":514 - * - * if isinstance(value, tuple): - * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< - * else: - * bytesvalue = struct.pack(self.view.format, value) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Add(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 514, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":513 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":516 - * bytesvalue = struct.pack(self.view.format, *value) - * else: - * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< - * - * for i, c in enumerate(bytesvalue): - */ - /*else*/ { - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 516, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 516, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_1, __pyx_v_value}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 516, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":518 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_7 = 0; - if (unlikely(__pyx_v_bytesvalue == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' is not 
iterable"); - __PYX_ERR(1, 518, __pyx_L1_error) - } - __Pyx_INCREF(__pyx_v_bytesvalue); - __pyx_t_8 = __pyx_v_bytesvalue; - __pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8); - __pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8)); - for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) { - __pyx_t_9 = __pyx_t_12; - __pyx_v_c = (__pyx_t_9[0]); - - /* "View.MemoryView":519 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - __pyx_v_i = __pyx_t_7; - - /* "View.MemoryView":518 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_7 = (__pyx_t_7 + 1); - - /* "View.MemoryView":519 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; - } - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":505 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesvalue); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":521 - * itemp[i] = c - * - * @cname('getbuffer') # <<<<<<<<<<<<<< - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - */ - -/* Python wrapper */ -CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - Py_ssize_t *__pyx_t_3; - char *__pyx_t_4; - void *__pyx_t_5; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (unlikely(__pyx_v_info == NULL)) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":523 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - 
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError, "Cannot create writable memory view from read-only memoryview" - * - */ - __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_1 = __pyx_v_self->view.readonly; - __pyx_L4_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":524 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError, "Cannot create writable memory view from read-only memoryview" # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Cannot_create_writable_memory_vi, 0, 0); - __PYX_ERR(1, 524, __pyx_L1_error) - - /* "View.MemoryView":523 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError, "Cannot create writable memory view from read-only memoryview" - * - */ - } - - /* "View.MemoryView":526 - * raise ValueError, "Cannot create writable memory view from read-only memoryview" - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":527 - * - * if flags & PyBUF_ND: - * info.shape = self.view.shape # <<<<<<<<<<<<<< - * else: - * info.shape = NULL - */ - __pyx_t_3 = __pyx_v_self->view.shape; - __pyx_v_info->shape = __pyx_t_3; - - /* "View.MemoryView":526 - * raise ValueError, "Cannot create writable memory view from read-only memoryview" - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":529 - * info.shape = self.view.shape - * else: - * info.shape = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_STRIDES: - */ - /*else*/ { - __pyx_v_info->shape = NULL; - } - __pyx_L6:; - - /* "View.MemoryView":531 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":532 - * - * if flags & PyBUF_STRIDES: - * info.strides = self.view.strides # <<<<<<<<<<<<<< - * else: - * info.strides = NULL - */ - __pyx_t_3 = __pyx_v_self->view.strides; - __pyx_v_info->strides = __pyx_t_3; - - /* "View.MemoryView":531 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - goto __pyx_L7; - } - - /* "View.MemoryView":534 - * info.strides = self.view.strides - * else: - * info.strides = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_INDIRECT: - */ - /*else*/ { - __pyx_v_info->strides = NULL; - } - __pyx_L7:; - - /* "View.MemoryView":536 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":537 - * - * if flags & PyBUF_INDIRECT: - * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< - * else: - * info.suboffsets = NULL - */ - __pyx_t_3 = __pyx_v_self->view.suboffsets; - __pyx_v_info->suboffsets = __pyx_t_3; - - /* "View.MemoryView":536 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - goto __pyx_L8; - 
} - - /* "View.MemoryView":539 - * info.suboffsets = self.view.suboffsets - * else: - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - /*else*/ { - __pyx_v_info->suboffsets = NULL; - } - __pyx_L8:; - - /* "View.MemoryView":541 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":542 - * - * if flags & PyBUF_FORMAT: - * info.format = self.view.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_4 = __pyx_v_self->view.format; - __pyx_v_info->format = __pyx_t_4; - - /* "View.MemoryView":541 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":544 - * info.format = self.view.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.buf = self.view.buf - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L9:; - - /* "View.MemoryView":546 - * info.format = NULL - * - * info.buf = self.view.buf # <<<<<<<<<<<<<< - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - */ - __pyx_t_5 = __pyx_v_self->view.buf; - __pyx_v_info->buf = __pyx_t_5; - - /* "View.MemoryView":547 - * - * info.buf = self.view.buf - * info.ndim = self.view.ndim # <<<<<<<<<<<<<< - * info.itemsize = self.view.itemsize - * info.len = self.view.len - */ - __pyx_t_6 = __pyx_v_self->view.ndim; - __pyx_v_info->ndim = __pyx_t_6; - - /* "View.MemoryView":548 - * info.buf = self.view.buf - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< - * info.len = self.view.len - * info.readonly = self.view.readonly - */ - __pyx_t_7 = __pyx_v_self->view.itemsize; - __pyx_v_info->itemsize = __pyx_t_7; - - /* "View.MemoryView":549 - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - * info.len = self.view.len # <<<<<<<<<<<<<< - * info.readonly = self.view.readonly - * info.obj = self - */ - __pyx_t_7 = __pyx_v_self->view.len; - __pyx_v_info->len = __pyx_t_7; - - /* "View.MemoryView":550 - * info.itemsize = self.view.itemsize - * info.len = self.view.len - * info.readonly = self.view.readonly # <<<<<<<<<<<<<< - * info.obj = self - * - */ - __pyx_t_1 = __pyx_v_self->view.readonly; - __pyx_v_info->readonly = __pyx_t_1; - - /* "View.MemoryView":551 - * info.len = self.view.len - * info.readonly = self.view.readonly - * info.obj = self # <<<<<<<<<<<<<< - * - * - */ - __Pyx_INCREF((PyObject *)__pyx_v_self); - __Pyx_GIVEREF((PyObject *)__pyx_v_self); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":521 - * itemp[i] = c - * - * @cname('getbuffer') # <<<<<<<<<<<<<< - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} 
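 -
-/* Illustrative consumer-side sketch (not generated code): the __getbuffer__
- * slot above fills a caller-supplied Py_buffer, and C code drives it through
- * PyObject_GetBuffer paired with PyBuffer_Release. `exporter` is assumed to
- * be any buffer-protocol object, e.g. this memoryview.
- *
- * static int first_byte(PyObject *exporter, char *out) {
- *     Py_buffer view;
- *     if (PyObject_GetBuffer(exporter, &view, PyBUF_SIMPLE) < 0)
- *         return -1;                 // refused, e.g. a writable request on a read-only view
- *     *out = ((char *)view.buf)[0]; // view.buf/view.len stay valid until released
- *     PyBuffer_Release(&view);      // pair every successful GetBuffer with a release
- *     return 0;
- * }
- */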
- -/* "View.MemoryView":554 - * - * - * @property # <<<<<<<<<<<<<< - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":556 - * @property - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< - * transpose_memslice(&result.from_slice) - * return result - */ - __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 556, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 556, __pyx_L1_error) - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":557 - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 557, __pyx_L1_error) - - /* "View.MemoryView":558 - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - * return result # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF((PyObject *)__pyx_v_result); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":554 - * - * - * @property # <<<<<<<<<<<<<< - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":560 - * return result - * - * @property # <<<<<<<<<<<<<< - * def base(self): - * return self._get_base() - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":562 - * @property - * def base(self): - * return self._get_base() # <<<<<<<<<<<<<< - * - * cdef _get_base(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->_get_base(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 562, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":560 - * return result - * - * @property # <<<<<<<<<<<<<< - * def base(self): - * return self._get_base() - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":564 - * return self._get_base() - * - * cdef _get_base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - -static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("_get_base", 0); - - /* "View.MemoryView":565 - * - * cdef _get_base(self): - * return self.obj # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->obj); - __pyx_r = __pyx_v_self->obj; - goto __pyx_L0; - - /* "View.MemoryView":564 - * return self._get_base() - * - * cdef _get_base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":567 - * return self.obj - * - * @property # <<<<<<<<<<<<<< - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_7genexpr__pyx_v_length; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":569 - * 
@property - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - { /* enter inner scope */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_7genexpr__pyx_v_length = (__pyx_t_2[0]); - __pyx_t_5 = PyInt_FromSsize_t(__pyx_7genexpr__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 569, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - } /* exit inner scope */ - __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":567 - * return self.obj - * - * @property # <<<<<<<<<<<<<< - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":571 - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def strides(self): - * if self.view.strides == NULL: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_8genexpr1__pyx_v_stride; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":573 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError, "Buffer view does not expose strides" - */ - __pyx_t_1 = (__pyx_v_self->view.strides == NULL); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":575 - * if self.view.strides == NULL: - * - * raise ValueError, "Buffer view does not expose strides" # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __Pyx_Raise(__pyx_builtin_ValueError, 
__pyx_kp_s_Buffer_view_does_not_expose_stri, 0, 0); - __PYX_ERR(1, 575, __pyx_L1_error) - - /* "View.MemoryView":573 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError, "Buffer view does not expose strides" - */ - } - - /* "View.MemoryView":577 - * raise ValueError, "Buffer view does not expose strides" - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - { /* enter inner scope */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_8genexpr1__pyx_v_stride = (__pyx_t_3[0]); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr1__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - } /* exit inner scope */ - __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - /* "View.MemoryView":571 - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def strides(self): - * if self.view.strides == NULL: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":579 - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def suboffsets(self): - * if self.view.suboffsets == NULL: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_8genexpr2__pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":581 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - __pyx_t_1 
= (__pyx_v_self->view.suboffsets == NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":582 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PySequence_Multiply(__pyx_tuple__4, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 582, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":581 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - } - - /* "View.MemoryView":584 - * return (-1,) * self.view.ndim - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - { /* enter inner scope */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 584, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.suboffsets; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_8genexpr2__pyx_v_suboffset = (__pyx_t_3[0]); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr2__pyx_v_suboffset); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 584, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - } /* exit inner scope */ - __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - /* "View.MemoryView":579 - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def suboffsets(self): - * if self.view.suboffsets == NULL: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":586 - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def ndim(self): - * return self.view.ndim - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - 
int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":588 - * @property - * def ndim(self): - * return self.view.ndim # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 588, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":586 - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - * - * @property # <<<<<<<<<<<<<< - * def ndim(self): - * return self.view.ndim - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":590 - * return self.view.ndim - * - * @property # <<<<<<<<<<<<<< - * def itemsize(self): - * return self.view.itemsize - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":592 - * @property - * def itemsize(self): - * return self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 592, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":590 - * return self.view.ndim - * - * @property # <<<<<<<<<<<<<< - * def itemsize(self): - * return self.view.itemsize - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":594 - * return self.view.itemsize - * - * @property # <<<<<<<<<<<<<< - * def nbytes(self): - * return self.size * self.view.itemsize - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":596 - * @property - * def nbytes(self): - * return self.size * self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 596, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 596, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 596, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":594 - * return self.view.itemsize - * - * @property # <<<<<<<<<<<<<< - * def nbytes(self): - * return self.size * self.view.itemsize - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":598 - * return self.size * self.view.itemsize - * - * @property # <<<<<<<<<<<<<< - * def size(self): - * if self._size is None: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":600 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - __pyx_t_1 = (__pyx_v_self->_size == Py_None); - if (__pyx_t_1) { - - /* "View.MemoryView":601 - * def size(self): - * if self._size is None: - * result = 1 # <<<<<<<<<<<<<< - * - * for length in 
self.view.shape[:self.view.ndim]: - */ - __Pyx_INCREF(__pyx_int_1); - __pyx_v_result = __pyx_int_1; - - /* "View.MemoryView":603 - * result = 1 - * - * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< - * result *= length - * - */ - __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_t_5 = PyInt_FromSsize_t((__pyx_t_2[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 603, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5); - __pyx_t_5 = 0; - - /* "View.MemoryView":604 - * - * for length in self.view.shape[:self.view.ndim]: - * result *= length # <<<<<<<<<<<<<< - * - * self._size = result - */ - __pyx_t_5 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 604, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_5); - __pyx_t_5 = 0; - } - - /* "View.MemoryView":606 - * result *= length - * - * self._size = result # <<<<<<<<<<<<<< - * - * return self._size - */ - __Pyx_INCREF(__pyx_v_result); - __Pyx_GIVEREF(__pyx_v_result); - __Pyx_GOTREF(__pyx_v_self->_size); - __Pyx_DECREF(__pyx_v_self->_size); - __pyx_v_self->_size = __pyx_v_result; - - /* "View.MemoryView":600 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - } - - /* "View.MemoryView":608 - * self._size = result - * - * return self._size # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->_size); - __pyx_r = __pyx_v_self->_size; - goto __pyx_L0; - - /* "View.MemoryView":598 - * return self.size * self.view.itemsize - * - * @property # <<<<<<<<<<<<<< - * def size(self): - * if self._size is None: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":610 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":611 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - __pyx_t_1 = (__pyx_v_self->view.ndim >= 1); - if (__pyx_t_1) { - - /* "View.MemoryView":612 - * def __len__(self): - * if self.view.ndim >= 1: - * return self.view.shape[0] # <<<<<<<<<<<<<< - * - * return 
0 - */ - __pyx_r = (__pyx_v_self->view.shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":611 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - } - - /* "View.MemoryView":614 - * return self.view.shape[0] - * - * return 0 # <<<<<<<<<<<<<< - * - * def __repr__(self): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":610 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":616 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":617 - * - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":618 - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) # <<<<<<<<<<<<<< - * - * def __str__(self): - */ - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 618, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":617 - * - * def __repr__(self): - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 
0; - goto __pyx_L0; - - /* "View.MemoryView":616 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, - * id(self)) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":620 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__str__", 0); - - /* "View.MemoryView":621 - * - * def __str__(self): - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":620 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":624 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_c_contig(PyObject 
*__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("is_c_contig", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_c_contig", 0))) return NULL; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_c_contig", 0); - - /* "View.MemoryView":627 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 627, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":628 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< - * - * def is_f_contig(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 628, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":624 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":630 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject 
*__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("is_f_contig", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_f_contig", 0))) return NULL; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_f_contig", 0); - - /* "View.MemoryView":633 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 633, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":634 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< - * - * def copy(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 634, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":630 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":636 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, -#if 
CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("copy", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy", 0))) return NULL; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_mslice; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy", 0); - - /* "View.MemoryView":638 - * def copy(self): - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &mslice) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); - - /* "View.MemoryView":640 - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - * - * slice_copy(self, &mslice) # <<<<<<<<<<<<<< - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); - - /* "View.MemoryView":641 - * - * slice_copy(self, &mslice) - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_C_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 641, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":646 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< - * - * def copy_fortran(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":636 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":648 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = 
self.flags & ~PyBUF_C_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("copy_fortran", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy_fortran", 0))) return NULL; - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_fortran", 0); - - /* "View.MemoryView":650 - * def copy_fortran(self): - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &src) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); - - /* "View.MemoryView":652 - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - * - * slice_copy(self, &src) # <<<<<<<<<<<<<< - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); - - /* "View.MemoryView":653 - * - * slice_copy(self, &src) - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_F_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 653, __pyx_L1_error) - __pyx_v_dst = __pyx_t_1; - - /* "View.MemoryView":658 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":648 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - - /* function exit code */ - 
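-/* Note on the shared exit protocol used by every generated method above: the __pyx_L1_error label XDECREFs whichever temporaries (__pyx_t_*) are still live, records the failure location via __Pyx_AddTraceback and sets the result to NULL, then falls through to __pyx_L0, the common exit that releases the RefNanny context and returns __pyx_r to the caller. */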
__pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; - __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); 
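-/* Note: pickling is deliberately unsupported here; both __reduce_cython__ above and __setstate_cython__ below simply raise TypeError, since memoryview has a non-trivial __cinit__ whose state cannot be reconstructed from an unpickled payload. */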
/*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v___pyx_state = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":662 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - -static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { - struct __pyx_memoryview_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); - - /* "View.MemoryView":663 - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< - * result.typeinfo = typeinfo - * return result - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 663, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 663, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_o); - __Pyx_GIVEREF(__pyx_v_o); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":664 - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_v_result->typeinfo = __pyx_v_typeinfo; - - /* "View.MemoryView":665 - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_check') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF((PyObject *)__pyx_v_result); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":662 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":668 - * - * 
@cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("memoryview_check", 0); - - /* "View.MemoryView":669 - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o) noexcept: - * return isinstance(o, memoryview) # <<<<<<<<<<<<<< - * - * cdef tuple _unellipsify(object index, int ndim): - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":668 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":671 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - -static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_idx; - PyObject *__pyx_v_tup = NULL; - PyObject *__pyx_v_result = NULL; - int __pyx_v_have_slices; - int __pyx_v_seen_ellipsis; - PyObject *__pyx_v_item = NULL; - Py_ssize_t __pyx_v_nslices; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_UCS4 __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_unellipsify", 0); - - /* "View.MemoryView":677 - * """ - * cdef Py_ssize_t idx - * tup = index if isinstance(index, tuple) else (index,) # <<<<<<<<<<<<<< - * - * result = [slice(None)] * ndim - */ - __pyx_t_2 = PyTuple_Check(__pyx_v_index); - if (__pyx_t_2) { - __Pyx_INCREF(((PyObject*)__pyx_v_index)); - __pyx_t_1 = __pyx_v_index; - } else { - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 677, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_index); - __Pyx_GIVEREF(__pyx_v_index); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_tup = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":679 - * tup = index if isinstance(index, tuple) else (index,) - * - * result = [slice(None)] * ndim # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_t_1 = PyList_New(1 * ((__pyx_v_ndim<0) ? 
0:__pyx_v_ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < __pyx_v_ndim; __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__5); - __Pyx_GIVEREF(__pyx_slice__5); - PyList_SET_ITEM(__pyx_t_1, __pyx_temp, __pyx_slice__5); - } - } - __pyx_v_result = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":680 - * - * result = [slice(None)] * ndim - * have_slices = False # <<<<<<<<<<<<<< - * seen_ellipsis = False - * idx = 0 - */ - __pyx_v_have_slices = 0; - - /* "View.MemoryView":681 - * result = [slice(None)] * ndim - * have_slices = False - * seen_ellipsis = False # <<<<<<<<<<<<<< - * idx = 0 - * for item in tup: - */ - __pyx_v_seen_ellipsis = 0; - - /* "View.MemoryView":682 - * have_slices = False - * seen_ellipsis = False - * idx = 0 # <<<<<<<<<<<<<< - * for item in tup: - * if item is Ellipsis: - */ - __pyx_v_idx = 0; - - /* "View.MemoryView":683 - * seen_ellipsis = False - * idx = 0 - * for item in tup: # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - if (unlikely(__pyx_v_tup == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); - __PYX_ERR(1, 683, __pyx_L1_error) - } - __pyx_t_1 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0; - for (;;) { - if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(1, 683, __pyx_L1_error) - #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 683, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - #endif - __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":684 - * idx = 0 - * for item in tup: - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * idx += ndim - len(tup) - */ - __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); - if (__pyx_t_2) { - - /* "View.MemoryView":685 - * for item in tup: - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * idx += ndim - len(tup) - * seen_ellipsis = True - */ - __pyx_t_2 = (!__pyx_v_seen_ellipsis); - if (__pyx_t_2) { - - /* "View.MemoryView":686 - * if item is Ellipsis: - * if not seen_ellipsis: - * idx += ndim - len(tup) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * have_slices = True - */ - if (unlikely(__pyx_v_tup == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 686, __pyx_L1_error) - } - __pyx_t_5 = PyTuple_GET_SIZE(__pyx_v_tup); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 686, __pyx_L1_error) - __pyx_v_idx = (__pyx_v_idx + (__pyx_v_ndim - __pyx_t_5)); - - /* "View.MemoryView":687 - * if not seen_ellipsis: - * idx += ndim - len(tup) - * seen_ellipsis = True # <<<<<<<<<<<<<< - * have_slices = True - * else: - */ - __pyx_v_seen_ellipsis = 1; - - /* "View.MemoryView":685 - * for item in tup: - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * idx += ndim - len(tup) - * seen_ellipsis = True - */ - } - - /* "View.MemoryView":688 - * idx += ndim - len(tup) - * seen_ellipsis = True - * have_slices = True # <<<<<<<<<<<<<< - * else: - * if isinstance(item, slice): - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":684 - * idx = 0 - * for item in tup: - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * idx += ndim - len(tup) - */ 
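-/* Note: only the first Ellipsis expands into the dimensions missing from the index tuple (idx += ndim - len(tup)); any later Ellipsis acts like a single ':' because result was pre-filled with slice(None). Illustrative example (plain Python semantics): with ndim == 3, indexing with (Ellipsis, 0) leaves result == [slice(None), slice(None), 0]. */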
- goto __pyx_L5; - } - - /* "View.MemoryView":690 - * have_slices = True - * else: - * if isinstance(item, slice): # <<<<<<<<<<<<<< - * have_slices = True - * elif not PyIndex_Check(item): - */ - /*else*/ { - __pyx_t_2 = PySlice_Check(__pyx_v_item); - if (__pyx_t_2) { - - /* "View.MemoryView":691 - * else: - * if isinstance(item, slice): - * have_slices = True # <<<<<<<<<<<<<< - * elif not PyIndex_Check(item): - * raise TypeError, f"Cannot index with type '{type(item)}'" - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":690 - * have_slices = True - * else: - * if isinstance(item, slice): # <<<<<<<<<<<<<< - * have_slices = True - * elif not PyIndex_Check(item): - */ - goto __pyx_L7; - } - - /* "View.MemoryView":692 - * if isinstance(item, slice): - * have_slices = True - * elif not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError, f"Cannot index with type '{type(item)}'" - * result[idx] = item - */ - __pyx_t_2 = (!(PyIndex_Check(__pyx_v_item) != 0)); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":693 - * have_slices = True - * elif not PyIndex_Check(item): - * raise TypeError, f"Cannot index with type '{type(item)}'" # <<<<<<<<<<<<<< - * result[idx] = item - * idx += 1 - */ - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 693, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = 0; - __pyx_t_6 = 127; - __Pyx_INCREF(__pyx_kp_u_Cannot_index_with_type); - __pyx_t_5 += 24; - __Pyx_GIVEREF(__pyx_kp_u_Cannot_index_with_type); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Cannot_index_with_type); - __pyx_t_7 = __Pyx_PyObject_FormatSimple(((PyObject *)Py_TYPE(__pyx_v_item)), __pyx_empty_unicode); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) > __pyx_t_6) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) : __pyx_t_6; - __pyx_t_5 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_INCREF(__pyx_kp_u__6); - __pyx_t_5 += 1; - __Pyx_GIVEREF(__pyx_kp_u__6); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__6); - __pyx_t_7 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_t_7, 0, 0); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __PYX_ERR(1, 693, __pyx_L1_error) - - /* "View.MemoryView":692 - * if isinstance(item, slice): - * have_slices = True - * elif not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError, f"Cannot index with type '{type(item)}'" - * result[idx] = item - */ - } - __pyx_L7:; - - /* "View.MemoryView":694 - * elif not PyIndex_Check(item): - * raise TypeError, f"Cannot index with type '{type(item)}'" - * result[idx] = item # <<<<<<<<<<<<<< - * idx += 1 - * - */ - if (unlikely((__Pyx_SetItemInt(__pyx_v_result, __pyx_v_idx, __pyx_v_item, Py_ssize_t, 1, PyInt_FromSsize_t, 1, 1, 1) < 0))) __PYX_ERR(1, 694, __pyx_L1_error) - } - __pyx_L5:; - - /* "View.MemoryView":695 - * raise TypeError, f"Cannot index with type '{type(item)}'" - * result[idx] = item - * idx += 1 # <<<<<<<<<<<<<< - * - * nslices = ndim - idx - */ - __pyx_v_idx = (__pyx_v_idx + 1); - - /* "View.MemoryView":683 - * seen_ellipsis = False - * idx = 0 - * for item in tup: # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":697 - * idx += 1 - * - * nslices = ndim - idx # <<<<<<<<<<<<<< - * return have_slices or nslices, tuple(result) - * - */ - __pyx_v_nslices = (__pyx_v_ndim - __pyx_v_idx); - - /* "View.MemoryView":698 - * - * nslices = ndim - idx - * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< - * - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: - */ - __Pyx_XDECREF(__pyx_r); - if (!__pyx_v_have_slices) { - } else { - __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = __pyx_t_7; - __pyx_t_7 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_7 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = __pyx_t_7; - __pyx_t_7 = 0; - __pyx_L9_bool_binop_done:; - __pyx_t_7 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); - __pyx_t_1 = 0; - __pyx_t_7 = 0; - __pyx_r = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":671 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tup); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - -static int assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_suboffset; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); - - /* "View.MemoryView":701 - * - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: - * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * raise ValueError, "Indirect dimensions not supported" - */ - __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); - for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { - __pyx_t_1 = __pyx_t_3; - __pyx_v_suboffset = (__pyx_t_1[0]); - - /* "View.MemoryView":702 - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError, "Indirect dimensions not supported" - * return 0 # return type just used as an error flag - */ - __pyx_t_4 = (__pyx_v_suboffset >= 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError, "Indirect dimensions not supported" # <<<<<<<<<<<<<< - * return 0 # return type just used as an error flag - * - */ - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Indirect_dimensions_not_supporte, 0, 0); - __PYX_ERR(1, 703, __pyx_L1_error) - - /* "View.MemoryView":702 - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError, "Indirect dimensions not supported" - * return 0 # return type just used as an error flag - */ - } - } - - /* "View.MemoryView":704 - * if suboffset >= 0: - * raise ValueError, "Indirect dimensions not supported" - * return 0 # return type just used as an error flag # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":711 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { - int __pyx_v_new_ndim; - int __pyx_v_suboffset_dim; - int __pyx_v_dim; - 
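-/* Note: src below receives a flat C-level copy of the source slice (its shape/strides/suboffsets arrays), while dst is filled in one output dimension at a time by slice_memviewslice; suboffset_dim (reached through p_suboffset_dim) remembers which dimension, if any, still carries an indirect-buffer suboffset. */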
__Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - __Pyx_memviewslice *__pyx_v_p_src; - struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; - __Pyx_memviewslice *__pyx_v_p_dst; - int *__pyx_v_p_suboffset_dim; - Py_ssize_t __pyx_v_start; - Py_ssize_t __pyx_v_stop; - Py_ssize_t __pyx_v_step; - Py_ssize_t __pyx_v_cindex; - int __pyx_v_have_start; - int __pyx_v_have_stop; - int __pyx_v_have_step; - PyObject *__pyx_v_index = NULL; - struct __pyx_memoryview_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - struct __pyx_memoryview_obj *__pyx_t_3; - char *__pyx_t_4; - int __pyx_t_5; - Py_ssize_t __pyx_t_6; - PyObject *(*__pyx_t_7)(PyObject *); - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - int __pyx_t_10; - Py_ssize_t __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memview_slice", 0); - - /* "View.MemoryView":712 - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): - * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< - * cdef bint negative_step - * cdef __Pyx_memviewslice src, dst - */ - __pyx_v_new_ndim = 0; - __pyx_v_suboffset_dim = -1; - - /* "View.MemoryView":719 - * - * - * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< - * - * cdef _memoryviewslice memviewsliceobj - */ - (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); - - /* "View.MemoryView":723 - * cdef _memoryviewslice memviewsliceobj - * - * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(__pyx_assertions_enabled())) { - __pyx_t_1 = (__pyx_v_memview->view.ndim > 0); - if (unlikely(!__pyx_t_1)) { - __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0); - __PYX_ERR(1, 723, __pyx_L1_error) - } - } - #else - if ((1)); else __PYX_ERR(1, 723, __pyx_L1_error) - #endif - - /* "View.MemoryView":725 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - if (__pyx_t_1) { - - /* "View.MemoryView":726 - * - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview # <<<<<<<<<<<<<< - * p_src = &memviewsliceobj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 726, __pyx_L1_error) - __pyx_t_2 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_2); - __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":727 - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, &src) - */ - __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); - - /* "View.MemoryView":725 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - goto __pyx_L3; - } - - /* "View.MemoryView":729 - * p_src = &memviewsliceobj.from_slice - * else: - * slice_copy(memview, &src) # <<<<<<<<<<<<<< - * p_src = &src - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); - - /* 
"View.MemoryView":730 - * else: - * slice_copy(memview, &src) - * p_src = &src # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_p_src = (&__pyx_v_src); - } - __pyx_L3:; - - /* "View.MemoryView":736 - * - * - * dst.memview = p_src.memview # <<<<<<<<<<<<<< - * dst.data = p_src.data - * - */ - __pyx_t_3 = __pyx_v_p_src->memview; - __pyx_v_dst.memview = __pyx_t_3; - - /* "View.MemoryView":737 - * - * dst.memview = p_src.memview - * dst.data = p_src.data # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = __pyx_v_p_src->data; - __pyx_v_dst.data = __pyx_t_4; - - /* "View.MemoryView":742 - * - * - * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< - * cdef int *p_suboffset_dim = &suboffset_dim - * cdef Py_ssize_t start, stop, step, cindex - */ - __pyx_v_p_dst = (&__pyx_v_dst); - - /* "View.MemoryView":743 - * - * cdef __Pyx_memviewslice *p_dst = &dst - * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< - * cdef Py_ssize_t start, stop, step, cindex - * cdef bint have_start, have_stop, have_step - */ - __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); - - /* "View.MemoryView":747 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * cindex = index - */ - __pyx_t_5 = 0; - if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { - __pyx_t_2 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_2); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - } else { - __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 747, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 747, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_7)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_8 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error) - #else - __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - #endif - } else { - if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error) - #else - __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - #endif - } - } else { - __pyx_t_8 = __pyx_t_7(__pyx_t_2); - if (unlikely(!__pyx_t_8)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 747, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_8); - } - __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_8); - __pyx_t_8 = 0; - __pyx_v_dim = __pyx_t_5; - __pyx_t_5 = (__pyx_t_5 + 1); - - /* "View.MemoryView":748 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * cindex = index - * slice_memviewslice( - */ - __pyx_t_1 = (PyIndex_Check(__pyx_v_index) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":749 - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): - * cindex = index # <<<<<<<<<<<<<< - * slice_memviewslice( - * 
p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error) - __pyx_v_cindex = __pyx_t_9; - - /* "View.MemoryView":750 - * if PyIndex_Check(index): - * cindex = index - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_cindex, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 750, __pyx_L1_error) - - /* "View.MemoryView":748 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * cindex = index - * slice_memviewslice( - */ - goto __pyx_L6; - } - - /* "View.MemoryView":756 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - __pyx_t_1 = (__pyx_v_index == Py_None); - if (__pyx_t_1) { - - /* "View.MemoryView":757 - * False) - * elif index is None: - * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - */ - (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; - - /* "View.MemoryView":758 - * elif index is None: - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 - */ - (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; - - /* "View.MemoryView":759 - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< - * new_ndim += 1 - * else: - */ - (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; - - /* "View.MemoryView":760 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 # <<<<<<<<<<<<<< - * else: - * start = index.start or 0 - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - - /* "View.MemoryView":756 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - goto __pyx_L6; - } - - /* "View.MemoryView":762 - * new_ndim += 1 - * else: - * start = index.start or 0 # <<<<<<<<<<<<<< - * stop = index.stop or 0 - * step = index.step or 0 - */ - /*else*/ { - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 762, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 762, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } else { - __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) - __pyx_t_9 = __pyx_t_11; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_bool_binop_done; - } - __pyx_t_9 = 0; - __pyx_L7_bool_binop_done:; - __pyx_v_start = __pyx_t_9; - - /* "View.MemoryView":763 - * else: - * start = index.start or 0 - * stop = index.stop or 0 # <<<<<<<<<<<<<< - * step = index.step or 0 - * - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 763, __pyx_L1_error) - 
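-/* Note: 'start = index.start or 0' maps both None and an explicit 0 to 0; the have_start/have_stop/have_step flags computed below from 'is not None' are what actually tell slice_memviewslice whether each bound was supplied. */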
__Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 763, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } else { - __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 763, __pyx_L1_error) - __pyx_t_9 = __pyx_t_11; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_9 = 0; - __pyx_L9_bool_binop_done:; - __pyx_v_stop = __pyx_t_9; - - /* "View.MemoryView":764 - * start = index.start or 0 - * stop = index.stop or 0 - * step = index.step or 0 # <<<<<<<<<<<<<< - * - * have_start = index.start is not None - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 764, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } else { - __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 764, __pyx_L1_error) - __pyx_t_9 = __pyx_t_11; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_9 = 0; - __pyx_L11_bool_binop_done:; - __pyx_v_step = __pyx_t_9; - - /* "View.MemoryView":766 - * step = index.step or 0 - * - * have_start = index.start is not None # <<<<<<<<<<<<<< - * have_stop = index.stop is not None - * have_step = index.step is not None - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 766, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = (__pyx_t_8 != Py_None); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_v_have_start = __pyx_t_1; - - /* "View.MemoryView":767 - * - * have_start = index.start is not None - * have_stop = index.stop is not None # <<<<<<<<<<<<<< - * have_step = index.step is not None - * - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 767, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = (__pyx_t_8 != Py_None); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_v_have_stop = __pyx_t_1; - - /* "View.MemoryView":768 - * have_start = index.start is not None - * have_stop = index.stop is not None - * have_step = index.step is not None # <<<<<<<<<<<<<< - * - * slice_memviewslice( - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 768, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = (__pyx_t_8 != Py_None); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_v_have_step = __pyx_t_1; - - /* "View.MemoryView":770 - * have_step = index.step is not None - * - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 770, __pyx_L1_error) - - /* "View.MemoryView":776 - * have_start, have_stop, have_step, - * True) - * new_ndim += 1 # <<<<<<<<<<<<<< - * - 
* if isinstance(memview, _memoryviewslice): - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - } - __pyx_L6:; - - /* "View.MemoryView":747 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * cindex = index - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":778 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - if (__pyx_t_1) { - - /* "View.MemoryView":779 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __Pyx_XDECREF((PyObject *)__pyx_r); - - /* "View.MemoryView":780 - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< - * memviewsliceobj.to_dtype_func, - * memview.dtype_is_object) - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 780, __pyx_L1_error) } - - /* "View.MemoryView":781 - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * else: - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 781, __pyx_L1_error) } - - /* "View.MemoryView":779 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 779, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 779, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":778 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - } - - /* "View.MemoryView":784 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - /*else*/ { - __Pyx_XDECREF((PyObject *)__pyx_r); - - /* "View.MemoryView":785 - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 784, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":784 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 784, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - goto 
__pyx_L0; - } - - /* "View.MemoryView":711 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":793 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { - Py_ssize_t __pyx_v_new_shape; - int __pyx_v_negative_step; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save; - #endif - - /* "View.MemoryView":813 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - __pyx_t_1 = (!__pyx_v_is_slice); - if (__pyx_t_1) { - - /* "View.MemoryView":815 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - __pyx_t_1 = (__pyx_v_start < 0); - if (__pyx_t_1) { - - /* "View.MemoryView":816 - * - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if not 0 <= start < shape: - * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":815 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - } - - /* "View.MemoryView":817 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - __pyx_t_1 = (0 <= __pyx_v_start); - if (__pyx_t_1) { - __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); - } - __pyx_t_2 = (!__pyx_t_1); - if (__pyx_t_2) { - - /* "View.MemoryView":818 - * start += shape - * if not 0 <= start < shape: - * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< - * else: - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 818, __pyx_L1_error) - - /* "View.MemoryView":817 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - } - - /* "View.MemoryView":813 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":821 - * else: - * - * if have_step: # <<<<<<<<<<<<<< - * negative_step = step < 0 - * if step == 0: - */ - /*else*/ 
{ - __pyx_t_2 = (__pyx_v_have_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":822 - * - * if have_step: - * negative_step = step < 0 # <<<<<<<<<<<<<< - * if step == 0: - * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) - */ - __pyx_v_negative_step = (__pyx_v_step < 0); - - /* "View.MemoryView":823 - * if have_step: - * negative_step = step < 0 - * if step == 0: # <<<<<<<<<<<<<< - * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) - * else: - */ - __pyx_t_2 = (__pyx_v_step == 0); - if (__pyx_t_2) { - - /* "View.MemoryView":824 - * negative_step = step < 0 - * if step == 0: - * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< - * else: - * negative_step = False - */ - __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_kp_s_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 824, __pyx_L1_error) - - /* "View.MemoryView":823 - * if have_step: - * negative_step = step < 0 - * if step == 0: # <<<<<<<<<<<<<< - * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) - * else: - */ - } - - /* "View.MemoryView":821 - * else: - * - * if have_step: # <<<<<<<<<<<<<< - * negative_step = step < 0 - * if step == 0: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":826 - * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) - * else: - * negative_step = False # <<<<<<<<<<<<<< - * step = 1 - * - */ - /*else*/ { - __pyx_v_negative_step = 0; - - /* "View.MemoryView":827 - * else: - * negative_step = False - * step = 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_step = 1; - } - __pyx_L6:; - - /* "View.MemoryView":830 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - __pyx_t_2 = (__pyx_v_have_start != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":831 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - __pyx_t_2 = (__pyx_v_start < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":832 - * if have_start: - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if start < 0: - * start = 0 - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":833 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - __pyx_t_2 = (__pyx_v_start < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":834 - * start += shape - * if start < 0: - * start = 0 # <<<<<<<<<<<<<< - * elif start >= shape: - * if negative_step: - */ - __pyx_v_start = 0; - - /* "View.MemoryView":833 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - } - - /* "View.MemoryView":831 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":835 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - __pyx_t_2 = (__pyx_v_start >= __pyx_v_shape); - if (__pyx_t_2) { - - /* "View.MemoryView":836 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - if (__pyx_v_negative_step) { - - /* "View.MemoryView":837 - * elif start >= shape: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = shape - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":836 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * 
start = shape - 1 - * else: - */ - goto __pyx_L11; - } - - /* "View.MemoryView":839 - * start = shape - 1 - * else: - * start = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - /*else*/ { - __pyx_v_start = __pyx_v_shape; - } - __pyx_L11:; - - /* "View.MemoryView":835 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - } - __pyx_L9:; - - /* "View.MemoryView":830 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - goto __pyx_L8; - } - - /* "View.MemoryView":841 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - /*else*/ { - if (__pyx_v_negative_step) { - - /* "View.MemoryView":842 - * else: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = 0 - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":841 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L12; - } - - /* "View.MemoryView":844 - * start = shape - 1 - * else: - * start = 0 # <<<<<<<<<<<<<< - * - * if have_stop: - */ - /*else*/ { - __pyx_v_start = 0; - } - __pyx_L12:; - } - __pyx_L8:; - - /* "View.MemoryView":846 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - __pyx_t_2 = (__pyx_v_have_stop != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":847 - * - * if have_stop: - * if stop < 0: # <<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - __pyx_t_2 = (__pyx_v_stop < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":848 - * if have_stop: - * if stop < 0: - * stop += shape # <<<<<<<<<<<<<< - * if stop < 0: - * stop = 0 - */ - __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); - - /* "View.MemoryView":849 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - __pyx_t_2 = (__pyx_v_stop < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":850 - * stop += shape - * if stop < 0: - * stop = 0 # <<<<<<<<<<<<<< - * elif stop > shape: - * stop = shape - */ - __pyx_v_stop = 0; - - /* "View.MemoryView":849 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - } - - /* "View.MemoryView":847 - * - * if have_stop: - * if stop < 0: # <<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - goto __pyx_L14; - } - - /* "View.MemoryView":851 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - __pyx_t_2 = (__pyx_v_stop > __pyx_v_shape); - if (__pyx_t_2) { - - /* "View.MemoryView":852 - * stop = 0 - * elif stop > shape: - * stop = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - __pyx_v_stop = __pyx_v_shape; - - /* "View.MemoryView":851 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - } - __pyx_L14:; - - /* "View.MemoryView":846 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - goto __pyx_L13; - } - - /* "View.MemoryView":854 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - /*else*/ { - if (__pyx_v_negative_step) { - - /* "View.MemoryView":855 - * else: - * if negative_step: - * stop = -1 # <<<<<<<<<<<<<< - * else: - * stop = shape - */ - __pyx_v_stop = -1L; - - /* "View.MemoryView":854 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - goto __pyx_L16; - } - - /* 
"View.MemoryView":857 - * stop = -1 - * else: - * stop = shape # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_v_stop = __pyx_v_shape; - } - __pyx_L16:; - } - __pyx_L13:; - - /* "View.MemoryView":861 - * - * with cython.cdivision(True): - * new_shape = (stop - start) // step # <<<<<<<<<<<<<< - * - * if (stop - start) - step * new_shape: - */ - __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); - - /* "View.MemoryView":863 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * new_shape += 1 - * - */ - __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":864 - * - * if (stop - start) - step * new_shape: - * new_shape += 1 # <<<<<<<<<<<<<< - * - * if new_shape < 0: - */ - __pyx_v_new_shape = (__pyx_v_new_shape + 1); - - /* "View.MemoryView":863 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * new_shape += 1 - * - */ - } - - /* "View.MemoryView":866 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - __pyx_t_2 = (__pyx_v_new_shape < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":867 - * - * if new_shape < 0: - * new_shape = 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_new_shape = 0; - - /* "View.MemoryView":866 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - } - - /* "View.MemoryView":870 - * - * - * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset - */ - (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); - - /* "View.MemoryView":871 - * - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< - * dst.suboffsets[new_ndim] = suboffset - * - */ - (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; - - /* "View.MemoryView":872 - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; - } - __pyx_L3:; - - /* "View.MemoryView":875 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - __pyx_t_2 = ((__pyx_v_suboffset_dim[0]) < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":876 - * - * if suboffset_dim[0] < 0: - * dst.data += start * stride # <<<<<<<<<<<<<< - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride - */ - __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); - - /* "View.MemoryView":875 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - goto __pyx_L19; - } - - /* "View.MemoryView":878 - * dst.data += start * stride - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< - * - * if suboffset >= 0: - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_suboffset_dim[0]); - (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); - } - __pyx_L19:; - - /* "View.MemoryView":880 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - __pyx_t_2 = (__pyx_v_suboffset >= 0); - if (__pyx_t_2) { - - /* "View.MemoryView":881 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * 
dst.data = (<char **> dst.data)[0] + suboffset - */ - __pyx_t_2 = (!__pyx_v_is_slice); - if (__pyx_t_2) { - - /* "View.MemoryView":882 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - __pyx_t_2 = (__pyx_v_new_ndim == 0); - if (__pyx_t_2) { - - /* "View.MemoryView":883 - * if not is_slice: - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< - * else: - * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " - */ - __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":882 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - goto __pyx_L22; - } - - /* "View.MemoryView":885 - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< - * "must be indexed and not sliced", dim) - * else: - */ - /*else*/ { - - /* "View.MemoryView":886 - * else: - * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " - * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< - * else: - * suboffset_dim[0] = new_ndim - */ - __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 885, __pyx_L1_error) - } - __pyx_L22:; - - /* "View.MemoryView":881 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset - */ - goto __pyx_L21; - } - - /* "View.MemoryView":888 - * "must be indexed and not sliced", dim) - * else: - * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< - * - * return 0 - */ - /*else*/ { - (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; - } - __pyx_L21:; - - /* "View.MemoryView":880 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - } - - /* "View.MemoryView":890 - * suboffset_dim[0] = new_ndim - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":793 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - - /* function exit code */ - __pyx_L1_error:; - #ifdef WITH_THREAD - __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":896 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - -static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_suboffset; - Py_ssize_t __pyx_v_itemsize; - char *__pyx_v_resultp; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_UCS4 __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char 
*__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pybuffer_index", 0); - - /* "View.MemoryView":898 - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< - * cdef Py_ssize_t itemsize = view.itemsize - * cdef char *resultp - */ - __pyx_v_suboffset = -1L; - - /* "View.MemoryView":899 - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< - * cdef char *resultp - * - */ - __pyx_t_1 = __pyx_v_view->itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":902 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len // itemsize - * stride = itemsize - */ - __pyx_t_2 = (__pyx_v_view->ndim == 0); - if (__pyx_t_2) { - - /* "View.MemoryView":903 - * - * if view.ndim == 0: - * shape = view.len // itemsize # <<<<<<<<<<<<<< - * stride = itemsize - * else: - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 903, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 903, __pyx_L1_error) - } - __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); - - /* "View.MemoryView":904 - * if view.ndim == 0: - * shape = view.len // itemsize - * stride = itemsize # <<<<<<<<<<<<<< - * else: - * shape = view.shape[dim] - */ - __pyx_v_stride = __pyx_v_itemsize; - - /* "View.MemoryView":902 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len // itemsize - * stride = itemsize - */ - goto __pyx_L3; - } - - /* "View.MemoryView":906 - * stride = itemsize - * else: - * shape = view.shape[dim] # <<<<<<<<<<<<<< - * stride = view.strides[dim] - * if view.suboffsets != NULL: - */ - /*else*/ { - __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); - - /* "View.MemoryView":907 - * else: - * shape = view.shape[dim] - * stride = view.strides[dim] # <<<<<<<<<<<<<< - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] - */ - __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); - - /* "View.MemoryView":908 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - __pyx_t_2 = (__pyx_v_view->suboffsets != NULL); - if (__pyx_t_2) { - - /* "View.MemoryView":909 - * stride = view.strides[dim] - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< - * - * if index < 0: - */ - __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); - - /* "View.MemoryView":908 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - } - } - __pyx_L3:; - - /* "View.MemoryView":911 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - __pyx_t_2 = (__pyx_v_index < 0); - if (__pyx_t_2) { - - /* "View.MemoryView":912 - * - * if index < 0: - * index += view.shape[dim] # <<<<<<<<<<<<<< - * if index < 0: - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - */ - 
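- /* Editor's note (added): the statements below are Cython's bounds handling
-  * for one buffer axis, i.e. Python's negative-index convention: a negative
-  * index is shifted once by the axis length and must then land in
-  * [0, shape). A minimal plain-C sketch of the same normalization, with n
-  * standing in for view->shape[dim]:
-  *
-  *     if (index < 0) index += n;             // e.g. -1 becomes n - 1
-  *     if (index < 0 || index >= n) error();  // still out of bounds
-  */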
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); - - /* "View.MemoryView":913 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - */ - __pyx_t_2 = (__pyx_v_index < 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":914 - * index += view.shape[dim] - * if index < 0: - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<< - * - * if index >= shape: - */ - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = 0; - __pyx_t_4 = 127; - __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - __pyx_t_1 += 37; - __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a); - __pyx_t_5 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); - __pyx_t_5 = 0; - __Pyx_INCREF(__pyx_kp_u__7); - __pyx_t_1 += 1; - __Pyx_GIVEREF(__pyx_kp_u__7); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__7); - __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_builtin_IndexError, __pyx_t_5, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(1, 914, __pyx_L1_error) - - /* "View.MemoryView":913 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - */ - } - - /* "View.MemoryView":911 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - } - - /* "View.MemoryView":916 - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - */ - __pyx_t_2 = (__pyx_v_index >= __pyx_v_shape); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":917 - * - * if index >= shape: - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<< - * - * resultp = bufp + index * stride - */ - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 917, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = 0; - __pyx_t_4 = 127; - __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - __pyx_t_1 += 37; - __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a); - __pyx_t_3 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3); - __pyx_t_3 = 0; - __Pyx_INCREF(__pyx_kp_u__7); - __pyx_t_1 += 1; - __Pyx_GIVEREF(__pyx_kp_u__7); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u__7); - __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_5, 3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_Raise(__pyx_builtin_IndexError, 
__pyx_t_3, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 917, __pyx_L1_error) - - /* "View.MemoryView":916 - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - */ - } - - /* "View.MemoryView":919 - * raise IndexError, f"Out of bounds on buffer access (axis {dim})" - * - * resultp = bufp + index * stride # <<<<<<<<<<<<<< - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset - */ - __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); - - /* "View.MemoryView":920 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - __pyx_t_2 = (__pyx_v_suboffset >= 0); - if (__pyx_t_2) { - - /* "View.MemoryView":921 - * resultp = bufp + index * stride - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< - * - * return resultp - */ - __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":920 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - } - - /* "View.MemoryView":923 - * resultp = (<char **> resultp)[0] + suboffset - * - * return resultp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_resultp; - goto __pyx_L0; - - /* "View.MemoryView":896 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":929 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - -static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { - int __pyx_v_ndim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - long __pyx_t_3; - long __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save; - #endif - - /* "View.MemoryView":930 - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: - * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< - * - * cdef Py_ssize_t *shape = memslice.shape - */ - __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; - __pyx_v_ndim = __pyx_t_1; - - /* "View.MemoryView":932 - * cdef int ndim = memslice.memview.view.ndim - * - * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< - * cdef Py_ssize_t *strides = memslice.strides - * - */ - __pyx_t_2 = __pyx_v_memslice->shape; - __pyx_v_shape = __pyx_t_2; - - /* "View.MemoryView":933 - * - * cdef Py_ssize_t *shape = memslice.shape - * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_v_memslice->strides; - __pyx_v_strides = __pyx_t_2; - - 
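- /* Editor's note (added): transpose_memslice relies on memoryview addressing
-  * being purely stride-based (addr = data + i0*strides[0] + ... + ik*strides[k]),
-  * so reversing the shape and strides arrays in place is a complete transpose;
-  * no element data is moved. The loop below swaps strides[i] with
-  * strides[ndim-1-i] and shape[i] with shape[ndim-1-i] for i < ndim // 2.
-  * Only indirect dimensions (suboffset >= 0) cannot be transposed this way,
-  * which is why the loop raises ValueError for them.
-  */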
/* "View.MemoryView":937 - * - * cdef int i, j - * for i in range(ndim // 2): # <<<<<<<<<<<<<< - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - */ - __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":938 - * cdef int i, j - * for i in range(ndim // 2): - * j = ndim - 1 - i # <<<<<<<<<<<<<< - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] - */ - __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); - - /* "View.MemoryView":939 - * for i in range(ndim // 2): - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< - * shape[i], shape[j] = shape[j], shape[i] - * - */ - __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); - __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); - (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; - (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; - - /* "View.MemoryView":940 - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - */ - __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); - __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); - (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; - (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; - - /* "View.MemoryView":942 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0); - if (!__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0); - __pyx_t_7 = __pyx_t_8; - __pyx_L6_bool_binop_done:; - if (__pyx_t_7) { - - /* "View.MemoryView":943 - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< - * - * return 0 - */ - __pyx_t_9 = __pyx_memoryview_err(PyExc_ValueError, __pyx_kp_s_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 943, __pyx_L1_error) - - /* "View.MemoryView":942 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - } - } - - /* "View.MemoryView":945 - * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":929 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - #ifdef WITH_THREAD - __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":963 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) - * - */ - -/* 
Python wrapper */ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":964 - * - * def __dealloc__(self): - * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __PYX_XCLEAR_MEMVIEW((&__pyx_v_self->from_slice), 1); - - /* "View.MemoryView":963 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) - * - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":966 - * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":967 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - __pyx_t_1 = (__pyx_v_self->to_object_func != NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":968 - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) # <<<<<<<<<<<<<< - * else: - * return memoryview.convert_item_to_object(self, itemp) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 968, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":967 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - } - - /* "View.MemoryView":970 - * return self.to_object_func(itemp) - * else: - * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 970, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":966 - * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return 
self.to_object_func(itemp) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":972 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":973 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - __pyx_t_1 = (__pyx_v_self->to_dtype_func != NULL); - if (__pyx_t_1) { - - /* "View.MemoryView":974 - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< - * else: - * memoryview.assign_item_from_object(self, itemp, value) - */ - __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 974, __pyx_L1_error) - - /* "View.MemoryView":973 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":976 - * self.to_dtype_func(itemp, value) - * else: - * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< - * - * cdef _get_base(self): - */ - /*else*/ { - __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 976, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":972 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":978 - * memoryview.assign_item_from_object(self, itemp, value) - * - * cdef _get_base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - -static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("_get_base", 0); - - /* "View.MemoryView":979 - * - * cdef _get_base(self): - * return self.from_object # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - 
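- /* Editor's note (added): standard Cython return-value pattern -- the XDECREF
-  * above clears any previous contents of the return slot, then the INCREF
-  * below hands the caller a new owned reference to from_object, the object
-  * this slice view was originally constructed from.
-  */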
__Pyx_INCREF(__pyx_v_self->from_object); - __pyx_r = __pyx_v_self->from_object; - goto __pyx_L0; - - /* "View.MemoryView":978 - * memoryview.assign_item_from_object(self, itemp, value) - * - * cdef _get_base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - if (unlikely(__pyx_nargs > 0)) { - __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; - __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, -#if 
CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v___pyx_state = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< - */ - __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError, "no 
default __reduce__ due to non-trivial __cinit__" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - __Pyx_TypeInfo *__pyx_t_4; - Py_buffer __pyx_t_5; - Py_ssize_t *__pyx_t_6; - Py_ssize_t *__pyx_t_7; - Py_ssize_t *__pyx_t_8; - Py_ssize_t __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_fromslice", 0); - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_1 = (((PyObject *)__pyx_v_memviewslice.memview) == Py_None); - if (__pyx_t_1) { - - /* "View.MemoryView":1008 - * - * if memviewslice.memview == Py_None: - * return None # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - } - - /* "View.MemoryView":1013 - * - * - * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) # <<<<<<<<<<<<<< - * - * result.from_slice = memviewslice - */ - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = ((PyObject *)__pyx_tp_new__memoryviewslice(((PyTypeObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF((PyObject *)__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1015 - * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) - * - * result.from_slice = memviewslice # <<<<<<<<<<<<<< - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - */ - __pyx_v_result->from_slice = __pyx_v_memviewslice; - - /* "View.MemoryView":1016 - * - * result.from_slice = memviewslice - * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< - * - * result.from_object = ( memviewslice.memview)._get_base() - */ - __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); - - /* "View.MemoryView":1018 
- * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - * result.from_object = (<memoryview> memviewslice.memview)._get_base() # <<<<<<<<<<<<<< - * result.typeinfo = memviewslice.memview.typeinfo - * - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->__pyx_vtab)->_get_base(((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(__pyx_v_result->from_object); - __Pyx_DECREF(__pyx_v_result->from_object); - __pyx_v_result->from_object = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":1019 - * - * result.from_object = (<memoryview> memviewslice.memview)._get_base() - * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< - * - * result.view = memviewslice.memview.view - */ - __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; - __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; - - /* "View.MemoryView":1021 - * result.typeinfo = memviewslice.memview.typeinfo - * - * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< - * result.view.buf = memviewslice.data - * result.view.ndim = ndim - */ - __pyx_t_5 = __pyx_v_memviewslice.memview->view; - __pyx_v_result->__pyx_base.view = __pyx_t_5; - - /* "View.MemoryView":1022 - * - * result.view = memviewslice.memview.view - * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - */ - __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); - - /* "View.MemoryView":1023 - * result.view = memviewslice.memview.view - * result.view.buf = memviewslice.data - * result.view.ndim = ndim # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; - - /* "View.MemoryView":1024 - * result.view.buf = memviewslice.data - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; - - /* "View.MemoryView":1025 - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1028 - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< - * else: - * result.flags = PyBUF_RECORDS_RO - */ - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1030 - * result.flags = PyBUF_RECORDS - * else: - * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< - * - * result.view.shape = result.from_slice.shape - */ - /*else*/ { - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; - } - __pyx_L4:; - - /* "View.MemoryView":1032 - * result.flags = PyBUF_RECORDS_RO - * - * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< - * result.view.strides = 
result.from_slice.strides - * - */ - __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); - - /* "View.MemoryView":1033 - * - * result.view.shape = result.from_slice.shape - * result.view.strides = result.from_slice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); - - /* "View.MemoryView":1036 - * - * - * result.view.suboffsets = NULL # <<<<<<<<<<<<<< - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - */ - __pyx_v_result->__pyx_base.view.suboffsets = NULL; - - /* "View.MemoryView":1037 - * - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets - */ - __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_v_suboffset = (__pyx_t_6[0]); - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = result.from_slice.suboffsets - * break - */ - __pyx_t_1 = (__pyx_v_suboffset >= 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1039 - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); - - /* "View.MemoryView":1040 - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets - * break # <<<<<<<<<<<<<< - * - * result.view.len = result.view.itemsize - */ - goto __pyx_L6_break; - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = result.from_slice.suboffsets - * break - */ - } - } - __pyx_L6_break:; - - /* "View.MemoryView":1042 - * break - * - * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< - * for length in result.view.shape[:ndim]: - * result.view.len *= length - */ - __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - - /* "View.MemoryView":1043 - * - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< - * result.view.len *= length - * - */ - __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1044 - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: - * result.view.len *= length # <<<<<<<<<<<<<< - * - * result.to_object_func = to_object_func - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - } - - /* "View.MemoryView":1046 - * result.view.len *= length - * - * result.to_object_func = to_object_func # <<<<<<<<<<<<<< - * result.to_dtype_func = to_dtype_func - * - */ - __pyx_v_result->to_object_func = __pyx_v_to_object_func; - - /* "View.MemoryView":1047 - * - * result.to_object_func = to_object_func - * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; - - /* "View.MemoryView":1049 - * result.to_dtype_func = to_dtype_func - * - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF((PyObject *)__pyx_v_result); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { - struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; - __Pyx_memviewslice *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_slice_from_memview", 0); - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - if (__pyx_t_1) { - - /* "View.MemoryView":1056 - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): - * obj = memview # <<<<<<<<<<<<<< - * return &obj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) - __pyx_t_2 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_2); - __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1057 - * if isinstance(memview, _memoryviewslice): - * obj = memview - * return &obj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, mslice) - */ - __pyx_r = (&__pyx_v_obj->from_slice); - goto __pyx_L0; - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef 
_memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - } - - /* "View.MemoryView":1059 - * return &obj.from_slice - * else: - * slice_copy(memview, mslice) # <<<<<<<<<<<<<< - * return mslice - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); - - /* "View.MemoryView":1060 - * else: - * slice_copy(memview, mslice) - * return mslice # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_slice_copy') - */ - __pyx_r = __pyx_v_mslice; - goto __pyx_L0; - } - - /* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_obj); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept: # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { - int __pyx_v_dim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - Py_ssize_t *__pyx_v_suboffsets; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - __Pyx_RefNannySetupContext("slice_copy", 0); - - /* "View.MemoryView":1067 - * cdef (Py_ssize_t*) shape, strides, suboffsets - * - * shape = memview.view.shape # <<<<<<<<<<<<<< - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets - */ - __pyx_t_1 = __pyx_v_memview->view.shape; - __pyx_v_shape = __pyx_t_1; - - /* "View.MemoryView":1068 - * - * shape = memview.view.shape - * strides = memview.view.strides # <<<<<<<<<<<<<< - * suboffsets = memview.view.suboffsets - * - */ - __pyx_t_1 = __pyx_v_memview->view.strides; - __pyx_v_strides = __pyx_t_1; - - /* "View.MemoryView":1069 - * shape = memview.view.shape - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< - * - * dst.memview = <__pyx_memoryview *> memview - */ - __pyx_t_1 = __pyx_v_memview->view.suboffsets; - __pyx_v_suboffsets = __pyx_t_1; - - /* "View.MemoryView":1071 - * suboffsets = memview.view.suboffsets - * - * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< - * dst.data = memview.view.buf - * - */ - __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); - - /* "View.MemoryView":1072 - * - * dst.memview = <__pyx_memoryview *> memview - * dst.data = memview.view.buf # <<<<<<<<<<<<<< - * - * for dim in range(memview.view.ndim): - */ - __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); - - /* "View.MemoryView":1074 - * dst.data = memview.view.buf - * - * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - */ - __pyx_t_2 = __pyx_v_memview->view.ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_dim = __pyx_t_4; - - /* "View.MemoryView":1075 - * - * for dim in range(memview.view.ndim): - * dst.shape[dim] 
= shape[dim] # <<<<<<<<<<<<<< - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - */ - (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); - - /* "View.MemoryView":1076 - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - * - */ - (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); - - /* "View.MemoryView":1077 - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object') - */ - if ((__pyx_v_suboffsets != 0)) { - __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); - } else { - __pyx_t_5 = -1L; - } - (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; - } - - /* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept: # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { - __Pyx_memviewslice __pyx_v_memviewslice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy", 0); - - /* "View.MemoryView":1083 - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< - * return memoryview_copy_from_slice(memview, &memviewslice) - * - */ - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); - - /* "View.MemoryView":1084 - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) - * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object_from_slice') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview object and slice. 
- */ - -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { - PyObject *(*__pyx_v_to_object_func)(char *); - int (*__pyx_v_to_dtype_func)(char *, PyObject *); - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *(*__pyx_t_2)(char *); - int (*__pyx_t_3)(char *, PyObject *); - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - if (__pyx_t_1) { - - /* "View.MemoryView":1095 - * - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - */ - __pyx_t_2 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; - __pyx_v_to_object_func = __pyx_t_2; - - /* "View.MemoryView":1096 - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< - * else: - * to_object_func = NULL - */ - __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; - __pyx_v_to_dtype_func = __pyx_t_3; - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1098 - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - * to_object_func = NULL # <<<<<<<<<<<<<< - * to_dtype_func = NULL - * - */ - /*else*/ { - __pyx_v_to_object_func = NULL; - - /* "View.MemoryView":1099 - * else: - * to_object_func = NULL - * to_dtype_func = NULL # <<<<<<<<<<<<<< - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - */ - __pyx_v_to_dtype_func = NULL; - } - __pyx_L3:; - - /* "View.MemoryView":1101 - * to_dtype_func = NULL - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< - * to_object_func, to_dtype_func, - * memview.dtype_is_object) - */ - __Pyx_XDECREF(__pyx_r); - - /* "View.MemoryView":1103 - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - * to_object_func, to_dtype_func, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview object and slice. 
- */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: # <<<<<<<<<<<<<< - * return -arg if arg < 0 else arg - * - */ - -static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: - * return -arg if arg < 0 else arg # <<<<<<<<<<<<<< - * - * @cname('__pyx_get_best_slice_order') - */ - if ((__pyx_v_arg < 0)) { - __pyx_t_1 = (-__pyx_v_arg); - } else { - __pyx_t_1 = __pyx_v_arg; - } - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: # <<<<<<<<<<<<<< - * return -arg if arg < 0 else arg - * - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1113 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) noexcept nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. - */ - -static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { - int __pyx_v_i; - Py_ssize_t __pyx_v_c_stride; - Py_ssize_t __pyx_v_f_stride; - char __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1118 - * """ - * cdef int i - * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< - * cdef Py_ssize_t f_stride = 0 - * - */ - __pyx_v_c_stride = 0; - - /* "View.MemoryView":1119 - * cdef int i - * cdef Py_ssize_t c_stride = 0 - * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_f_stride = 0; - - /* "View.MemoryView":1121 - * cdef Py_ssize_t f_stride = 0 - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1122 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = ((__pyx_v_mslice->shape[__pyx_v_i]) > 1); - if (__pyx_t_2) { - - /* "View.MemoryView":1123 - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1124 - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - goto __pyx_L4_break; - - /* "View.MemoryView":1122 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L4_break:; - - /* "View.MemoryView":1126 - * break - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - */ - __pyx_t_1 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_1; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1127 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride 
= mslice.strides[i] - * break - */ - __pyx_t_2 = ((__pyx_v_mslice->shape[__pyx_v_i]) > 1); - if (__pyx_t_2) { - - /* "View.MemoryView":1128 - * for i in range(ndim): - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1129 - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - */ - goto __pyx_L7_break; - - /* "View.MemoryView":1127 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L7_break:; - - /* "View.MemoryView":1131 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - __pyx_t_2 = (abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)); - if (__pyx_t_2) { - - /* "View.MemoryView":1132 - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - * return 'C' # <<<<<<<<<<<<<< - * else: - * return 'F' - */ - __pyx_r = 'C'; - goto __pyx_L0; - - /* "View.MemoryView":1131 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - } - - /* "View.MemoryView":1134 - * return 'C' - * else: - * return 'F' # <<<<<<<<<<<<<< - * - * @cython.cdivision(True) - */ - /*else*/ { - __pyx_r = 'F'; - goto __pyx_L0; - } - - /* "View.MemoryView":1113 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) noexcept nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1137 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - -static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; - Py_ssize_t __pyx_v_dst_extent; - Py_ssize_t __pyx_v_src_stride; - Py_ssize_t __pyx_v_dst_stride; - int __pyx_t_1; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - Py_ssize_t __pyx_t_5; - - /* "View.MemoryView":1144 - * - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - */ - __pyx_v_src_extent = (__pyx_v_src_shape[0]); - - /* "View.MemoryView":1145 - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] - */ - __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); - - /* "View.MemoryView":1146 - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - */ - __pyx_v_src_stride = (__pyx_v_src_strides[0]); - - /* "View.MemoryView":1147 - * cdef Py_ssize_t dst_extent = 
dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); - - /* "View.MemoryView":1149 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ - __pyx_t_1 = (__pyx_v_ndim == 1); - if (__pyx_t_1) { - - /* "View.MemoryView":1150 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - __pyx_t_2 = (__pyx_v_src_stride > 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_dst_stride > 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - - /* "View.MemoryView":1151 - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - */ - __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); - if (__pyx_t_2) { - __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); - } - __pyx_t_1 = __pyx_t_2; - __pyx_L5_bool_binop_done:; - - /* "View.MemoryView":1150 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - if (__pyx_t_1) { - - /* "View.MemoryView":1152 - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); - - /* "View.MemoryView":1150 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1154 - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - */ - /*else*/ { - __pyx_t_3 = __pyx_v_dst_extent; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":1155 - * else: - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< - * src_data += src_stride - * dst_data += dst_stride - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); - - /* "View.MemoryView":1156 - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * else: - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1157 - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L4:; - - /* "View.MemoryView":1149 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ 
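- /* Fast-path note: when both 1-D strides equal the itemsize, the whole extent
-    is copied with a single memcpy (e.g. itemsize 8, extent 100 -> one 800-byte
-    copy) instead of the per-element memcpy loop generated above. */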
- goto __pyx_L3; - } - - /* "View.MemoryView":1159 - * dst_data += dst_stride - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * _copy_strided_to_strided(src_data, src_strides + 1, - * dst_data, dst_strides + 1, - */ - /*else*/ { - __pyx_t_3 = __pyx_v_dst_extent; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":1160 - * else: - * for i in range(dst_extent): - * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< - * dst_data, dst_strides + 1, - * src_shape + 1, dst_shape + 1, - */ - _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); - - /* "View.MemoryView":1164 - * src_shape + 1, dst_shape + 1, - * ndim - 1, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1165 - * ndim - 1, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1137 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - - /* function exit code */ -} - -/* "View.MemoryView":1167 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) noexcept nogil: - */ - -static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - - /* "View.MemoryView":1170 - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) noexcept nogil: - * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< - * src.shape, dst.shape, ndim, itemsize) - * - */ - _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1167 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) noexcept nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1174 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_size; - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - - /* "View.MemoryView":1176 - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< - * - * for 
shape in src.shape[:ndim]: - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_size = __pyx_t_1; - - /* "View.MemoryView":1178 - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - * - * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< - * size *= shape - * - */ - __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); - for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_shape = (__pyx_t_2[0]); - - /* "View.MemoryView":1179 - * - * for shape in src.shape[:ndim]: - * size *= shape # <<<<<<<<<<<<<< - * - * return size - */ - __pyx_v_size = (__pyx_v_size * __pyx_v_shape); - } - - /* "View.MemoryView":1181 - * size *= shape - * - * return size # <<<<<<<<<<<<<< - * - * @cname('__pyx_fill_contig_strides_array') - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - /* "View.MemoryView":1174 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1184 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) noexcept nogil: - */ - -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { - int __pyx_v_idx; - Py_ssize_t __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1193 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - __pyx_t_1 = (__pyx_v_order == 'F'); - if (__pyx_t_1) { - - /* "View.MemoryView":1194 - * - * if order == 'F': - * for idx in range(ndim): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - __pyx_t_2 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_idx = __pyx_t_4; - - /* "View.MemoryView":1195 - * if order == 'F': - * for idx in range(ndim): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * else: - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1196 - * for idx in range(ndim): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * else: - * for idx in range(ndim - 1, -1, -1): - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - - /* "View.MemoryView":1193 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1198 - * stride *= shape[idx] - * else: - * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - /*else*/ { - for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_idx = __pyx_t_2; - - /* "View.MemoryView":1199 - * else: - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1200 - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * - * return 
stride - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - } - __pyx_L3:; - - /* "View.MemoryView":1202 - * stride *= shape[idx] - * - * return stride # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_data_to_temp') - */ - __pyx_r = __pyx_v_stride; - goto __pyx_L0; - - /* "View.MemoryView":1184 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) noexcept nogil: - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1205 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { - int __pyx_v_i; - void *__pyx_v_result; - size_t __pyx_v_itemsize; - size_t __pyx_v_size; - void *__pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - struct __pyx_memoryview_obj *__pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save; - #endif - - /* "View.MemoryView":1216 - * cdef void *result - * - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef size_t size = slice_get_size(src, ndim) - * - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1217 - * - * cdef size_t itemsize = src.memview.view.itemsize - * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< - * - * result = malloc(size) - */ - __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); - - /* "View.MemoryView":1219 - * cdef size_t size = slice_get_size(src, ndim) - * - * result = malloc(size) # <<<<<<<<<<<<<< - * if not result: - * _err_no_memory() - */ - __pyx_v_result = malloc(__pyx_v_size); - - /* "View.MemoryView":1220 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err_no_memory() - * - */ - __pyx_t_2 = (!(__pyx_v_result != 0)); - if (__pyx_t_2) { - - /* "View.MemoryView":1221 - * result = malloc(size) - * if not result: - * _err_no_memory() # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err_no_memory(); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error) - - /* "View.MemoryView":1220 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err_no_memory() - * - */ - } - - /* "View.MemoryView":1224 - * - * - * tmpslice.data = result # <<<<<<<<<<<<<< - * tmpslice.memview = src.memview - * for i in range(ndim): - */ - __pyx_v_tmpslice->data = ((char *)__pyx_v_result); - - /* "View.MemoryView":1225 - * - * tmpslice.data = result - * tmpslice.memview = src.memview # <<<<<<<<<<<<<< - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - */ - __pyx_t_4 = __pyx_v_src->memview; - __pyx_v_tmpslice->memview = __pyx_t_4; - - /* "View.MemoryView":1226 - * tmpslice.data = result - * tmpslice.memview = src.memview - * for i in range(ndim): # <<<<<<<<<<<<<< - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1227 - * tmpslice.memview = 
src.memview - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< - * tmpslice.suboffsets[i] = -1 - * - */ - (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); - - /* "View.MemoryView":1228 - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) - */ - (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1230 - * tmpslice.suboffsets[i] = -1 - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) # <<<<<<<<<<<<<< - * - * - */ - (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); - - /* "View.MemoryView":1233 - * - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1234 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - __pyx_t_2 = ((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1); - if (__pyx_t_2) { - - /* "View.MemoryView":1235 - * for i in range(ndim): - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< - * - * if slice_is_contig(src[0], order, ndim): - */ - (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1234 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - } - } - - /* "View.MemoryView":1237 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - __pyx_t_2 = __pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim); - if (__pyx_t_2) { - - /* "View.MemoryView":1238 - * - * if slice_is_contig(src[0], order, ndim): - * memcpy(result, src.data, size) # <<<<<<<<<<<<<< - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - */ - (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); - - /* "View.MemoryView":1237 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":1240 - * memcpy(result, src.data, size) - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< - * - * return result - */ - /*else*/ { - copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); - } - __pyx_L9:; - - /* "View.MemoryView":1242 - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":1205 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - - /* function exit code */ - __pyx_L1_error:; - #ifdef WITH_THREAD - __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - __pyx_L0:; - return 
__pyx_r; -} - -/* "View.MemoryView":1247 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" - */ - -static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - Py_UCS4 __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_extents", 0); - - /* "View.MemoryView":1249 - * cdef int _err_extents(int i, Py_ssize_t extent1, - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err_dim') - */ - __pyx_t_1 = PyTuple_New(7); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = 0; - __pyx_t_3 = 127; - __Pyx_INCREF(__pyx_kp_u_got_differing_extents_in_dimensi); - __pyx_t_2 += 35; - __Pyx_GIVEREF(__pyx_kp_u_got_differing_extents_in_dimensi); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_kp_u_got_differing_extents_in_dimensi); - __pyx_t_4 = __Pyx_PyUnicode_From_int(__pyx_v_i, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_kp_u_got); - __pyx_t_2 += 6; - __Pyx_GIVEREF(__pyx_kp_u_got); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_kp_u_got); - __pyx_t_4 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent1, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_kp_u_and); - __pyx_t_2 += 5; - __Pyx_GIVEREF(__pyx_kp_u_and); - PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_kp_u_and); - __pyx_t_4 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent2, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_1, 5, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_kp_u__7); - __pyx_t_2 += 1; - __Pyx_GIVEREF(__pyx_kp_u__7); - PyTuple_SET_ITEM(__pyx_t_1, 6, __pyx_kp_u__7); - __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_1, 7, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_4, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(1, 1249, __pyx_L1_error) - - /* "View.MemoryView":1247 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - 
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1252 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error, msg % dim - * - */ - -static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, PyObject *__pyx_v_msg, int __pyx_v_dim) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_dim", 0); - __Pyx_INCREF(__pyx_v_msg); - - /* "View.MemoryView":1253 - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: - * raise error, msg % dim # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err') - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyString_FormatSafe(__pyx_v_msg, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_t_2, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 1253, __pyx_L1_error) - - /* "View.MemoryView":1252 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error, msg % dim - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_msg); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1256 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(PyObject *error, str msg) except -1 with gil: # <<<<<<<<<<<<<< - * raise error, msg - * - */ - -static int __pyx_memoryview_err(PyObject *__pyx_v_error, PyObject *__pyx_v_msg) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err", 0); - __Pyx_INCREF(__pyx_v_msg); - - /* "View.MemoryView":1257 - * @cname('__pyx_memoryview_err') - * cdef int _err(PyObject *error, str msg) except -1 with gil: - * raise error, msg # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err_no_memory') - */ - __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_v_msg, 0, 0); - __PYX_ERR(1, 1257, __pyx_L1_error) - - /* "View.MemoryView":1256 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(PyObject *error, str msg) except -1 with gil: # <<<<<<<<<<<<<< - * raise error, msg - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_msg); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); 
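- /* The _err_* helpers are declared "except -1 with gil" in the Cython source,
-    so the nogil copy routines can raise Python exceptions: the GIL is acquired
-    at entry via PyGILState_Ensure and released here before returning -1. */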
- #endif - return __pyx_r; -} - -/* "View.MemoryView":1260 - * - * @cname('__pyx_memoryview_err_no_memory') - * cdef int _err_no_memory() except -1 with gil: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - -static int __pyx_memoryview_err_no_memory(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_no_memory", 0); - - /* "View.MemoryView":1261 - * @cname('__pyx_memoryview_err_no_memory') - * cdef int _err_no_memory() except -1 with gil: - * raise MemoryError # <<<<<<<<<<<<<< - * - * - */ - PyErr_NoMemory(); __PYX_ERR(1, 1261, __pyx_L1_error) - - /* "View.MemoryView":1260 - * - * @cname('__pyx_memoryview_err_no_memory') - * cdef int _err_no_memory() except -1 with gil: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView._err_no_memory", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1265 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { - void *__pyx_v_tmpdata; - size_t __pyx_v_itemsize; - int __pyx_v_i; - char __pyx_v_order; - int __pyx_v_broadcasting; - int __pyx_v_direct_copy; - __Pyx_memviewslice __pyx_v_tmp; - int __pyx_v_ndim; - int __pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - void *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save; - #endif - - /* "View.MemoryView":1273 - * Check for overlapping memory and verify the shapes. 
- * """ - * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - */ - __pyx_v_tmpdata = NULL; - - /* "View.MemoryView":1274 - * """ - * cdef void *tmpdata = NULL - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - */ - __pyx_t_1 = __pyx_v_src.memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1276 - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< - * cdef bint broadcasting = False - * cdef bint direct_copy = False - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); - - /* "View.MemoryView":1277 - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False # <<<<<<<<<<<<<< - * cdef bint direct_copy = False - * cdef __Pyx_memviewslice tmp - */ - __pyx_v_broadcasting = 0; - - /* "View.MemoryView":1278 - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False - * cdef bint direct_copy = False # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice tmp - * - */ - __pyx_v_direct_copy = 0; - - /* "View.MemoryView":1281 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - __pyx_t_2 = (__pyx_v_src_ndim < __pyx_v_dst_ndim); - if (__pyx_t_2) { - - /* "View.MemoryView":1282 - * - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); - - /* "View.MemoryView":1281 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1283 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - __pyx_t_2 = (__pyx_v_dst_ndim < __pyx_v_src_ndim); - if (__pyx_t_2) { - - /* "View.MemoryView":1284 - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< - * - * cdef int ndim = max(src_ndim, dst_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); - - /* "View.MemoryView":1283 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - } - __pyx_L3:; - - /* "View.MemoryView":1286 - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - __pyx_t_3 = __pyx_v_dst_ndim; - __pyx_t_4 = __pyx_v_src_ndim; - if ((__pyx_t_3 > __pyx_t_4)) { - __pyx_t_5 = __pyx_t_3; - } else { - __pyx_t_5 = __pyx_t_4; - } - __pyx_v_ndim = __pyx_t_5; - - /* "View.MemoryView":1288 - * cdef int ndim = max(src_ndim, dst_ndim) - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - */ - __pyx_t_5 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_5; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = 
__pyx_t_4; - - /* "View.MemoryView":1289 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])); - if (__pyx_t_2) { - - /* "View.MemoryView":1290 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) == 1); - if (__pyx_t_2) { - - /* "View.MemoryView":1291 - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - * broadcasting = True # <<<<<<<<<<<<<< - * src.strides[i] = 0 - * else: - */ - __pyx_v_broadcasting = 1; - - /* "View.MemoryView":1292 - * if src.shape[i] == 1: - * broadcasting = True - * src.strides[i] = 0 # <<<<<<<<<<<<<< - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) - */ - (__pyx_v_src.strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1290 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - goto __pyx_L7; - } - - /* "View.MemoryView":1294 - * src.strides[i] = 0 - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< - * - * if src.suboffsets[i] >= 0: - */ - /*else*/ { - __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":1289 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - } - - /* "View.MemoryView":1296 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) - * - */ - __pyx_t_2 = ((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1297 - * - * if src.suboffsets[i] >= 0: - * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< - * - * if slices_overlap(&src, &dst, ndim, itemsize): - */ - __pyx_t_6 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_kp_s_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) - - /* "View.MemoryView":1296 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) - * - */ - } - } - - /* "View.MemoryView":1299 - * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - __pyx_t_2 = __pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - if (__pyx_t_2) { - - /* "View.MemoryView":1301 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - __pyx_t_2 = (!__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim)); - if (__pyx_t_2) { - - /* "View.MemoryView":1302 - * - * if not slice_is_contig(src, order, ndim): - * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); - - /* 
"View.MemoryView":1301 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - } - - /* "View.MemoryView":1304 - * order = get_best_order(&dst, ndim) - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< - * src = tmp - * - */ - __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error) - __pyx_v_tmpdata = __pyx_t_7; - - /* "View.MemoryView":1305 - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - * src = tmp # <<<<<<<<<<<<<< - * - * if not broadcasting: - */ - __pyx_v_src = __pyx_v_tmp; - - /* "View.MemoryView":1299 - * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - } - - /* "View.MemoryView":1307 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (!__pyx_v_broadcasting); - if (__pyx_t_2) { - - /* "View.MemoryView":1310 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim); - if (__pyx_t_2) { - - /* "View.MemoryView":1311 - * - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); - - /* "View.MemoryView":1310 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - goto __pyx_L12; - } - - /* "View.MemoryView":1312 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim); - if (__pyx_t_2) { - - /* "View.MemoryView":1313 - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< - * - * if direct_copy: - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); - - /* "View.MemoryView":1312 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - } - __pyx_L12:; - - /* "View.MemoryView":1315 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - */ - if (__pyx_v_direct_copy) { - - /* "View.MemoryView":1317 - * if direct_copy: - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1318 - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - * 
memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - * free(tmpdata) - */ - (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); - - /* "View.MemoryView":1319 - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< - * free(tmpdata) - * return 0 - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1320 - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1321 - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * if order == 'F' == get_best_order(&dst, ndim): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1315 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - */ - } - - /* "View.MemoryView":1307 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1323 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (__pyx_v_order == 'F'); - if (__pyx_t_2) { - __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); - } - if (__pyx_t_2) { - - /* "View.MemoryView":1326 - * - * - * transpose_memslice(&src) # <<<<<<<<<<<<<< - * transpose_memslice(&dst) - * - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1326, __pyx_L1_error) - - /* "View.MemoryView":1327 - * - * transpose_memslice(&src) - * transpose_memslice(&dst) # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1327, __pyx_L1_error) - - /* "View.MemoryView":1323 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1329 - * transpose_memslice(&dst) - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1330 - * - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - * - */ - copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1331 - * refcount_copying(&dst, dtype_is_object, ndim, inc=False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< - * - * free(tmpdata) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1333 - * refcount_copying(&dst, dtype_is_object, ndim, inc=True) - * - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ 
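- /* tmpdata is non-NULL only when overlapping slices forced a temporary buffer
-    via copy_data_to_temp; free(NULL) is a no-op in C, so the unconditional
-    free on both return paths is safe. */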
- free(__pyx_v_tmpdata); - - /* "View.MemoryView":1334 - * - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_broadcast_leading') - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1265 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - - /* function exit code */ - __pyx_L1_error:; - #ifdef WITH_THREAD - __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1337 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) noexcept nogil: - */ - -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { - int __pyx_v_i; - int __pyx_v_offset; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "View.MemoryView":1341 - * int ndim_other) noexcept nogil: - * cdef int i - * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); - - /* "View.MemoryView":1343 - * cdef int offset = ndim_other - ndim - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1344 - * - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - */ - (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); - - /* "View.MemoryView":1345 - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - */ - (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1346 - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< - * - * for i in range(offset): - */ - (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); - } - - /* "View.MemoryView":1348 - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - * for i in range(offset): # <<<<<<<<<<<<<< - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - */ - __pyx_t_1 = __pyx_v_offset; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1349 - * - * for i in range(offset): - * mslice.shape[i] = 1 # <<<<<<<<<<<<<< - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 - */ - (__pyx_v_mslice->shape[__pyx_v_i]) = 1; - - /* "View.MemoryView":1350 - * for i in range(offset): - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< - * 
mslice.suboffsets[i] = -1 - * - */ - (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); - - /* "View.MemoryView":1351 - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1337 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) noexcept nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1359 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: # <<<<<<<<<<<<<< - * - * if dtype_is_object: - */ - -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { - - /* "View.MemoryView":1361 - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) - * - */ - if (__pyx_v_dtype_is_object) { - - /* "View.MemoryView":1362 - * - * if dtype_is_object: - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - */ - __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1361 - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) - * - */ - } - - /* "View.MemoryView":1359 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: # <<<<<<<<<<<<<< - * - * if dtype_is_object: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1365 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) noexcept with gil: - */ - -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - __Pyx_RefNannyDeclarations - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); - - /* "View.MemoryView":1368 - * Py_ssize_t *strides, int ndim, - * bint inc) noexcept with gil: - * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1365 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) noexcept with gil: - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - 
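- /* This with-gil trampoline exists so that nogil copy helpers such as
- * refcount_copying above can still adjust Python object refcounts: the GIL
- * was acquired at function entry via PyGILState_Ensure and is released
- * again below. */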
#ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif -} - -/* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, bint inc) noexcept: - * cdef Py_ssize_t i - */ - -static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); - - /* "View.MemoryView":1374 - * Py_ssize_t *strides, int ndim, bint inc) noexcept: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * - * for i in range(shape[0]): - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1376 - * cdef Py_ssize_t stride = strides[0] - * - * for i in range(shape[0]): # <<<<<<<<<<<<<< - * if ndim == 1: - * if inc: - */ - __pyx_t_1 = (__pyx_v_shape[0]); - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1377 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - __pyx_t_4 = (__pyx_v_ndim == 1); - if (__pyx_t_4) { - - /* "View.MemoryView":1378 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - if (__pyx_v_inc) { - - /* "View.MemoryView":1379 - * if ndim == 1: - * if inc: - * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * Py_DECREF(( data)[0]) - */ - Py_INCREF((((PyObject **)__pyx_v_data)[0])); - - /* "View.MemoryView":1378 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":1381 - * Py_INCREF(( data)[0]) - * else: - * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) - */ - /*else*/ { - Py_DECREF((((PyObject **)__pyx_v_data)[0])); - } - __pyx_L6:; - - /* "View.MemoryView":1377 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - goto __pyx_L5; - } - - /* "View.MemoryView":1383 - * Py_DECREF(( data)[0]) - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) # <<<<<<<<<<<<<< - * - * data += stride - */ - /*else*/ { - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); - } - __pyx_L5:; - - /* "View.MemoryView":1385 - * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) - * - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, bint inc) noexcept: - * cdef Py_ssize_t i - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1391 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void 
*item, - * bint dtype_is_object) noexcept nogil: - */ - -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { - - /* "View.MemoryView":1394 - * size_t itemsize, void *item, - * bint dtype_is_object) noexcept nogil: - * refcount_copying(dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, inc=True) - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1395 - * bint dtype_is_object) noexcept nogil: - * refcount_copying(dst, dtype_is_object, ndim, inc=False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) # <<<<<<<<<<<<<< - * refcount_copying(dst, dtype_is_object, ndim, inc=True) - * - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1396 - * refcount_copying(dst, dtype_is_object, ndim, inc=False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< - * - * - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1391 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) noexcept nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1400 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) noexcept nogil: - */ - -static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_extent; - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - - /* "View.MemoryView":1404 - * size_t itemsize, void *item) noexcept nogil: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t extent = shape[0] - * - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1405 - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] - * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_extent = (__pyx_v_shape[0]); - - /* "View.MemoryView":1407 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - __pyx_t_1 = (__pyx_v_ndim == 1); - if (__pyx_t_1) { - - /* "View.MemoryView":1408 - * - * if ndim == 1: - * for i in range(extent): # <<<<<<<<<<<<<< - * memcpy(data, item, itemsize) - * data += stride - */ - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1409 - * if ndim == 1: - * for i in range(extent): - * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< - * data += stride - * else: - */ - (void)(memcpy(__pyx_v_data, __pyx_v_item, 
__pyx_v_itemsize)); - - /* "View.MemoryView":1410 - * for i in range(extent): - * memcpy(data, item, itemsize) - * data += stride # <<<<<<<<<<<<<< - * else: - * for i in range(extent): - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1407 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1412 - * data += stride - * else: - * for i in range(extent): # <<<<<<<<<<<<<< - * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) - * data += stride - */ - /*else*/ { - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1413 - * else: - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) # <<<<<<<<<<<<<< - * data += stride - * - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1414 - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1400 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) noexcept nogil: - */ - - /* function exit code */ -} - -/* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v___pyx_type = 0; - long __pyx_v___pyx_checksum; - PyObject *__pyx_v___pyx_state = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; - PyObject* values[3] = {0,0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - 
CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_type)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 3)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - } - __pyx_v___pyx_type = values[0]; - __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_v___pyx_state = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_v___pyx_PickleError = 0; - PyObject *__pyx_v___pyx_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - */ - __pyx_t_1 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_tuple__8, Py_NE)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_2) { - - /* "(tree fragment)":5 - * cdef object __pyx_result - * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): - * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - * __pyx_result = Enum.__new__(__pyx_type) - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_n_s_PickleError); - __Pyx_GIVEREF(__pyx_n_s_PickleError); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_PickleError); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_1); - __pyx_v___pyx_PickleError = __pyx_t_1; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":6 - * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum # <<<<<<<<<<<<<< - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - */ - __pyx_t_3 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_0x_x_vs_0, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_v___pyx_PickleError, __pyx_t_1, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 6, __pyx_L1_error) - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - */ - } - - /* "(tree fragment)":7 - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v___pyx_type}; - 
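- /* Vectorcall-style dispatch of Enum.__new__(__pyx_type): when the attribute
- * was unpacked as a bound method, __pyx_t_4 holds `self` and __pyx_t_5 == 1,
- * so callargs+1-__pyx_t_5 starts at `self` and 1+__pyx_t_5 == 2 arguments
- * are passed; otherwise only __pyx_type is passed. */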
__pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v___pyx_result = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - __pyx_t_2 = (__pyx_v___pyx_state != Py_None); - if (__pyx_t_2) { - - /* "(tree fragment)":9 - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 9, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - } - - /* "(tree fragment)":10 - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result # <<<<<<<<<<<<<< - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v___pyx_result); - __pyx_r = __pyx_v___pyx_result; - goto __pyx_L0; - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v___pyx_PickleError); - __Pyx_XDECREF(__pyx_v___pyx_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - int __pyx_lineno 
= 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); - - /* "(tree fragment)":12 - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 12, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v___pyx_result->name); - __Pyx_DECREF(__pyx_v___pyx_result->name); - __pyx_v___pyx_result->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 13, __pyx_L1_error) - } - __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_4 = (__pyx_t_3 > 1); - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_2 = __pyx_t_4; - __pyx_L4_bool_binop_done:; - if (__pyx_t_2) { - - /* "(tree fragment)":14 - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_update); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 14, __pyx_L1_error) - } - __pyx_t_5 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_8, 1+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 
= 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - } - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { - float __pyx_v_max_neg_val = __pyx_k__9; - int __pyx_v_x; - int __pyx_v_y; - float __pyx_v_v_prev; - float __pyx_v_v_cur; - int __pyx_v_index; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - int __pyx_t_5; - long __pyx_t_6; - long __pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_ssize_t __pyx_t_10; - float __pyx_t_11; - float __pyx_t_12; - float __pyx_t_13; - int __pyx_t_14; - Py_ssize_t __pyx_t_15; - Py_ssize_t __pyx_t_16; - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; - } - } - - /* "monotonic_align/core.pyx":13 - * cdef float v_cur - * cdef float tmp - * cdef int index = t_x - 1 # <<<<<<<<<<<<<< - * - * for y in range(t_y): - */ - __pyx_v_index = (__pyx_v_t_x - 1); - - /* "monotonic_align/core.pyx":15 - * cdef int index = t_x - 1 - * - * for y in range(t_y): # <<<<<<<<<<<<<< - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - */ - __pyx_t_1 = __pyx_v_t_y; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_y = __pyx_t_3; - - /* "monotonic_align/core.pyx":16 - * - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< - * if x == y: - * v_cur = max_neg_val - */ - __pyx_t_4 = (__pyx_v_y + 1); - __pyx_t_5 = __pyx_v_t_x; - if ((__pyx_t_4 < __pyx_t_5)) { - __pyx_t_6 = __pyx_t_4; - } else { - __pyx_t_6 = __pyx_t_5; - } - __pyx_t_4 = __pyx_t_6; - __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); - __pyx_t_6 = 0; - if ((__pyx_t_5 > __pyx_t_6)) { - __pyx_t_7 = __pyx_t_5; - } else { - __pyx_t_7 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_4; - for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { - __pyx_v_x = __pyx_t_5; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # 
<<<<<<<<<<<<<< - * v_cur = max_neg_val - * else: - */ - __pyx_t_8 = (__pyx_v_x == __pyx_v_y); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":18 - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - * v_cur = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_cur = value[y-1, x] - */ - __pyx_v_v_cur = __pyx_v_max_neg_val; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = max_neg_val - * else: - */ - goto __pyx_L7; - } - - /* "monotonic_align/core.pyx":20 - * v_cur = max_neg_val - * else: - * v_cur = value[y-1, x] # <<<<<<<<<<<<<< - * if x == 0: - * if y == 0: - */ - /*else*/ { - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_x; - __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); - } - __pyx_L7:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - __pyx_t_8 = (__pyx_v_x == 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - __pyx_t_8 = (__pyx_v_y == 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":23 - * if x == 0: - * if y == 0: - * v_prev = 0. # <<<<<<<<<<<<<< - * else: - * v_prev = max_neg_val - */ - __pyx_v_v_prev = 0.; - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - goto __pyx_L9; - } - - /* "monotonic_align/core.pyx":25 - * v_prev = 0. - * else: - * v_prev = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_prev = value[y-1, x-1] - */ - /*else*/ { - __pyx_v_v_prev = __pyx_v_max_neg_val; - } - __pyx_L9:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. 
- */ - goto __pyx_L8; - } - - /* "monotonic_align/core.pyx":27 - * v_prev = max_neg_val - * else: - * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< - * value[y, x] += max(v_prev, v_cur) - * - */ - /*else*/ { - __pyx_t_10 = (__pyx_v_y - 1); - __pyx_t_9 = (__pyx_v_x - 1); - __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); - } - __pyx_L8:; - - /* "monotonic_align/core.pyx":28 - * else: - * v_prev = value[y-1, x-1] - * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< - * - * for y in range(t_y - 1, -1, -1): - */ - __pyx_t_11 = __pyx_v_v_cur; - __pyx_t_12 = __pyx_v_v_prev; - if ((__pyx_t_11 > __pyx_t_12)) { - __pyx_t_13 = __pyx_t_11; - } else { - __pyx_t_13 = __pyx_t_12; - } - __pyx_t_9 = __pyx_v_y; - __pyx_t_10 = __pyx_v_x; - *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; - } - } - - /* "monotonic_align/core.pyx":30 - * value[y, x] += max(v_prev, v_cur) - * - * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - */ - for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_y = __pyx_t_1; - - /* "monotonic_align/core.pyx":31 - * - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 # <<<<<<<<<<<<<< - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 - */ - __pyx_t_10 = __pyx_v_y; - __pyx_t_9 = __pyx_v_index; - *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - __pyx_t_14 = (__pyx_v_index != 0); - if (__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_14 = (__pyx_v_index == __pyx_v_y); - if (!__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_index; - __pyx_t_15 = (__pyx_v_y - 1); - __pyx_t_16 = (__pyx_v_index - 1); - __pyx_t_14 = ((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))); - __pyx_t_8 = __pyx_t_14; - __pyx_L13_bool_binop_done:; - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":33 - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_index = (__pyx_v_index - 1); - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - } - } - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - - /* function exit code */ -} - -/* 
"monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { - CYTHON_UNUSED int __pyx_v_b; - int __pyx_v_i; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; - Py_ssize_t __pyx_t_6; - Py_ssize_t __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save; - #endif - - /* "monotonic_align/core.pyx":39 - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: - * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< - * cdef int i - * for i in prange(b, nogil=True): - */ - __pyx_v_b = (__pyx_v_paths.shape[0]); - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - { - #ifdef WITH_THREAD - PyThreadState *_save; - _save = NULL; - if (PyGILState_Check()) { - Py_UNBLOCK_THREADS - } - __Pyx_FastGIL_Remember(); - #endif - /*try:*/ { - __pyx_t_1 = __pyx_v_b; - { - int __pyx_parallel_temp0 = ((int)0xbad0bad0); - const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; - PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; - int __pyx_parallel_why; - __pyx_parallel_why = 0; - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) (x) - #define unlikely(x) (x) - #endif - __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; - if (__pyx_t_3 > 0) - { - #ifdef _OPENMP - #pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) - #endif /* _OPENMP */ - { - #ifdef _OPENMP - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - Py_BEGIN_ALLOW_THREADS - #endif /* _OPENMP */ - #ifdef _OPENMP - #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) - #endif /* _OPENMP */ - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ - if (__pyx_parallel_why < 2) - { - __pyx_v_i = (int)(0 + 1 * __pyx_t_2); - - /* "monotonic_align/core.pyx":42 - * cdef int i - * for i in prange(b, nogil=True): - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< - */ - __pyx_t_4.data = __pyx_v_paths.data; - __pyx_t_4.memview = __pyx_v_paths.memview; - __PYX_INC_MEMVIEW(&__pyx_t_4, 0); - { - Py_ssize_t __pyx_tmp_idx = 
__pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; - __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; -__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; - __pyx_t_4.suboffsets[0] = -1; - -__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; -__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; - __pyx_t_4.suboffsets[1] = -1; - -__pyx_t_5.data = __pyx_v_values.data; - __pyx_t_5.memview = __pyx_v_values.memview; - __PYX_INC_MEMVIEW(&__pyx_t_5, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; - __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; -__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; - __pyx_t_5.suboffsets[0] = -1; - -__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; -__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; - __pyx_t_5.suboffsets[1] = -1; - -__pyx_t_6 = __pyx_v_i; - __pyx_t_7 = __pyx_v_i; - __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); if (unlikely(__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 42, __pyx_L8_error) - __PYX_XCLEAR_MEMVIEW(&__pyx_t_4, 0); - __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; - __PYX_XCLEAR_MEMVIEW(&__pyx_t_5, 0); - __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; - goto __pyx_L11; - __pyx_L8_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - #ifdef _OPENMP - #pragma omp flush(__pyx_parallel_exc_type) - #endif /* _OPENMP */ - if (!__pyx_parallel_exc_type) { - __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); - __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; - __Pyx_GOTREF(__pyx_parallel_exc_type); - } - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_parallel_why = 4; - goto __pyx_L10; - __pyx_L10:; - #ifdef _OPENMP - #pragma omp critical(__pyx_parallel_lastprivates0) - #endif /* _OPENMP */ - { - __pyx_parallel_temp0 = __pyx_v_i; - } - __pyx_L11:; - #ifdef _OPENMP - #pragma omp flush(__pyx_parallel_why) - #endif /* _OPENMP */ - } - } - #ifdef _OPENMP - Py_END_ALLOW_THREADS - #else -{ -#ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - #endif /* _OPENMP */ - /* Clean up any temporaries */ - __PYX_XCLEAR_MEMVIEW(&__pyx_t_4, 0); - __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; - __PYX_XCLEAR_MEMVIEW(&__pyx_t_5, 0); - __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - #ifndef _OPENMP -} -#endif /* _OPENMP */ - } - } - if (__pyx_parallel_exc_type) { - /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ - __pyx_parallel_why = 4; - } - if (__pyx_parallel_why) { - __pyx_v_i = __pyx_parallel_temp0; - switch (__pyx_parallel_why) { - case 4: - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_GIVEREF(__pyx_parallel_exc_type); - __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); - __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - goto __pyx_L4_error; - } - } - } - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #endif - } - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - /*finally:*/ { - /*normal exit:*/{ - #ifdef WITH_THREAD - __Pyx_FastGIL_Forget(); - if (_save) { - Py_BLOCK_THREADS - } - #endif - goto __pyx_L5; - } - __pyx_L4_error: { - #ifdef WITH_THREAD - __Pyx_FastGIL_Forget(); - if (_save) { - Py_BLOCK_THREADS - } - #endif - goto __pyx_L1_error; - } - __pyx_L5:; - } - } - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - #ifdef WITH_THREAD - __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __PYX_XCLEAR_MEMVIEW(&__pyx_t_4, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_t_5, 1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - __pyx_L0:; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_15monotonic_align_4core_1maximum_path_c = {"maximum_path_c", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - 
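- /* Python-level wrapper for maximum_path_c: the code below unpacks the four
- * arguments, converts them to typed memoryview slices (PyBUF_WRITABLE), and
- * dispatches to the C implementation above. Per batch element, that C code
- * is the compiled form of the Cython recurrence quoted throughout this file:
- *
- *     for y in range(t_y):
- *         for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- *             v_cur  = max_neg_val if x == y else value[y-1, x]
- *             v_prev = (0. if y == 0 else max_neg_val) if x == 0 else value[y-1, x-1]
- *             value[y, x] += max(v_prev, v_cur)
- *     index = t_x - 1
- *     for y in range(t_y - 1, -1, -1):            # backtrack the best path
- *         path[y, index] = 1
- *         if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
- *             index = index - 1
- */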
__Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); - { - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; - PyObject* values[4] = {0,0,0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_paths)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 38, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_values)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 38, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_t_ys)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 38, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_t_xs)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 38, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 4)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - } - __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_L3_error:; - __PYX_XCLEAR_MEMVIEW(&__pyx_v_paths, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_values, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_t_ys, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_t_xs, 1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); - - /* function exit code */ - __PYX_XCLEAR_MEMVIEW(&__pyx_v_paths, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_values, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_t_ys, 1); - __PYX_XCLEAR_MEMVIEW(&__pyx_v_t_xs, 1); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("maximum_path_c", 0); - __Pyx_XDECREF(__pyx_r); - if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } - __pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 38, __pyx_L1_error) - __pyx_t_1 = __Pyx_void_to_None(NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_array __pyx_vtable_array; - -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_array_obj *p; - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - #endif - p = ((struct __pyx_array_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_array; - p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); - p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); - if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_array(PyObject *o) { - struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && (!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))) { - if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_array) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - } - #endif - { - PyObject *etype, *eval, *etb; - 
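- /* array.__dealloc__ may run arbitrary Python code, so any exception
- * currently being handled is stashed with PyErr_Fetch and restored
- * afterwards, and the refcount is temporarily bumped so the object looks
- * alive while __dealloc__ runs. */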
PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_array___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->mode); - Py_CLEAR(p->_format); - (*Py_TYPE(o)->tp_free)(o); -} -static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_array___setitem__(o, i, v); - } - else { - __Pyx_TypeName o_type_name; - o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name); - __Pyx_DECREF_TypeName(o_type_name); - return -1; - } -} - -static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { - PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); - if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - v = __pyx_array___getattr__(o, n); - } - return v; -} - -static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); -} - -static PyMethodDef __pyx_methods_array[] = { - {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_array[] = { - {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; -#if CYTHON_USE_TYPE_SPECS -#if !CYTHON_COMPILING_IN_LIMITED_API - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; -#endif -static PyType_Slot __pyx_type___pyx_array_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_array}, - {Py_sq_length, (void *)__pyx_array___len__}, - {Py_sq_item, (void *)__pyx_sq_item_array}, - {Py_mp_length, (void *)__pyx_array___len__}, - {Py_mp_subscript, (void *)__pyx_array___getitem__}, - {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_array}, - {Py_tp_getattro, (void *)__pyx_tp_getattro_array}, - #if defined(Py_bf_getbuffer) - {Py_bf_getbuffer, (void *)__pyx_array_getbuffer}, - #endif - {Py_tp_methods, (void *)__pyx_methods_array}, - {Py_tp_getset, (void *)__pyx_getsets_array}, - {Py_tp_new, (void *)__pyx_tp_new_array}, - {0, 0}, -}; -static PyType_Spec __pyx_type___pyx_array_spec = { - "monotonic_align.core.array", - sizeof(struct __pyx_array_obj), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE, - __pyx_type___pyx_array_slots, -}; -#else - -static PySequenceMethods __pyx_tp_as_sequence_array = { - __pyx_array___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_array, /*sq_item*/ - 0, /*sq_slice*/ - 0, 
/*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_array = { - __pyx_array___len__, /*mp_length*/ - __pyx_array___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_array = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.""array", /*tp_name*/ - sizeof(struct __pyx_array_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_array, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - __pyx_tp_getattro_array, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE, /*tp_flags*/ - 0, /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_array, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_array, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_array, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if __PYX_NEED_TP_PRINT_SLOT == 1 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030C0000 - 0, /*tp_watched*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_MemviewEnum_obj *p; - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - #endif - p = ((struct __pyx_MemviewEnum_obj *)o); - p->name = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_Enum(PyObject *o) { - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - 
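/* __pyx_MemviewEnum is Cython's internal Enum class; its instances act as
   named sentinels for memory layouts ("strided and direct", "contiguous
   and direct", ...). The type owns only its `name` attribute, so the
   traverse/clear/dealloc hooks below manage that single reference. */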
#if CYTHON_USE_TP_FINALIZE - if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) { - if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_Enum) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - } - #endif - PyObject_GC_UnTrack(o); - Py_CLEAR(p->name); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_Enum(PyObject *o) { - PyObject* tmp; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyObject *__pyx_specialmethod___pyx_MemviewEnum___repr__(PyObject *self, CYTHON_UNUSED PyObject *arg) { - return __pyx_MemviewEnum___repr__(self); -} - -static PyMethodDef __pyx_methods_Enum[] = { - {"__repr__", (PyCFunction)__pyx_specialmethod___pyx_MemviewEnum___repr__, METH_NOARGS|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type___pyx_MemviewEnum_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_Enum}, - {Py_tp_repr, (void *)__pyx_MemviewEnum___repr__}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_Enum}, - {Py_tp_clear, (void *)__pyx_tp_clear_Enum}, - {Py_tp_methods, (void *)__pyx_methods_Enum}, - {Py_tp_init, (void *)__pyx_MemviewEnum___init__}, - {Py_tp_new, (void *)__pyx_tp_new_Enum}, - {0, 0}, -}; -static PyType_Spec __pyx_type___pyx_MemviewEnum_spec = { - "monotonic_align.core.Enum", - sizeof(struct __pyx_MemviewEnum_obj), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, - __pyx_type___pyx_MemviewEnum_slots, -}; -#else - -static PyTypeObject __pyx_type___pyx_MemviewEnum = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.""Enum", /*tp_name*/ - sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_Enum, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_MemviewEnum___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_Enum, /*tp_traverse*/ - __pyx_tp_clear_Enum, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_Enum, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, 
/*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - __pyx_MemviewEnum___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_Enum, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if __PYX_NEED_TP_PRINT_SLOT == 1 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030C0000 - 0, /*tp_watched*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif -static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; - -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryview_obj *p; - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - #endif - p = ((struct __pyx_memoryview_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_memoryview; - p->obj = Py_None; Py_INCREF(Py_None); - p->_size = Py_None; Py_INCREF(Py_None); - p->_array_interface = Py_None; Py_INCREF(Py_None); - p->view.obj = NULL; - if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_memoryview(PyObject *o) { - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) { - if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_memoryview) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryview___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->obj); - Py_CLEAR(p->_size); - Py_CLEAR(p->_array_interface); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - if (p->obj) { - e = (*v)(p->obj, a); if (e) return e; - } - if (p->_size) { - e = (*v)(p->_size, a); if (e) return e; - } - if (p->_array_interface) { - e = (*v)(p->_array_interface, a); if (e) return e; - } - if (p->view.obj) { - e = (*v)(p->view.obj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_memoryview(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - tmp = ((PyObject*)p->obj); - p->obj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_size); - p->_size = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = 
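/* tp_clear for memoryview resets each owned slot (obj, _size,
   _array_interface) back to None and drops the buffer's owner reference,
   breaking any reference cycle the garbage collector has found. */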
((PyObject*)p->_array_interface); - p->_array_interface = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - Py_CLEAR(p->view.obj); - return 0; -} -static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_memoryview___setitem__(o, i, v); - } - else { - __Pyx_TypeName o_type_name; - o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name); - __Pyx_DECREF_TypeName(o_type_name); - return -1; - } -} - -static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); -} - -static PyObject *__pyx_specialmethod___pyx_memoryview___repr__(PyObject *self, CYTHON_UNUSED PyObject *arg) { - return __pyx_memoryview___repr__(self); -} - -static PyMethodDef __pyx_methods_memoryview[] = { - {"__repr__", (PyCFunction)__pyx_specialmethod___pyx_memoryview___repr__, METH_NOARGS|METH_COEXIST, 0}, - {"is_c_contig", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_c_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"is_f_contig", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_f_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"copy", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"copy_fortran", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy_fortran, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_3__setstate_cython__, 
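/* Method and getset tables for the internal memoryview type: besides
   __repr__ it exposes is_c_contig / is_f_contig, copy / copy_fortran and
   the pickling stubs, while the read-only properties (T, base, shape,
   strides, suboffsets, ndim, itemsize, nbytes, size) mirror the standard
   buffer metadata. */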
__Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_memoryview[] = { - {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, - {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, - {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, - {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, - {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, - {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, - {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, - {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, - {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; -#if CYTHON_USE_TYPE_SPECS -#if !CYTHON_COMPILING_IN_LIMITED_API - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; -#endif -static PyType_Slot __pyx_type___pyx_memoryview_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_memoryview}, - {Py_tp_repr, (void *)__pyx_memoryview___repr__}, - {Py_sq_length, (void *)__pyx_memoryview___len__}, - {Py_sq_item, (void *)__pyx_sq_item_memoryview}, - {Py_mp_length, (void *)__pyx_memoryview___len__}, - {Py_mp_subscript, (void *)__pyx_memoryview___getitem__}, - {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_memoryview}, - {Py_tp_str, (void *)__pyx_memoryview___str__}, - #if defined(Py_bf_getbuffer) - {Py_bf_getbuffer, (void *)__pyx_memoryview_getbuffer}, - #endif - {Py_tp_traverse, (void *)__pyx_tp_traverse_memoryview}, - {Py_tp_clear, (void *)__pyx_tp_clear_memoryview}, - {Py_tp_methods, (void *)__pyx_methods_memoryview}, - {Py_tp_getset, (void *)__pyx_getsets_memoryview}, - {Py_tp_new, (void *)__pyx_tp_new_memoryview}, - {0, 0}, -}; -static PyType_Spec __pyx_type___pyx_memoryview_spec = { - "monotonic_align.core.memoryview", - sizeof(struct __pyx_memoryview_obj), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, - __pyx_type___pyx_memoryview_slots, -}; -#else - -static PySequenceMethods __pyx_tp_as_sequence_memoryview = { - __pyx_memoryview___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_memoryview, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_memoryview = { - __pyx_memoryview___len__, /*mp_length*/ - __pyx_memoryview___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_memoryview = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.""memoryview", /*tp_name*/ - sizeof(struct 
__pyx_memoryview_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_memoryview___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - __pyx_memoryview___str__, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_memoryview, /*tp_traverse*/ - __pyx_tp_clear_memoryview, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_memoryview, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_memoryview, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_memoryview, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if __PYX_NEED_TP_PRINT_SLOT == 1 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030C0000 - 0, /*tp_watched*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif -static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; - -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryviewslice_obj *p; - PyObject *o = __pyx_tp_new_memoryview(t, a, k); - if (unlikely(!o)) return 0; - p = ((struct __pyx_memoryviewslice_obj *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; - p->from_object = Py_None; Py_INCREF(Py_None); - p->from_slice.memview = NULL; - return o; -} - -static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) { - if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc__memoryviewslice) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryviewslice___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->from_object); - PyObject_GC_Track(o); - __pyx_tp_dealloc_memoryview(o); -} - -static int 
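/* _memoryviewslice subclasses memoryview; it is the "internal class for
   passing memoryview slices to Python" and additionally keeps the
   originating object plus a C-level slice struct alive, which the GC
   hooks below must visit and clear. */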
__pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; - if (p->from_object) { - e = (*v)(p->from_object, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear__memoryviewslice(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - __pyx_tp_clear_memoryview(o); - tmp = ((PyObject*)p->from_object); - p->from_object = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - __PYX_XCLEAR_MEMVIEW(&p->from_slice, 1); - return 0; -} - -static PyMethodDef __pyx_methods__memoryviewslice[] = { - {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type___pyx_memoryviewslice_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc__memoryviewslice}, - {Py_tp_doc, (void *)PyDoc_STR("Internal class for passing memoryview slices to Python")}, - {Py_tp_traverse, (void *)__pyx_tp_traverse__memoryviewslice}, - {Py_tp_clear, (void *)__pyx_tp_clear__memoryviewslice}, - {Py_tp_methods, (void *)__pyx_methods__memoryviewslice}, - {Py_tp_new, (void *)__pyx_tp_new__memoryviewslice}, - {0, 0}, -}; -static PyType_Spec __pyx_type___pyx_memoryviewslice_spec = { - "monotonic_align.core._memoryviewslice", - sizeof(struct __pyx_memoryviewslice_obj), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE, - __pyx_type___pyx_memoryviewslice_slots, -}; -#else - -static PyTypeObject __pyx_type___pyx_memoryviewslice = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.""_memoryviewslice", /*tp_name*/ - sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - #if CYTHON_COMPILING_IN_PYPY || 0 - __pyx_memoryview___repr__, /*tp_repr*/ - #else - 0, /*tp_repr*/ - #endif - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - #if CYTHON_COMPILING_IN_PYPY || 0 - __pyx_memoryview___str__, /*tp_str*/ - #else - 0, /*tp_str*/ - #endif - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE, /*tp_flags*/ - PyDoc_STR("Internal class for passing memoryview slices to Python"), /*tp_doc*/ - __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ - __pyx_tp_clear__memoryviewslice, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods__memoryviewslice, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, 
/*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new__memoryviewslice, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if __PYX_NEED_TP_PRINT_SLOT == 1 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030C0000 - 0, /*tp_watched*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif -/* #### Code section: pystring_table ### */ - -static int __Pyx_CreateStringTabAndInitStrings(void) { - __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, - {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, - {&__pyx_kp_s_All_dimensions_preceding_dimensi, __pyx_k_All_dimensions_preceding_dimensi, sizeof(__pyx_k_All_dimensions_preceding_dimensi), 0, 0, 1, 0}, - {&__pyx_n_s_AssertionError, __pyx_k_AssertionError, sizeof(__pyx_k_AssertionError), 0, 0, 1, 1}, - {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, - {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, - {&__pyx_kp_u_Cannot_index_with_type, __pyx_k_Cannot_index_with_type, sizeof(__pyx_k_Cannot_index_with_type), 0, 1, 0, 0}, - {&__pyx_kp_s_Cannot_transpose_memoryview_with, __pyx_k_Cannot_transpose_memoryview_with, sizeof(__pyx_k_Cannot_transpose_memoryview_with), 0, 0, 1, 0}, - {&__pyx_kp_s_Dimension_d_is_not_direct, __pyx_k_Dimension_d_is_not_direct, sizeof(__pyx_k_Dimension_d_is_not_direct), 0, 0, 1, 0}, - {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, - {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, - {&__pyx_kp_s_Incompatible_checksums_0x_x_vs_0, __pyx_k_Incompatible_checksums_0x_x_vs_0, sizeof(__pyx_k_Incompatible_checksums_0x_x_vs_0), 0, 0, 1, 0}, - {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, - {&__pyx_kp_s_Index_out_of_bounds_axis_d, __pyx_k_Index_out_of_bounds_axis_d, sizeof(__pyx_k_Index_out_of_bounds_axis_d), 0, 0, 1, 0}, - {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, - {&__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, 
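/* The string table interns every literal the module needs (identifiers,
   keyword names, error messages) exactly once at import time; the numeric
   flags on each entry record the string kind (bytes vs. unicode vs. str)
   and whether the value should be interned. */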
sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 1, 0, 0}, - {&__pyx_kp_u_Invalid_shape_in_axis, __pyx_k_Invalid_shape_in_axis, sizeof(__pyx_k_Invalid_shape_in_axis), 0, 1, 0, 0}, - {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, - {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, - {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, - {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, - {&__pyx_kp_u_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 1, 0, 0}, - {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_Sequence, __pyx_k_Sequence, sizeof(__pyx_k_Sequence), 0, 0, 1, 1}, - {&__pyx_kp_s_Step_may_not_be_zero_axis_d, __pyx_k_Step_may_not_be_zero_axis_d, sizeof(__pyx_k_Step_may_not_be_zero_axis_d), 0, 0, 1, 0}, - {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, - {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, - {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, - {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, - {&__pyx_kp_u__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 1, 0, 0}, - {&__pyx_n_s__23, __pyx_k__23, sizeof(__pyx_k__23), 0, 0, 1, 1}, - {&__pyx_n_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 1}, - {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, - {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, - {&__pyx_n_s_abc, __pyx_k_abc, sizeof(__pyx_k_abc), 0, 0, 1, 1}, - {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, - {&__pyx_kp_u_and, __pyx_k_and, sizeof(__pyx_k_and), 0, 1, 0, 0}, - {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, - {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, - {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, - {&__pyx_kp_s_collections_abc, __pyx_k_collections_abc, sizeof(__pyx_k_collections_abc), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_core_pyx, __pyx_k_core_pyx, sizeof(__pyx_k_core_pyx), 0, 0, 1, 0}, - {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, - {&__pyx_kp_u_disable, __pyx_k_disable, sizeof(__pyx_k_disable), 0, 1, 0, 0}, - {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, - {&__pyx_kp_u_enable, __pyx_k_enable, sizeof(__pyx_k_enable), 0, 1, 0, 0}, - {&__pyx_n_s_encode, 
__pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, - {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, - {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, - {&__pyx_kp_u_gc, __pyx_k_gc, sizeof(__pyx_k_gc), 0, 1, 0, 0}, - {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, - {&__pyx_kp_u_got, __pyx_k_got, sizeof(__pyx_k_got), 0, 1, 0, 0}, - {&__pyx_kp_u_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 1, 0, 0}, - {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_index, __pyx_k_index, sizeof(__pyx_k_index), 0, 0, 1, 1}, - {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {&__pyx_kp_u_isenabled, __pyx_k_isenabled, sizeof(__pyx_k_isenabled), 0, 1, 0, 0}, - {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, - {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_maximum_path_c, __pyx_k_maximum_path_c, sizeof(__pyx_k_maximum_path_c), 0, 0, 1, 1}, - {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_monotonic_align_core, __pyx_k_monotonic_align_core, sizeof(__pyx_k_monotonic_align_core), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, - {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, - {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, - {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, - {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, - {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, - {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_cython, 
__pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, - {&__pyx_n_s_register, __pyx_k_register, sizeof(__pyx_k_register), 0, 0, 1, 1}, - {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, - {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, - {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, - {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, - {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, - {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, - {&__pyx_n_s_sys, __pyx_k_sys, sizeof(__pyx_k_sys), 0, 0, 1, 1}, - {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, - {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, - {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, - {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, - {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, - {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, - {&__pyx_n_s_version_info, __pyx_k_version_info, sizeof(__pyx_k_version_info), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} - }; - return __Pyx_InitStrings(__pyx_string_tab); -} -/* #### Code section: cached_builtins ### */ -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) - __pyx_builtin___import__ = __Pyx_GetBuiltinName(__pyx_n_s_import); if (!__pyx_builtin___import__) __PYX_ERR(1, 100, __pyx_L1_error) - __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 141, __pyx_L1_error) - __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 156, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 159, __pyx_L1_error) - __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) - __pyx_builtin_AssertionError = __Pyx_GetBuiltinName(__pyx_n_s_AssertionError); if (!__pyx_builtin_AssertionError) __PYX_ERR(1, 373, __pyx_L1_error) - __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 408, __pyx_L1_error) - 
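/* Builtins such as range, ValueError and MemoryError are resolved once
   here and cached in module-level slots so later uses skip the builtins
   lookup. In the __PYX_ERR calls, file index 0 appears to refer to
   monotonic_align/core.pyx and index 1 to the View.MemoryView utility
   code, matching the source comments elsewhere in this file. */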
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 618, __pyx_L1_error) - __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 914, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: cached_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "View.MemoryView":582 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __pyx_tuple__4 = PyTuple_New(1); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 582, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_INCREF(__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_tuple__4, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "View.MemoryView":679 - * tup = index if isinstance(index, tuple) else (index,) - * - * result = [slice(None)] * ndim # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_slice__5 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__5)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__5); - __Pyx_GIVEREF(__pyx_slice__5); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum - */ - __pyx_tuple__8 = PyTuple_Pack(3, __pyx_int_136983863, __pyx_int_112105877, __pyx_int_184977713); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "View.MemoryView":100 - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: - * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - * else: - */ - __pyx_tuple__10 = PyTuple_Pack(1, __pyx_n_s_sys); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - __pyx_tuple__11 = PyTuple_Pack(2, __pyx_int_3, __pyx_int_3); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__11); - __Pyx_GIVEREF(__pyx_tuple__11); - - /* "View.MemoryView":101 - * try: - * if __import__("sys").version_info >= (3, 3): - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence # <<<<<<<<<<<<<< - * else: - * __pyx_collections_abc_Sequence = __import__("collections").Sequence - */ - __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_collections_abc); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "View.MemoryView":103 - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - * else: - * __pyx_collections_abc_Sequence = __import__("collections").Sequence # <<<<<<<<<<<<<< - * except: - * - */ - __pyx_tuple__13 = PyTuple_Pack(1, __pyx_n_s_collections); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* 
"View.MemoryView":309 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 309, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - - /* "View.MemoryView":310 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 310, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "View.MemoryView":311 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__16); - __Pyx_GIVEREF(__pyx_tuple__16); - - /* "View.MemoryView":314 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* "View.MemoryView":315 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__18); - __Pyx_GIVEREF(__pyx_tuple__18); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_tuple__19 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(1, 1, __pyx_L1_error) - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - __pyx_tuple__21 = PyTuple_Pack(4, __pyx_n_s_paths, __pyx_n_s_values, __pyx_n_s_t_ys, __pyx_n_s_t_xs); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_core_pyx, __pyx_n_s_maximum_path_c, 38, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} -/* 
#### Code section: init_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { - if (__Pyx_CreateStringTabAndInitStrings() < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_112105877 = PyInt_FromLong(112105877L); if (unlikely(!__pyx_int_112105877)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_136983863 = PyInt_FromLong(136983863L); if (unlikely(!__pyx_int_136983863)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: init_globals ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - /* AssertionsEnabled.init */ - __Pyx_init_assertions_enabled(); - -if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) - - /* InitThreads.init */ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 -PyEval_InitThreads(); -#endif - -if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) - - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: init_module ### */ - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __pyx_collections_abc_Sequence = Py_None; Py_INCREF(Py_None); - generic = Py_None; Py_INCREF(Py_None); - strided = Py_None; Py_INCREF(Py_None); - indirect = Py_None; Py_INCREF(Py_None); - contiguous = Py_None; Py_INCREF(Py_None); - indirect_contiguous = Py_None; Py_INCREF(Py_None); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __pyx_vtabptr_array = &__pyx_vtable_array; - __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; - #if CYTHON_USE_TYPE_SPECS - __pyx_array_type = (PyTypeObject *) 
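/* Module initialisation runs in fixed phases: interned strings and cached
   constants are set up before this type-init phase, which materialises
   the four helper types (array, Enum, memoryview, _memoryviewslice),
   installs their C vtables and patches buffer and __reduce__ support
   onto the created types. */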
__Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_array_spec, NULL); if (unlikely(!__pyx_array_type)) __PYX_ERR(1, 114, __pyx_L1_error) - #if !CYTHON_COMPILING_IN_LIMITED_API - __pyx_array_type->tp_as_buffer = &__pyx_tp_as_buffer_array; - if (!__pyx_array_type->tp_as_buffer->bf_releasebuffer && __pyx_array_type->tp_base->tp_as_buffer && __pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer) { - __pyx_array_type->tp_as_buffer->bf_releasebuffer = __pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer; - } - #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer) - /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */ - #elif defined(_MSC_VER) - #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.") - #else - #warning "The buffer protocol is not supported in the Limited C-API < 3.11." - #endif - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_array_spec, __pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) - #else - __pyx_array_type = &__pyx_type___pyx_array; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_array_type->tp_print = 0; - #endif - if (__Pyx_SetVtable(__pyx_array_type, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 114, __pyx_L1_error) - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_MergeVtables(__pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_setup_reduce((PyObject *) __pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_MemviewEnum_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_MemviewEnum_spec, NULL); if (unlikely(!__pyx_MemviewEnum_type)) __PYX_ERR(1, 302, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_MemviewEnum_spec, __pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) - #else - __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_MemviewEnum_type->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_MemviewEnum_type->tp_dictoffset && __pyx_MemviewEnum_type->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_MemviewEnum_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_setup_reduce((PyObject *) __pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) - #endif - __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; - __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; - __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; - __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; - __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; - __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct 
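/* The memoryview vtable gathers the C-level fast paths (item pointer
   computation, slice detection and assignment, indexed stores, and
   item<->object conversion) behind one indirection, presumably so that
   _memoryviewslice can selectively override entries further down. */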
__pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; - __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; - __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; - __pyx_vtable_memoryview._get_base = (PyObject *(*)(struct __pyx_memoryview_obj *))__pyx_memoryview__get_base; - #if CYTHON_USE_TYPE_SPECS - __pyx_memoryview_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_memoryview_spec, NULL); if (unlikely(!__pyx_memoryview_type)) __PYX_ERR(1, 337, __pyx_L1_error) - #if !CYTHON_COMPILING_IN_LIMITED_API - __pyx_memoryview_type->tp_as_buffer = &__pyx_tp_as_buffer_memoryview; - if (!__pyx_memoryview_type->tp_as_buffer->bf_releasebuffer && __pyx_memoryview_type->tp_base->tp_as_buffer && __pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer) { - __pyx_memoryview_type->tp_as_buffer->bf_releasebuffer = __pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer; - } - #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer) - /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */ - #elif defined(_MSC_VER) - #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.") - #else - #warning "The buffer protocol is not supported in the Limited C-API < 3.11." - #endif - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryview_spec, __pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) - #else - __pyx_memoryview_type = &__pyx_type___pyx_memoryview; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_memoryview_type->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_memoryview_type->tp_dictoffset && __pyx_memoryview_type->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_memoryview_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - #endif - if (__Pyx_SetVtable(__pyx_memoryview_type, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 337, __pyx_L1_error) - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_MergeVtables(__pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_setup_reduce((PyObject *) __pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) - #endif - __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; - __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; - __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; - __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; - __pyx_vtable__memoryviewslice.__pyx_base._get_base = (PyObject *(*)(struct __pyx_memoryview_obj *))__pyx_memoryviewslice__get_base; - #if CYTHON_USE_TYPE_SPECS - __pyx_t_1 = PyTuple_Pack(1, (PyObject *)__pyx_memoryview_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 952, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_memoryviewslice_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, 
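/* _memoryviewslice is created last, with memoryview packed into its bases
   tuple just above; its vtable starts as a copy of the memoryview vtable
   and overrides only item<->object conversion and base-object
   retrieval. */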
&__pyx_type___pyx_memoryviewslice_spec, __pyx_t_1); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_memoryviewslice_type)) __PYX_ERR(1, 952, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryviewslice_spec, __pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) - #else - __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - __pyx_memoryviewslice_type->tp_base = __pyx_memoryview_type; - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_memoryviewslice_type->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_memoryviewslice_type->tp_dictoffset && __pyx_memoryviewslice_type->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_memoryviewslice_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - #endif - if (__Pyx_SetVtable(__pyx_memoryviewslice_type, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 952, __pyx_L1_error) - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_MergeVtables(__pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if (__Pyx_setup_reduce((PyObject *) __pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) - #endif - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_core}, - {0, NULL} -}; -#endif - -#ifdef __cplusplus -namespace { - struct PyModuleDef __pyx_moduledef = - #else - static struct PyModuleDef __pyx_moduledef = - #endif - { - PyModuleDef_HEAD_INIT, - "core", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #elif CYTHON_USE_MODULE_STATE - sizeof(__pyx_mstate), /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - #if CYTHON_USE_MODULE_STATE - __pyx_m_traverse, /* m_traverse */ - __pyx_m_clear, /* m_clear */ - NULL /* m_free */ - #else - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ - #endif - }; - #ifdef __cplusplus -} /* anonymous namespace */ -#endif -#endif - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif 
PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initcore(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_core(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) -#else -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) -#endif -{ - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { -#if CYTHON_COMPILING_IN_LIMITED_API - result = PyModule_AddObject(module, to_name, value); -#else - result = PyDict_SetItemString(moddict, to_name, value); -#endif - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - CYTHON_UNUSED_VAR(def); - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; -#if CYTHON_COMPILING_IN_LIMITED_API - moddict = module; -#else - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; -#endif - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - int stringtab_initialized = 0; - #if CYTHON_USE_MODULE_STATE - int 
pystate_addmodule_run = 0; - #endif - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - static PyThread_type_lock __pyx_t_8[8]; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #elif CYTHON_USE_MODULE_STATE - __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - { - int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); - __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to core pseudovariable */ - if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - pystate_addmodule_run = 1; - } - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #endif - CYTHON_UNUSED_VAR(__pyx_t_1); - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - 
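-/* Module-creation note: __pyx_m can arrive here by two routes. Under PEP 489
- * multi-phase init (CYTHON_PEP489_MULTI_PHASE_INIT), PyInit_core() only
- * returns PyModuleDef_Init(&__pyx_moduledef) and the interpreter later calls
- * the Py_mod_create/Py_mod_exec slots, so this exec function receives an
- * already-created module; single-phase builds call PyModule_Create() above
- * instead. A minimal sketch of the same multi-phase pattern, using a
- * hypothetical module name "demo" (not Cython's actual code):
- *
- *   static int demo_exec(PyObject *m) {
- *       return PyModule_AddIntConstant(m, "answer", 42);
- *   }
- *   static PyModuleDef_Slot demo_slots[] = {
- *       {Py_mod_exec, (void *)demo_exec},
- *       {0, NULL}
- *   };
- *   static struct PyModuleDef demo_def = {
- *       PyModuleDef_HEAD_INIT, "demo", NULL, 0,
- *       NULL, demo_slots, NULL, NULL, NULL
- *   };
- *   PyMODINIT_FUNC PyInit_demo(void) { return PyModuleDef_Init(&demo_def); }
- */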
#endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Initialize various global constants etc. ---*/ - if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - stringtab_initialized = 1; - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_monotonic_align__core) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "monotonic_align.core")) { - if (unlikely((PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely((__Pyx_modinit_type_init_code() < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "View.MemoryView":99 - * - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: # <<<<<<<<<<<<<< - * if __import__("sys").version_info >= (3, 3): - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "View.MemoryView":100 - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: - * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - * else: - */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 100, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_version_info); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 100, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_tuple__11, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 100, __pyx_L2_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(1, 100, __pyx_L2_error) - __Pyx_DECREF(__pyx_t_4); 
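-/* This block is the compiled form of the try/except quoted in the
- * "View.MemoryView" comments: __Pyx_ExceptionSave snapshots the active
- * exception state into __pyx_t_1..__pyx_t_3, any failure jumps to
- * __pyx_L2_error where the handler runs, and __Pyx_ExceptionReset restores
- * the snapshot afterwards, reproducing Python's try/except semantics in C. */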
__pyx_t_4 = 0; - if (__pyx_t_6) { - - /* "View.MemoryView":101 - * try: - * if __import__("sys").version_info >= (3, 3): - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence # <<<<<<<<<<<<<< - * else: - * __pyx_collections_abc_Sequence = __import__("collections").Sequence - */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_abc); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 101, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XGOTREF(__pyx_collections_abc_Sequence); - __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":100 - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: - * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":103 - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - * else: - * __pyx_collections_abc_Sequence = __import__("collections").Sequence # <<<<<<<<<<<<<< - * except: - * - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 103, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 103, __pyx_L2_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XGOTREF(__pyx_collections_abc_Sequence); - __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":99 - * - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: # <<<<<<<<<<<<<< - * if __import__("sys").version_info >= (3, 3): - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L7_try_end; - __pyx_L2_error:; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "View.MemoryView":104 - * else: - * __pyx_collections_abc_Sequence = __import__("collections").Sequence - * except: # <<<<<<<<<<<<<< - * - * __pyx_collections_abc_Sequence = None - */ - /*except:*/ { - __Pyx_AddTraceback("View.MemoryView", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(1, 104, __pyx_L4_except_error) - __Pyx_XGOTREF(__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_7); - - /* "View.MemoryView":106 - * except: - * - * __pyx_collections_abc_Sequence = None # <<<<<<<<<<<<<< - * - * - */ - __Pyx_INCREF(Py_None); - __Pyx_XGOTREF(__pyx_collections_abc_Sequence); - __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, Py_None); - __Pyx_GIVEREF(Py_None); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - goto 
__pyx_L3_exception_handled; - } - - /* "View.MemoryView":99 - * - * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" - * try: # <<<<<<<<<<<<<< - * if __import__("sys").version_info >= (3, 3): - * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence - */ - __pyx_L4_except_error:; - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L3_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - __pyx_L7_try_end:; - } - - /* "View.MemoryView":241 - * - * - * try: # <<<<<<<<<<<<<< - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_1); - /*try:*/ { - - /* "View.MemoryView":242 - * - * try: - * count = __pyx_collections_abc_Sequence.count # <<<<<<<<<<<<<< - * index = __pyx_collections_abc_Sequence.index - * except: - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_count); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 242, __pyx_L11_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_count, __pyx_t_7) < 0) __PYX_ERR(1, 242, __pyx_L11_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":243 - * try: - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index # <<<<<<<<<<<<<< - * except: - * pass - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_index); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 243, __pyx_L11_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_index, __pyx_t_7) < 0) __PYX_ERR(1, 243, __pyx_L11_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":241 - * - * - * try: # <<<<<<<<<<<<<< - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L16_try_end; - __pyx_L11_error:; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":244 - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - * except: # <<<<<<<<<<<<<< - * pass - * - */ - /*except:*/ { - __Pyx_ErrRestore(0,0,0); - goto __pyx_L12_exception_handled; - } - __pyx_L12_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); - __pyx_L16_try_end:; - } - - /* "View.MemoryView":309 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 309, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(generic); - __Pyx_DECREF_SET(generic, __pyx_t_7); - 
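-/* generic, strided, indirect, contiguous and indirect_contiguous are
- * module-level Enum sentinels describing memoryview layouts; downstream code
- * compares them by object identity rather than by value. A little further
- * down, a pool of eight PyThread_type_lock handles is pre-allocated so that
- * memoryview bookkeeping can usually take a ready-made lock instead of
- * allocating one. Simplified sketch of the underlying pythread API (not the
- * exact call sites used here):
- *
- *   PyThread_type_lock lock = PyThread_allocate_lock();
- *   if (lock) {
- *       PyThread_acquire_lock(lock, WAIT_LOCK);
- *       ... critical section ...
- *       PyThread_release_lock(lock);
- *       PyThread_free_lock(lock);
- *   }
- */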
__Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":310 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 310, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(strided); - __Pyx_DECREF_SET(strided, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":311 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(indirect); - __Pyx_DECREF_SET(indirect, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":314 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(contiguous); - __Pyx_DECREF_SET(contiguous, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":315 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(indirect_contiguous); - __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":323 - * - * - * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< - * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [ - * PyThread_allocate_lock(), - */ - __pyx_memoryview_thread_locks_used = 0; - - /* "View.MemoryView":324 - * - * cdef int __pyx_memoryview_thread_locks_used = 0 - * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< - * PyThread_allocate_lock(), - * PyThread_allocate_lock(), - */ - __pyx_t_8[0] = PyThread_allocate_lock(); - __pyx_t_8[1] = PyThread_allocate_lock(); - __pyx_t_8[2] = PyThread_allocate_lock(); - __pyx_t_8[3] = PyThread_allocate_lock(); - __pyx_t_8[4] = PyThread_allocate_lock(); - __pyx_t_8[5] = PyThread_allocate_lock(); - __pyx_t_8[6] = PyThread_allocate_lock(); - __pyx_t_8[7] = PyThread_allocate_lock(); - memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_8, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); - - /* "View.MemoryView":982 - * - * - * try: # <<<<<<<<<<<<<< - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "View.MemoryView":983 - * - * try: - * count = __pyx_collections_abc_Sequence.count # <<<<<<<<<<<<<< - * index = __pyx_collections_abc_Sequence.index - * except: - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_count); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 983, __pyx_L17_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, 
__pyx_n_s_count, __pyx_t_7) < 0) __PYX_ERR(1, 983, __pyx_L17_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "View.MemoryView":984 - * try: - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index # <<<<<<<<<<<<<< - * except: - * pass - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_index); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 984, __pyx_L17_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_index, __pyx_t_7) < 0) __PYX_ERR(1, 984, __pyx_L17_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "View.MemoryView":982 - * - * - * try: # <<<<<<<<<<<<<< - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L22_try_end; - __pyx_L17_error:; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":985 - * count = __pyx_collections_abc_Sequence.count - * index = __pyx_collections_abc_Sequence.index - * except: # <<<<<<<<<<<<<< - * pass - * - */ - /*except:*/ { - __Pyx_ErrRestore(0,0,0); - goto __pyx_L18_exception_handled; - } - __pyx_L18_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - __pyx_L22_try_end:; - } - - /* "View.MemoryView":988 - * pass - * - * try: # <<<<<<<<<<<<<< - * if __pyx_collections_abc_Sequence: - * - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_1); - /*try:*/ { - - /* "View.MemoryView":989 - * - * try: - * if __pyx_collections_abc_Sequence: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_collections_abc_Sequence); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(1, 989, __pyx_L23_error) - if (__pyx_t_6) { - - /* "View.MemoryView":993 - * - * - * __pyx_collections_abc_Sequence.register(_memoryviewslice) # <<<<<<<<<<<<<< - * __pyx_collections_abc_Sequence.register(array) - * except: - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_register); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 993, __pyx_L23_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_7, ((PyObject *)__pyx_memoryviewslice_type)); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 993, __pyx_L23_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":994 - * - * __pyx_collections_abc_Sequence.register(_memoryviewslice) - * __pyx_collections_abc_Sequence.register(array) # <<<<<<<<<<<<<< - * except: - * pass # ignore failure, it's a minor issue - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_register); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 994, __pyx_L23_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_4, ((PyObject *)__pyx_array_type)); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 994, __pyx_L23_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 
0; - - /* "View.MemoryView":989 - * - * try: - * if __pyx_collections_abc_Sequence: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":988 - * pass - * - * try: # <<<<<<<<<<<<<< - * if __pyx_collections_abc_Sequence: - * - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L28_try_end; - __pyx_L23_error:; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":995 - * __pyx_collections_abc_Sequence.register(_memoryviewslice) - * __pyx_collections_abc_Sequence.register(array) - * except: # <<<<<<<<<<<<<< - * pass # ignore failure, it's a minor issue - * - */ - /*except:*/ { - __Pyx_ErrRestore(0,0,0); - goto __pyx_L24_exception_handled; - } - __pyx_L24_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); - __pyx_L28_try_end:; - } - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_t_7 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_7) < 0) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - __pyx_k__9 = (-1e9); - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_15monotonic_align_4core_1maximum_path_c, 0, __pyx_n_s_maximum_path_c, NULL, __pyx_n_s_monotonic_align_core, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_maximum_path_c, __pyx_t_7) < 0) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "monotonic_align/core.pyx":1 - * cimport cython # <<<<<<<<<<<<<< - * from cython.parallel import prange - * - */ - __pyx_t_7 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_7) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_7); - if (__pyx_m) { - if (__pyx_d && stringtab_initialized) { - __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - #if !CYTHON_USE_MODULE_STATE - Py_CLEAR(__pyx_m); - #else - Py_DECREF(__pyx_m); - if (pystate_addmodule_run) { - PyObject *tp, *value, *tb; - PyErr_Fetch(&tp, &value, &tb); - PyState_RemoveModule(&__pyx_moduledef); - PyErr_Restore(tp, value, tb); - } - #endif 
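-/* Init error path: if anything above failed after the module object existed,
- * a synthetic traceback frame ("init monotonic_align.core") is added and the
- * half-built module is torn down (Py_CLEAR, plus PyState_RemoveModule when
- * module state is in use), so a failed import does not leave a half-built
- * module behind; absent a more specific exception, the caller sees
- * ImportError: init monotonic_align.core. On success the module's public
- * surface is just maximum_path_c, the __pyx_unpickle_Enum helper and the
- * __test__ dict registered above. */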
- } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} -/* #### Code section: cleanup_globals ### */ -/* #### Code section: cleanup_module ### */ -/* #### Code section: main_method ### */ -/* #### Code section: utility_code_pragmas ### */ -#ifdef _MSC_VER -#pragma warning( push ) -/* Warning 4127: conditional expression is constant - * Cython uses constant conditional expressions to allow in inline functions to be optimized at - * compile-time, so this warning is not useful - */ -#pragma warning( disable : 4127 ) -#endif - - - -/* #### Code section: utility_code_def ### */ - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - int result; - PyObject *exc_type; -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject *current_exception = tstate->current_exception; - if (unlikely(!current_exception)) return 0; - exc_type = (PyObject*) Py_TYPE(current_exception); - if (exc_type == err) return 1; -#else - exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; -#endif - #if CYTHON_AVOID_BORROWED_REFS - Py_INCREF(exc_type); - #endif - if (unlikely(PyTuple_Check(err))) { - result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - } else { - result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); - } - #if CYTHON_AVOID_BORROWED_REFS - Py_DECREF(exc_type); - #endif - return result; -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject *tmp_value; - assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); - if (value) { - #if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) - #endif - PyException_SetTraceback(value, tb); - } - tmp_value = tstate->current_exception; - tstate->current_exception = value; - Py_XDECREF(tmp_value); -#else - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#endif -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { -#if PY_VERSION_HEX >= 0x030C00A6 - PyObject* exc_value; - exc_value = tstate->current_exception; - tstate->current_exception = 0; - *value = exc_value; - *type = NULL; - *tb = NULL; - if (exc_value) { - *type = (PyObject*) Py_TYPE(exc_value); - Py_INCREF(*type); - #if CYTHON_COMPILING_IN_CPYTHON - *tb = ((PyBaseExceptionObject*)
exc_value)->traceback; - Py_XINCREF(*tb); - #else - *tb = PyException_GetTraceback(exc_value); - #endif - } -#else - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#endif -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); - if (unlikely(!result) && !PyErr_Occurred()) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* TupleAndListFromArray */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { - PyObject *v; - Py_ssize_t i; - for (i = 0; i < length; i++) { - v = dest[i] = src[i]; - Py_INCREF(v); - } -} -static CYTHON_INLINE PyObject * -__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - Py_INCREF(__pyx_empty_tuple); - return __pyx_empty_tuple; - } - res = PyTuple_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); - return res; -} -static CYTHON_INLINE PyObject * -__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - return PyList_New(0); - } - res = PyList_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); - return res; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return 
(equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? (result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* fastcall */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) -{ - Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); - for (i = 0; i < n; i++) - { - if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; - } - for (i = 0; i < n; i++) - { - int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); - if (unlikely(eq != 0)) { - if (unlikely(eq < 0)) return NULL; // error - return kwvalues[i]; - } - } - return NULL; // not found (no exception set) -} -#endif - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); - while (1) { - if (kwds_is_tuple) { - if (pos >= PyTuple_GET_SIZE(kwds)) break; - key = PyTuple_GET_ITEM(kwds, pos); - value = kwvalues[pos]; - pos++; - } - else - { - if (!PyDict_Next(kwds, &pos, &key, &value)) break; - } - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = ( - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key) - ); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - __Pyx_TypeName type_name; - __Pyx_TypeName obj_type_name; - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - type_name = __Pyx_PyType_GetName(type); - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME - ", got " __Pyx_FMT_TYPENAME ")", name, type_name, obj_type_name); - __Pyx_DECREF_TypeName(type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return 0; -} - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - __Pyx_PyThreadState_declare - CYTHON_UNUSED_VAR(cause); - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } 
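-/* Raise normalisation, mirroring the behaviour of the raise statement:
- * raising a class instantiates it (an explicit value is wrapped in a 1-tuple
- * of constructor args unless it is already an instance or a tuple), raising
- * an instance forbids a separate value, and the result must derive from
- * BaseException; a non-None cause is attached below via
- * PyException_SetCause, i.e. the C equivalent of `raise X from Y`. */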
else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { - #if PY_VERSION_HEX >= 0x030C00A6 - PyException_SetTraceback(value, tb); - #elif CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#else - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. 
- */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? PyDict_Size(kwargs) : 0; - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = 
PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectFastCall */ -static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { - PyObject *argstuple; - PyObject *result; - size_t i; - argstuple = PyTuple_New((Py_ssize_t)nargs); - if (unlikely(!argstuple)) return NULL; - for (i = 0; i < nargs; i++) { - Py_INCREF(args[i]); - PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]); - } - result = __Pyx_PyObject_Call(func, argstuple, kwargs); - Py_DECREF(argstuple); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { - Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); -#if CYTHON_COMPILING_IN_CPYTHON - if (nargs == 0 && kwargs == NULL) { -#if defined(__Pyx_CyFunction_USED) && defined(NDEBUG) - if (__Pyx_IsCyOrPyCFunction(func)) -#else - if (PyCFunction_Check(func)) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - } - else if (nargs == 1 && kwargs == NULL) { - if (PyCFunction_Check(func)) - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, args[0]); - } - } - } -#endif - #if PY_VERSION_HEX < 0x030800B1 - #if CYTHON_FAST_PYCCALL - if (PyCFunction_Check(func)) { - if (kwargs) { - return _PyCFunction_FastCallDict(func, args, nargs, kwargs); - } else { - return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); - } - } - #if PY_VERSION_HEX >= 0x030700A1 - if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { - return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); - } - #endif - #endif - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); - } - #endif - #endif - #if CYTHON_VECTORCALL - vectorcallfunc f = _PyVectorcall_Function(func); - if (f) { - return f(func, args, (size_t)nargs, kwargs); - } - #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL - if (__Pyx_CyFunction_CheckExact(func)) { - __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); - if (f) return f(func, args, (size_t)nargs, kwargs); - } - #endif - if (nargs == 0) { - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); - } - return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); -} - -/* RaiseUnexpectedTypeError */ -static int -__Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj) -{ - __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, "Expected %s, got " __Pyx_FMT_TYPENAME, - expected, obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return 0; -} - -/* CIntToDigits */ -static const char DIGIT_PAIRS_10[2*10*10+1] = { - "00010203040506070809" - "10111213141516171819" - "20212223242526272829" - "30313233343536373839" - "40414243444546474849" - "50515253545556575859" - "60616263646566676869" - "70717273747576777879" - "80818283848586878889" - "90919293949596979899" -}; -static const char DIGIT_PAIRS_8[2*8*8+1] = { - "0001020304050607" - "1011121314151617" - "2021222324252627" - "3031323334353637" - "4041424344454647" - "5051525354555657" - 
"6061626364656667" - "7071727374757677" -}; -static const char DIGITS_HEX[2*16+1] = { - "0123456789abcdef" - "0123456789ABCDEF" -}; - -/* BuildPyUnicode */ -static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, - int prepend_sign, char padding_char) { - PyObject *uval; - Py_ssize_t uoffset = ulength - clength; -#if CYTHON_USE_UNICODE_INTERNALS - Py_ssize_t i; -#if CYTHON_PEP393_ENABLED - void *udata; - uval = PyUnicode_New(ulength, 127); - if (unlikely(!uval)) return NULL; - udata = PyUnicode_DATA(uval); -#else - Py_UNICODE *udata; - uval = PyUnicode_FromUnicode(NULL, ulength); - if (unlikely(!uval)) return NULL; - udata = PyUnicode_AS_UNICODE(uval); -#endif - if (uoffset > 0) { - i = 0; - if (prepend_sign) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); - i++; - } - for (; i < uoffset; i++) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); - } - } - for (i=0; i < clength; i++) { - __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); - } -#else - { - PyObject *sign = NULL, *padding = NULL; - uval = NULL; - if (uoffset > 0) { - prepend_sign = !!prepend_sign; - if (uoffset > prepend_sign) { - padding = PyUnicode_FromOrdinal(padding_char); - if (likely(padding) && uoffset > prepend_sign + 1) { - PyObject *tmp; - PyObject *repeat = PyInt_FromSsize_t(uoffset - prepend_sign); - if (unlikely(!repeat)) goto done_or_error; - tmp = PyNumber_Multiply(padding, repeat); - Py_DECREF(repeat); - Py_DECREF(padding); - padding = tmp; - } - if (unlikely(!padding)) goto done_or_error; - } - if (prepend_sign) { - sign = PyUnicode_FromOrdinal('-'); - if (unlikely(!sign)) goto done_or_error; - } - } - uval = PyUnicode_DecodeASCII(chars, clength, NULL); - if (likely(uval) && padding) { - PyObject *tmp = PyNumber_Add(padding, uval); - Py_DECREF(uval); - uval = tmp; - } - if (likely(uval) && sign) { - PyObject *tmp = PyNumber_Add(sign, uval); - Py_DECREF(uval); - uval = tmp; - } -done_or_error: - Py_XDECREF(padding); - Py_XDECREF(sign); - } -#endif - return uval; -} - -/* CIntToPyUnicode */ -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char) { - char digits[sizeof(int)*3+2]; - char *dpos, *end = digits + sizeof(int)*3+2; - const char *hex_digits = DIGITS_HEX; - Py_ssize_t length, ulength; - int prepend_sign, last_one_off; - int remaining; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (format_char == 'X') { - hex_digits += 16; - format_char = 'x'; - } - remaining = value; - last_one_off = 0; - dpos = end; - do { - int digit_pos; - switch (format_char) { - case 'o': - digit_pos = abs((int)(remaining % (8*8))); - remaining = (int) (remaining / (8*8)); - dpos -= 2; - memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); - last_one_off = (digit_pos < 8); - break; - case 'd': - digit_pos = abs((int)(remaining % (10*10))); - remaining = (int) (remaining / (10*10)); - dpos -= 2; - memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); - last_one_off = (digit_pos < 10); - break; - case 'x': - *(--dpos) = hex_digits[abs((int)(remaining % 16))]; - remaining = (int) (remaining / 16); - break; - default: - assert(0); - break; - } - } while (unlikely(remaining != 0)); - assert(!last_one_off || *dpos == '0'); - dpos += last_one_off; 
- length = end - dpos; - ulength = length; - prepend_sign = 0; - if (!is_unsigned && value <= neg_one) { - if (padding_char == ' ' || width <= length + 1) { - *(--dpos) = '-'; - ++length; - } else { - prepend_sign = 1; - } - ++ulength; - } - if (width > ulength) { - ulength = width; - } - if (ulength == 1) { - return PyUnicode_FromOrdinal(*dpos); - } - return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); -} - -/* CIntToPyUnicode */ -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char) { - char digits[sizeof(Py_ssize_t)*3+2]; - char *dpos, *end = digits + sizeof(Py_ssize_t)*3+2; - const char *hex_digits = DIGITS_HEX; - Py_ssize_t length, ulength; - int prepend_sign, last_one_off; - Py_ssize_t remaining; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const Py_ssize_t neg_one = (Py_ssize_t) -1, const_zero = (Py_ssize_t) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (format_char == 'X') { - hex_digits += 16; - format_char = 'x'; - } - remaining = value; - last_one_off = 0; - dpos = end; - do { - int digit_pos; - switch (format_char) { - case 'o': - digit_pos = abs((int)(remaining % (8*8))); - remaining = (Py_ssize_t) (remaining / (8*8)); - dpos -= 2; - memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); - last_one_off = (digit_pos < 8); - break; - case 'd': - digit_pos = abs((int)(remaining % (10*10))); - remaining = (Py_ssize_t) (remaining / (10*10)); - dpos -= 2; - memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); - last_one_off = (digit_pos < 10); - break; - case 'x': - *(--dpos) = hex_digits[abs((int)(remaining % 16))]; - remaining = (Py_ssize_t) (remaining / 16); - break; - default: - assert(0); - break; - } - } while (unlikely(remaining != 0)); - assert(!last_one_off || *dpos == '0'); - dpos += last_one_off; - length = end - dpos; - ulength = length; - prepend_sign = 0; - if (!is_unsigned && value <= neg_one) { - if (padding_char == ' ' || width <= length + 1) { - *(--dpos) = '-'; - ++length; - } else { - prepend_sign = 1; - } - ++ulength; - } - if (width > ulength) { - ulength = width; - } - if (ulength == 1) { - return PyUnicode_FromOrdinal(*dpos); - } - return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); -} - -/* JoinPyUnicode */ -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - Py_UCS4 max_char) { -#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - PyObject *result_uval; - int result_ukind, kind_shift; - Py_ssize_t i, char_pos; - void *result_udata; - CYTHON_MAYBE_UNUSED_VAR(max_char); -#if CYTHON_PEP393_ENABLED - result_uval = PyUnicode_New(result_ulength, max_char); - if (unlikely(!result_uval)) return NULL; - result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; - kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 2 : result_ukind - 1; - result_udata = PyUnicode_DATA(result_uval); -#else - result_uval = PyUnicode_FromUnicode(NULL, result_ulength); - if (unlikely(!result_uval)) return NULL; - result_ukind = sizeof(Py_UNICODE); - kind_shift = (result_ukind == 4) ? 
2 : result_ukind - 1; - result_udata = PyUnicode_AS_UNICODE(result_uval); -#endif - assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0); - char_pos = 0; - for (i=0; i < value_count; i++) { - int ukind; - Py_ssize_t ulength; - void *udata; - PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); - if (unlikely(__Pyx_PyUnicode_READY(uval))) - goto bad; - ulength = __Pyx_PyUnicode_GET_LENGTH(uval); - if (unlikely(!ulength)) - continue; - if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos)) - goto overflow; - ukind = __Pyx_PyUnicode_KIND(uval); - udata = __Pyx_PyUnicode_DATA(uval); - if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { - memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift)); - } else { - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) - _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); - #else - Py_ssize_t j; - for (j=0; j < ulength; j++) { - Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); - __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); - } - #endif - } - char_pos += ulength; - } - return result_uval; -overflow: - PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); -bad: - Py_DECREF(result_uval); - return NULL; -#else - CYTHON_UNUSED_VAR(max_char); - CYTHON_UNUSED_VAR(result_ulength); - CYTHON_UNUSED_VAR(value_count); - return PyUnicode_Join(__pyx_empty_unicode, value_tuple); -#endif -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (unlikely(!j)) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || 
PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; - PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; - if (mm && mm->mp_subscript) { - PyObject *r, *key = PyInt_FromSsize_t(i); - if (unlikely(!key)) return NULL; - r = mm->mp_subscript(o, key); - Py_DECREF(key); - return r; - } - if (likely(sm && sm->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { - Py_ssize_t l = sm->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return sm->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* PyObjectCallOneArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *args[2] = {NULL, arg}; - return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { - PyObject *runerr = NULL; - Py_ssize_t key_value; - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, - "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); - __Pyx_DECREF_TypeName(index_type_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { - __Pyx_TypeName obj_type_name; - if (likely(PyType_Check(obj))) { - PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); - if (meth) { - PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); - Py_DECREF(meth); - return result; - } - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { - PyTypeObject *tp = Py_TYPE(obj); - PyMappingMethods *mm = tp->tp_as_mapping; - PySequenceMethods *sm = tp->tp_as_sequence; - if (likely(mm && mm->mp_subscript)) { - return mm->mp_subscript(obj, key); - } - if (likely(sm && sm->sq_item)) { - return __Pyx_PyObject_GetIndex(obj, key); - } - return __Pyx_PyObject_GetItem_Slow(obj, key); -} -#endif - -/* KeywordStringCheck */ -static int __Pyx_CheckKeywordStrings( - PyObject *kw, - const char* function_name, - int kw_allowed) -{ - PyObject* key = 0; - Py_ssize_t pos = 0; -#if CYTHON_COMPILING_IN_PYPY - if (!kw_allowed && PyDict_Next(kw, &pos, &key, 0)) - goto invalid_keyword; - return 1; -#else - if (CYTHON_METH_FASTCALL && 
likely(PyTuple_Check(kw))) { - if (unlikely(PyTuple_GET_SIZE(kw) == 0)) - return 1; - if (!kw_allowed) { - key = PyTuple_GET_ITEM(kw, 0); - goto invalid_keyword; - } -#if PY_VERSION_HEX < 0x03090000 - for (pos = 0; pos < PyTuple_GET_SIZE(kw); pos++) { - key = PyTuple_GET_ITEM(kw, pos); - if (unlikely(!PyUnicode_Check(key))) - goto invalid_keyword_type; - } -#endif - return 1; - } - while (PyDict_Next(kw, &pos, &key, 0)) { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_Check(key))) - #endif - if (unlikely(!PyUnicode_Check(key))) - goto invalid_keyword_type; - } - if (!kw_allowed && unlikely(key)) - goto invalid_keyword; - return 1; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - return 0; -#endif -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif - return 0; -} - -/* DivInt[Py_ssize_t] */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* GetAttr3 */ -static PyObject *__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r; -#if CYTHON_USE_TYPE_SLOTS - if (likely(PyString_Check(n))) { - r = __Pyx_PyObject_GetAttrStrNoError(o, n); - if (unlikely(!r) && likely(!PyErr_Occurred())) { - r = __Pyx_NewRef(d); - } - return r; - } -#endif - r = PyObject_GetAttr(o, n); - return (likely(r)) ? r : __Pyx_GetAttr3Default(d); -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#elif CYTHON_COMPILING_IN_LIMITED_API - if (unlikely(!__pyx_m)) { - return NULL; - } - result = PyObject_GetAttr(__pyx_m, name); - if (likely(result)) { - return result; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* ExtTypeTest */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - __Pyx_TypeName obj_type_name; - __Pyx_TypeName type_name; - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - type_name = __Pyx_PyType_GetName(type); - PyErr_Format(PyExc_TypeError, - "Cannot convert " __Pyx_FMT_TYPENAME " to " __Pyx_FMT_TYPENAME, - obj_type_name, type_name); - __Pyx_DECREF_TypeName(obj_type_name); - __Pyx_DECREF_TypeName(type_name); - return 0; -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - PyObject *exc_value = exc_info->exc_value; - if (exc_value == NULL || exc_value == Py_None) { - *value = NULL; - *type = NULL; - *tb = NULL; - } else { - *value = exc_value; - Py_INCREF(*value); - *type = (PyObject*) Py_TYPE(exc_value); - Py_INCREF(*type); - *tb = PyException_GetTraceback(exc_value); - } - #elif CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); - #endif -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = tstate->exc_info; - PyObject *tmp_value = exc_info->exc_value; - exc_info->exc_value = value; - Py_XDECREF(tmp_value); - Py_XDECREF(type); - Py_XDECREF(tb); - #else - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); - #endif -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type 
= NULL, *local_value, *local_tb = NULL; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if PY_VERSION_HEX >= 0x030C00A6 - local_value = tstate->current_exception; - tstate->current_exception = 0; - if (likely(local_value)) { - local_type = (PyObject*) Py_TYPE(local_value); - Py_INCREF(local_type); - local_tb = PyException_GetTraceback(local_value); - } - #else - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; - #endif -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE && PY_VERSION_HEX >= 0x030C00A6 - if (unlikely(tstate->current_exception)) -#elif CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - #if PY_VERSION_HEX >= 0x030B00a4 - tmp_value = exc_info->exc_value; - exc_info->exc_value = local_value; - tmp_type = NULL; - tmp_tb = NULL; - Py_XDECREF(local_type); - Py_XDECREF(local_tb); - #else - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - #endif - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_value = exc_info->exc_value; - exc_info->exc_value = *value; - if (tmp_value == NULL || tmp_value == Py_None) { - Py_XDECREF(tmp_value); - tmp_value = NULL; - tmp_type = NULL; - tmp_tb = NULL; - } else { - tmp_type = (PyObject*) Py_TYPE(tmp_value); - Py_INCREF(tmp_type); - #if CYTHON_COMPILING_IN_CPYTHON - tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback; - Py_XINCREF(tmp_tb); - #else - tmp_tb = PyException_GetTraceback(tmp_value); - #endif - } - #elif CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - 
tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *module = 0; - PyObject *empty_dict = 0; - PyObject *empty_list = 0; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (unlikely(!py_import)) - goto bad; - if (!from_list) { - empty_list = PyList_New(0); - if (unlikely(!empty_list)) - goto bad; - from_list = empty_list; - } - #endif - empty_dict = PyDict_New(); - if (unlikely(!empty_dict)) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, 1); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, 1); - #endif - if (unlikely(!module)) { - if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (unlikely(!py_level)) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, level); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, level); - #endif - #endif - } - } -bad: - Py_XDECREF(empty_dict); - Py_XDECREF(empty_list); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - return module; -} - -/* ImportDottedModule */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { - PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; - if (unlikely(PyErr_Occurred())) { - PyErr_Clear(); - } - if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { - partial_name = name; - } else { - slice = PySequence_GetSlice(parts_tuple, 0, count); - if (unlikely(!slice)) - goto bad; - sep = PyUnicode_FromStringAndSize(".", 1); - if (unlikely(!sep)) - goto bad; - partial_name = PyUnicode_Join(sep, slice); - } - PyErr_Format( -#if PY_MAJOR_VERSION < 3 - PyExc_ImportError, - "No module named '%s'", PyString_AS_STRING(partial_name)); -#else -#if PY_VERSION_HEX >= 0x030600B1 - PyExc_ModuleNotFoundError, -#else - PyExc_ImportError, -#endif - "No module named '%U'", partial_name); -#endif -bad: - Py_XDECREF(sep); - Py_XDECREF(slice); - Py_XDECREF(partial_name); - return NULL; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { - PyObject *imported_module; -#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - return NULL; - imported_module = __Pyx_PyDict_GetItemStr(modules, name); - Py_XINCREF(imported_module); -#else - imported_module = PyImport_GetModule(name); -#endif - return 
imported_module; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) { - Py_ssize_t i, nparts; - nparts = PyTuple_GET_SIZE(parts_tuple); - for (i=1; i < nparts && module; i++) { - PyObject *part, *submodule; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - part = PyTuple_GET_ITEM(parts_tuple, i); -#else - part = PySequence_ITEM(parts_tuple, i); -#endif - submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(part); -#endif - Py_DECREF(module); - module = submodule; - } - if (unlikely(!module)) { - return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); - } - return module; -} -#endif -static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if PY_MAJOR_VERSION < 3 - PyObject *module, *from_list, *star = __pyx_n_s__3; - CYTHON_UNUSED_VAR(parts_tuple); - from_list = PyList_New(1); - if (unlikely(!from_list)) - return NULL; - Py_INCREF(star); - PyList_SET_ITEM(from_list, 0, star); - module = __Pyx_Import(name, from_list, 0); - Py_DECREF(from_list); - return module; -#else - PyObject *imported_module; - PyObject *module = __Pyx_Import(name, NULL, 0); - if (!parts_tuple || unlikely(!module)) - return module; - imported_module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(imported_module)) { - Py_DECREF(module); - return imported_module; - } - PyErr_Clear(); - return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); -#endif -} -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 - PyObject *module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(module)) { - PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); - if (likely(spec)) { - PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); - if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { - Py_DECREF(spec); - spec = NULL; - } - Py_XDECREF(unsafe); - } - if (likely(!spec)) { - PyErr_Clear(); - return module; - } - Py_DECREF(spec); - Py_DECREF(module); - } else if (PyErr_Occurred()) { - PyErr_Clear(); - } -#endif - return __Pyx__ImportDottedModule(name, parts_tuple); -} - -/* ssize_strlen */ -static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) { - size_t len = strlen(s); - if (unlikely(len > PY_SSIZE_T_MAX)) { - PyErr_SetString(PyExc_OverflowError, "byte string is too long"); - return -1; - } - return (Py_ssize_t) len; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (cls == a || cls == b) return 1; - mro = cls->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - PyObject *base = PyTuple_GET_ITEM(mro, i); - if 
(base == (PyObject *)a || base == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - if (exc_type1) { - return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); - } else { - return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { - if (likely(err == exc_type)) return 1; - if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } - } - return PyErr_GivenExceptionMatches(err, exc_type); -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - if (likely(err == exc_type1 || err == exc_type2)) return 1; - if (likely(PyExceptionClass_Check(err))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); - } - return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); -} -#endif - -/* PySequenceMultiply */ -static PyObject* __Pyx_PySequence_Multiply_Generic(PyObject *seq, Py_ssize_t mul) { - PyObject *result, *pymul = PyInt_FromSsize_t(mul); - if (unlikely(!pymul)) - return NULL; - result = PyNumber_Multiply(seq, pymul); - Py_DECREF(pymul); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul) { - PyTypeObject *type = Py_TYPE(seq); -#if CYTHON_USE_TYPE_SLOTS - if (likely(type->tp_as_sequence && type->tp_as_sequence->sq_repeat)) { - return type->tp_as_sequence->sq_repeat(seq, mul); - } else -#endif - { - return __Pyx_PySequence_Multiply_Generic(seq, mul); - } -} - -/* SetItemInt */ -static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { - int r; - if (unlikely(!j)) return -1; - r = PyObject_SetItem(o, j, v); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, - CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o)); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { - PyObject* old = PyList_GET_ITEM(o, n); - Py_INCREF(v); - PyList_SET_ITEM(o, n, v); - Py_DECREF(old); - return 1; - } - } else { - PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; - PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; - if (mm && mm->mp_ass_subscript) { - int r; - PyObject *key = PyInt_FromSsize_t(i); - if (unlikely(!key)) return -1; - r = mm->mp_ass_subscript(o, key, v); - Py_DECREF(key); - return r; - } - if (likely(sm && sm->sq_ass_item)) { - if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { - Py_ssize_t l = sm->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return -1; - PyErr_Clear(); - } - } - return sm->sq_ass_item(o, i, v); - } - } -#else -#if CYTHON_COMPILING_IN_PYPY - if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) -#else - if (is_list || PySequence_Check(o)) -#endif - { - return PySequence_SetItem(o, i, v); - } -#endif - return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); -} - -/* RaiseUnboundLocalError */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -/* DivInt[long] */ -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - const char* module_name_str = 0; - PyObject* module_name = 0; - PyObject* module_dot = 0; - PyObject* full_name = 0; - PyErr_Clear(); - module_name_str = PyModule_GetName(module); - if (unlikely(!module_name_str)) { goto modbad; } - module_name = PyUnicode_FromString(module_name_str); - if (unlikely(!module_name)) { goto modbad; } - module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__2); - if (unlikely(!module_dot)) { goto modbad; } - full_name = PyUnicode_Concat(module_dot, name); - if (unlikely(!full_name)) { goto modbad; } - #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - { - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - goto modbad; - value = PyObject_GetItem(modules, full_name); - } - #else - value = PyImport_GetModule(full_name); - #endif - modbad: - Py_XDECREF(full_name); - Py_XDECREF(module_dot); - Py_XDECREF(module_name); - } - if (unlikely(!value)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (!r) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* ErrOccurredWithGIL */ -static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) { - int err; - #ifdef WITH_THREAD - PyGILState_STATE _save = PyGILState_Ensure(); - #endif - err = !!PyErr_Occurred(); - #ifdef WITH_THREAD - PyGILState_Release(_save); - #endif - return err; -} - -/* 
PyObject_GenericGetAttrNoDict */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - __Pyx_TypeName type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, attr_name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(attr_name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* PyObject_GenericGetAttr */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - -/* FixUpExtensionType */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { -#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API - CYTHON_UNUSED_VAR(spec); - CYTHON_UNUSED_VAR(type); -#else - const PyType_Slot *slot = spec->slots; - while (slot && slot->slot && slot->slot != Py_tp_members) - slot++; - if (slot && slot->slot == Py_tp_members) { - int changed = 0; -#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) - const -#endif - PyMemberDef *memb = (PyMemberDef*) slot->pfunc; - while (memb && memb->name) { - if (memb->name[0] == '_' && memb->name[1] == '_') { -#if PY_VERSION_HEX < 0x030900b1 - if (strcmp(memb->name, "__weaklistoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_weaklistoffset = memb->offset; - changed = 1; - } - else if (strcmp(memb->name, "__dictoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_dictoffset = memb->offset; - changed = 1; - } -#if CYTHON_METH_FASTCALL - else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); -#if PY_VERSION_HEX >= 0x030800b4 - type->tp_vectorcall_offset = memb->offset; -#else - type->tp_print = (printfunc) memb->offset; -#endif - changed = 1; - } -#endif -#else - if ((0)); -#endif -#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON - else if (strcmp(memb->name, "__module__") == 0) { - PyObject *descr; - assert(memb->type == T_OBJECT); - assert(memb->flags == 0 || memb->flags == READONLY); - descr = PyDescr_NewMember(type, memb); - if (unlikely(!descr)) - return -1; - if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { - Py_DECREF(descr); - return 
-1; - } - Py_DECREF(descr); - changed = 1; - } -#endif - } - memb++; - } - if (changed) - PyType_Modified(type); - } -#endif - return 0; -} -#endif - -/* PyObjectCallNoArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { - PyObject *arg = NULL; - return __Pyx_PyObject_FastCall(func, (&arg)+1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* PyObjectGetMethod */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - __Pyx_TypeName type_name; - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - assert (*method == NULL); - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); -#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR - if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) -#elif PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) - #endif -#else - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - if (meth_found) { - *method = descr; - return 1; - } - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - if (likely(descr != NULL)) { - *method = descr; - return 0; - } - type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return 0; -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; -#endif -try_unpack: -#if CYTHON_UNPACK_METHODS - if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - -/* PyObjectCallMethod0 */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { - PyObject *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_CallOneArg(method, obj); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) goto bad; - result = 
__Pyx_PyObject_CallNoArg(method); - Py_DECREF(method); -bad: - return result; -} - -/* ValidateBasesTuple */ -#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS -static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) { - Py_ssize_t i, n = PyTuple_GET_SIZE(bases); - for (i = 1; i < n; i++) - { - PyObject *b0 = PyTuple_GET_ITEM(bases, i); - PyTypeObject *b; -#if PY_MAJOR_VERSION < 3 - if (PyClass_Check(b0)) - { - PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class", - PyString_AS_STRING(((PyClassObject*)b0)->cl_name)); - return -1; - } -#endif - b = (PyTypeObject*) b0; - if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE)) - { - __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); - PyErr_Format(PyExc_TypeError, - "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name); - __Pyx_DECREF_TypeName(b_name); - return -1; - } - if (dictoffset == 0 && b->tp_dictoffset) - { - __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); - PyErr_Format(PyExc_TypeError, - "extension type '%.200s' has no __dict__ slot, " - "but base type '" __Pyx_FMT_TYPENAME "' has: " - "either add 'cdef dict __dict__' to the extension type " - "or add '__slots__ = [...]' to the base type", - type_name, b_name); - __Pyx_DECREF_TypeName(b_name); - return -1; - } - } - return 0; -} -#endif - -/* PyType_Ready */ -static int __Pyx_PyType_Ready(PyTypeObject *t) { -#if CYTHON_USE_TYPE_SPECS || !(CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API) || defined(PYSTON_MAJOR_VERSION) - (void)__Pyx_PyObject_CallMethod0; -#if CYTHON_USE_TYPE_SPECS - (void)__Pyx_validate_bases_tuple; -#endif - return PyType_Ready(t); -#else - int r; - PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); - if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1)) - return -1; -#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) - { - int gc_was_enabled; - #if PY_VERSION_HEX >= 0x030A00b1 - gc_was_enabled = PyGC_Disable(); - (void)__Pyx_PyObject_CallMethod0; - #else - PyObject *ret, *py_status; - PyObject *gc = NULL; - #if PY_VERSION_HEX >= 0x030700a1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) - gc = PyImport_GetModule(__pyx_kp_u_gc); - #endif - if (unlikely(!gc)) gc = PyImport_Import(__pyx_kp_u_gc); - if (unlikely(!gc)) return -1; - py_status = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_isenabled); - if (unlikely(!py_status)) { - Py_DECREF(gc); - return -1; - } - gc_was_enabled = __Pyx_PyObject_IsTrue(py_status); - Py_DECREF(py_status); - if (gc_was_enabled > 0) { - ret = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_disable); - if (unlikely(!ret)) { - Py_DECREF(gc); - return -1; - } - Py_DECREF(ret); - } else if (unlikely(gc_was_enabled == -1)) { - Py_DECREF(gc); - return -1; - } - #endif - t->tp_flags |= Py_TPFLAGS_HEAPTYPE; -#if PY_VERSION_HEX >= 0x030A0000 - t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE; -#endif -#else - (void)__Pyx_PyObject_CallMethod0; -#endif - r = PyType_Ready(t); -#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) - t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE; - #if PY_VERSION_HEX >= 0x030A00b1 - if (gc_was_enabled) - PyGC_Enable(); - #else - if (gc_was_enabled) { - PyObject *tp, *v, *tb; - PyErr_Fetch(&tp, &v, &tb); - ret = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_enable); - if (likely(ret || r == -1)) { - Py_XDECREF(ret); - PyErr_Restore(tp, v, tb); - } else { - Py_XDECREF(tp); - Py_XDECREF(v); - Py_XDECREF(tb); - r = -1; - } - 
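- /* If gc.enable() failed while PyType_Ready() itself succeeded, the fetched exception state is discarded above and r is set to -1 so the GC error propagates; otherwise the prior exception state is restored. */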
} - Py_DECREF(gc); - #endif - } -#endif - return r; -#endif -} - -/* SetVTable */ -static int __Pyx_SetVtable(PyTypeObject *type, void *vtable) { - PyObject *ob = PyCapsule_New(vtable, 0, 0); - if (unlikely(!ob)) - goto bad; -#if CYTHON_COMPILING_IN_LIMITED_API - if (unlikely(PyObject_SetAttr((PyObject *) type, __pyx_n_s_pyx_vtable, ob) < 0)) -#else - if (unlikely(PyDict_SetItem(type->tp_dict, __pyx_n_s_pyx_vtable, ob) < 0)) -#endif - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -/* GetVTable */ -static void* __Pyx_GetVtable(PyTypeObject *type) { - void* ptr; -#if CYTHON_COMPILING_IN_LIMITED_API - PyObject *ob = PyObject_GetAttr((PyObject *)type, __pyx_n_s_pyx_vtable); -#else - PyObject *ob = PyObject_GetItem(type->tp_dict, __pyx_n_s_pyx_vtable); -#endif - if (!ob) - goto bad; - ptr = PyCapsule_GetPointer(ob, 0); - if (!ptr && !PyErr_Occurred()) - PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); - Py_DECREF(ob); - return ptr; -bad: - Py_XDECREF(ob); - return NULL; -} - -/* MergeVTables */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_MergeVtables(PyTypeObject *type) { - int i; - void** base_vtables; - __Pyx_TypeName tp_base_name; - __Pyx_TypeName base_name; - void* unknown = (void*)-1; - PyObject* bases = type->tp_bases; - int base_depth = 0; - { - PyTypeObject* base = type->tp_base; - while (base) { - base_depth += 1; - base = base->tp_base; - } - } - base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1)); - base_vtables[0] = unknown; - for (i = 1; i < PyTuple_GET_SIZE(bases); i++) { - void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))); - if (base_vtable != NULL) { - int j; - PyTypeObject* base = type->tp_base; - for (j = 0; j < base_depth; j++) { - if (base_vtables[j] == unknown) { - base_vtables[j] = __Pyx_GetVtable(base); - base_vtables[j + 1] = unknown; - } - if (base_vtables[j] == base_vtable) { - break; - } else if (base_vtables[j] == NULL) { - goto bad; - } - base = base->tp_base; - } - } - } - PyErr_Clear(); - free(base_vtables); - return 0; -bad: - tp_base_name = __Pyx_PyType_GetName(type->tp_base); - base_name = __Pyx_PyType_GetName((PyTypeObject*)PyTuple_GET_ITEM(bases, i)); - PyErr_Format(PyExc_TypeError, - "multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name); - __Pyx_DECREF_TypeName(tp_base_name); - __Pyx_DECREF_TypeName(base_name); - free(base_vtables); - return -1; -} -#endif - -/* SetupReduce */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { - int ret; - PyObject *name_attr; - name_attr = __Pyx_PyObject_GetAttrStrNoError(meth, __pyx_n_s_name_2); - if (likely(name_attr)) { - ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); - } else { - ret = -1; - } - if (unlikely(ret < 0)) { - PyErr_Clear(); - ret = 0; - } - Py_XDECREF(name_attr); - return ret; -} -static int __Pyx_setup_reduce(PyObject* type_obj) { - int ret = 0; - PyObject *object_reduce = NULL; - PyObject *object_getstate = NULL; - PyObject *object_reduce_ex = NULL; - PyObject *reduce = NULL; - PyObject *reduce_ex = NULL; - PyObject *reduce_cython = NULL; - PyObject *setstate = NULL; - PyObject *setstate_cython = NULL; - PyObject *getstate = NULL; -#if CYTHON_USE_PYTYPE_LOOKUP - getstate = _PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate); -#else - getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_getstate); - if (!getstate && 
PyErr_Occurred()) { - goto __PYX_BAD; - } -#endif - if (getstate) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_getstate = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_getstate); -#else - object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, __pyx_n_s_getstate); - if (!object_getstate && PyErr_Occurred()) { - goto __PYX_BAD; - } -#endif - if (object_getstate != getstate) { - goto __PYX_GOOD; - } - } -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#else - object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#endif - reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; - if (reduce_ex == object_reduce_ex) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#else - object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#endif - reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; - if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { - reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); - if (likely(reduce_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (reduce == object_reduce || PyErr_Occurred()) { - goto __PYX_BAD; - } - setstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate); - if (!setstate) PyErr_Clear(); - if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { - setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); - if (likely(setstate_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (!setstate || PyErr_Occurred()) { - goto __PYX_BAD; - } - } - PyType_Modified((PyTypeObject*)type_obj); - } - } - goto __PYX_GOOD; -__PYX_BAD: - if (!PyErr_Occurred()) { - __Pyx_TypeName type_obj_name = - __Pyx_PyType_GetName((PyTypeObject*)type_obj); - PyErr_Format(PyExc_RuntimeError, - "Unable to initialize pickling for " __Pyx_FMT_TYPENAME, type_obj_name); - __Pyx_DECREF_TypeName(type_obj_name); - } - ret = -1; -__PYX_GOOD: -#if !CYTHON_USE_PYTYPE_LOOKUP - Py_XDECREF(object_reduce); - Py_XDECREF(object_reduce_ex); - Py_XDECREF(object_getstate); - Py_XDECREF(getstate); -#endif - Py_XDECREF(reduce); - Py_XDECREF(reduce_ex); - Py_XDECREF(reduce_cython); - Py_XDECREF(setstate); - Py_XDECREF(setstate_cython); - return ret; -} -#endif - -/* FetchSharedCythonModule */ -static PyObject *__Pyx_FetchSharedCythonABIModule(void) { - PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME); - if (unlikely(!abi_module)) return NULL; - Py_INCREF(abi_module); - return abi_module; -} - -/* FetchCommonType */ -static int __Pyx_VerifyCachedType(PyObject *cached_type, - const char *name, - Py_ssize_t basicsize, - 
Py_ssize_t expected_basicsize) { - if (!PyType_Check(cached_type)) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s is not a type object", name); - return -1; - } - if (basicsize != expected_basicsize) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s has the wrong size, try recompiling", - name); - return -1; - } - return 0; -} -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { - PyObject* abi_module; - const char* object_name; - PyTypeObject *cached_type = NULL; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - object_name = strrchr(type->tp_name, '.'); - object_name = object_name ? object_name+1 : type->tp_name; - cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - if (__Pyx_VerifyCachedType( - (PyObject *)cached_type, - object_name, - cached_type->tp_basicsize, - type->tp_basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - if (PyType_Ready(type) < 0) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) - goto bad; - Py_INCREF(type); - cached_type = type; -done: - Py_DECREF(abi_module); - return cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#else -static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { - PyObject *abi_module, *cached_type = NULL; - const char* object_name = strrchr(spec->name, '.'); - object_name = object_name ? object_name+1 : spec->name; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - cached_type = PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - Py_ssize_t basicsize; -#if CYTHON_COMPILING_IN_LIMITED_API - PyObject *py_basicsize; - py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); - if (unlikely(!py_basicsize)) goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; -#else - basicsize = likely(PyType_Check(cached_type)) ? 
((PyTypeObject*) cached_type)->tp_basicsize : -1; -#endif - if (__Pyx_VerifyCachedType( - cached_type, - object_name, - basicsize, - spec->basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - CYTHON_UNUSED_VAR(module); - cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); - if (unlikely(!cached_type)) goto bad; - if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; -done: - Py_DECREF(abi_module); - assert(cached_type == NULL || PyType_Check(cached_type)); - return (PyTypeObject *) cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#endif - -/* PyVectorcallFastCallDict */ -#if CYTHON_METH_FASTCALL -static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - PyObject *res = NULL; - PyObject *kwnames; - PyObject **newargs; - PyObject **kwvalues; - Py_ssize_t i, pos; - size_t j; - PyObject *key, *value; - unsigned long keys_are_strings; - Py_ssize_t nkw = PyDict_GET_SIZE(kw); - newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); - if (unlikely(newargs == NULL)) { - PyErr_NoMemory(); - return NULL; - } - for (j = 0; j < nargs; j++) newargs[j] = args[j]; - kwnames = PyTuple_New(nkw); - if (unlikely(kwnames == NULL)) { - PyMem_Free(newargs); - return NULL; - } - kwvalues = newargs + nargs; - pos = i = 0; - keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; - while (PyDict_Next(kw, &pos, &key, &value)) { - keys_are_strings &= Py_TYPE(key)->tp_flags; - Py_INCREF(key); - Py_INCREF(value); - PyTuple_SET_ITEM(kwnames, i, key); - kwvalues[i] = value; - i++; - } - if (unlikely(!keys_are_strings)) { - PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - goto cleanup; - } - res = vc(func, newargs, nargs, kwnames); -cleanup: - Py_DECREF(kwnames); - for (i = 0; i < nkw; i++) - Py_DECREF(kwvalues[i]); - PyMem_Free(newargs); - return res; -} -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { - return vc(func, args, nargs, NULL); - } - return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); -} -#endif - -/* CythonFunctionShared */ -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { -#if PY_VERSION_HEX < 0x030900B1 - __Pyx_Py_XDECREF_SET( - __Pyx_CyFunction_GetClassObj(f), - ((classobj) ? __Pyx_NewRef(classobj) : NULL)); -#else - __Pyx_Py_XDECREF_SET( - ((PyCMethodObject *) (f))->mm_class, - (PyTypeObject*)((classobj) ? 
__Pyx_NewRef(classobj) : NULL)); -#endif -} -static PyObject * -__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) -{ - CYTHON_UNUSED_VAR(closure); - if (unlikely(op->func_doc == NULL)) { - if (((PyCFunctionObject*)op)->m_ml->ml_doc) { -#if PY_MAJOR_VERSION >= 3 - op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#else - op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#endif - if (unlikely(op->func_doc == NULL)) - return NULL; - } else { - Py_INCREF(Py_None); - return Py_None; - } - } - Py_INCREF(op->func_doc); - return op->func_doc; -} -static int -__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (value == NULL) { - value = Py_None; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_doc, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_name == NULL)) { -#if PY_MAJOR_VERSION >= 3 - op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#else - op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#endif - if (unlikely(op->func_name == NULL)) - return NULL; - } - Py_INCREF(op->func_name); - return op->func_name; -} -static int -__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_name, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_qualname); - return op->func_qualname; -} -static int -__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_qualname, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_dict == NULL)) { - op->func_dict = PyDict_New(); - if (unlikely(op->func_dict == NULL)) - return NULL; - } - Py_INCREF(op->func_dict); - return op->func_dict; -} -static int -__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(value == NULL)) { - PyErr_SetString(PyExc_TypeError, - "function's dictionary may not be deleted"); - return -1; - } - if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "setting function's dictionary to a non-dict"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_dict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_globals); - return op->func_globals; -} -static PyObject * 
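- /* (editor's note) CyFunctions keep captured state in their own func_closure field rather than in real cell objects, so the __closure__ getter below always reports None. */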
-__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(op); - CYTHON_UNUSED_VAR(context); - Py_INCREF(Py_None); - return Py_None; -} -static PyObject * -__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) -{ - PyObject* result = (op->func_code) ? op->func_code : Py_None; - CYTHON_UNUSED_VAR(context); - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { - int result = 0; - PyObject *res = op->defaults_getter((PyObject *) op); - if (unlikely(!res)) - return -1; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - op->defaults_tuple = PyTuple_GET_ITEM(res, 0); - Py_INCREF(op->defaults_tuple); - op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); - Py_INCREF(op->defaults_kwdict); - #else - op->defaults_tuple = PySequence_ITEM(res, 0); - if (unlikely(!op->defaults_tuple)) result = -1; - else { - op->defaults_kwdict = PySequence_ITEM(res, 1); - if (unlikely(!op->defaults_kwdict)) result = -1; - } - #endif - Py_DECREF(res); - return result; -} -static int -__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__defaults__ must be set to a tuple object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_tuple; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_tuple; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__kwdefaults__ must be set to a dict object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_kwdict; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_kwdict; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value || value == Py_None) { - value = NULL; - } else if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__annotations__ must be set to a dict object"); - return -1; - } - Py_XINCREF(value); - __Pyx_Py_XDECREF_SET(op->func_annotations, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) 
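- /* (editor's note) as with plain Python functions, __annotations__ is materialised lazily: the getter below creates and caches an empty dict on first access. */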
{ - PyObject* result = op->func_annotations; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - result = PyDict_New(); - if (unlikely(!result)) return NULL; - op->func_annotations = result; - } - Py_INCREF(result); - return result; -} -static PyObject * -__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { - int is_coroutine; - CYTHON_UNUSED_VAR(context); - if (op->func_is_coroutine) { - return __Pyx_NewRef(op->func_is_coroutine); - } - is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; -#if PY_VERSION_HEX >= 0x03050000 - if (is_coroutine) { - PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; - fromlist = PyList_New(1); - if (unlikely(!fromlist)) return NULL; - Py_INCREF(marker); - PyList_SET_ITEM(fromlist, 0, marker); - module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); - Py_DECREF(fromlist); - if (unlikely(!module)) goto ignore; - op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); - Py_DECREF(module); - if (likely(op->func_is_coroutine)) { - return __Pyx_NewRef(op->func_is_coroutine); - } -ignore: - PyErr_Clear(); - } -#endif - op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); - return __Pyx_NewRef(op->func_is_coroutine); -} -static PyGetSetDef __pyx_CyFunction_getsets[] = { - {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, - {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, - {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, - {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; -static PyMemberDef __pyx_CyFunction_members[] = { - {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, -#if CYTHON_USE_TYPE_SPECS - {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, -#if CYTHON_METH_FASTCALL -#if CYTHON_BACKPORT_VECTORCALL - {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, -#else - {(char *) "__vectorcalloffset__", T_PYSSIZET, 
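- /* (editor's note) in this branch the member reuses PyCFunctionObject's native vectorcall slot; the CYTHON_BACKPORT_VECTORCALL branch above stores the offset on the CyFunction struct itself. */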
offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, -#endif -#endif -#if PY_VERSION_HEX < 0x030500A0 - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, -#else - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, -#endif -#endif - {0, 0, 0, 0, 0} -}; -static PyObject * -__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) -{ - CYTHON_UNUSED_VAR(args); -#if PY_MAJOR_VERSION >= 3 - Py_INCREF(m->func_qualname); - return m->func_qualname; -#else - return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); -#endif -} -static PyMethodDef __pyx_CyFunction_methods[] = { - {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, - {0, 0, 0, 0} -}; -#if PY_VERSION_HEX < 0x030500A0 -#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) -#else -#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) -#endif -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyCFunctionObject *cf = (PyCFunctionObject*) op; - if (unlikely(op == NULL)) - return NULL; - op->flags = flags; - __Pyx_CyFunction_weakreflist(op) = NULL; - cf->m_ml = ml; - cf->m_self = (PyObject *) op; - Py_XINCREF(closure); - op->func_closure = closure; - Py_XINCREF(module); - cf->m_module = module; - op->func_dict = NULL; - op->func_name = NULL; - Py_INCREF(qualname); - op->func_qualname = qualname; - op->func_doc = NULL; -#if PY_VERSION_HEX < 0x030900B1 - op->func_classobj = NULL; -#else - ((PyCMethodObject*)op)->mm_class = NULL; -#endif - op->func_globals = globals; - Py_INCREF(op->func_globals); - Py_XINCREF(code); - op->func_code = code; - op->defaults_pyobjects = 0; - op->defaults_size = 0; - op->defaults = NULL; - op->defaults_tuple = NULL; - op->defaults_kwdict = NULL; - op->defaults_getter = NULL; - op->func_annotations = NULL; - op->func_is_coroutine = NULL; -#if CYTHON_METH_FASTCALL - switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { - case METH_NOARGS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; - break; - case METH_O: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; - break; - case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; - break; - case METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; - break; - case METH_VARARGS | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = NULL; - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - Py_DECREF(op); - return NULL; - } -#endif - return (PyObject *) op; -} -static int -__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) -{ - Py_CLEAR(m->func_closure); - Py_CLEAR(((PyCFunctionObject*)m)->m_module); - Py_CLEAR(m->func_dict); - Py_CLEAR(m->func_name); - Py_CLEAR(m->func_qualname); - Py_CLEAR(m->func_doc); - Py_CLEAR(m->func_globals); - Py_CLEAR(m->func_code); -#if PY_VERSION_HEX < 0x030900B1 - Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); -#else - { - PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; - ((PyCMethodObject *) (m))->mm_class = NULL; - Py_XDECREF(cls); - } -#endif - Py_CLEAR(m->defaults_tuple); - 
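- /* (editor's note) clear() drops every owned reference; the heap block holding compiled-in argument defaults is freed just below, once its PyObject* slots have been individually decref'd. */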
Py_CLEAR(m->defaults_kwdict); - Py_CLEAR(m->func_annotations); - Py_CLEAR(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_XDECREF(pydefaults[i]); - PyObject_Free(m->defaults); - m->defaults = NULL; - } - return 0; -} -static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - if (__Pyx_CyFunction_weakreflist(m) != NULL) - PyObject_ClearWeakRefs((PyObject *) m); - __Pyx_CyFunction_clear(m); - __Pyx_PyHeapTypeObject_GC_Del(m); -} -static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - PyObject_GC_UnTrack(m); - __Pyx__CyFunction_dealloc(m); -} -static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) -{ - Py_VISIT(m->func_closure); - Py_VISIT(((PyCFunctionObject*)m)->m_module); - Py_VISIT(m->func_dict); - Py_VISIT(m->func_name); - Py_VISIT(m->func_qualname); - Py_VISIT(m->func_doc); - Py_VISIT(m->func_globals); - Py_VISIT(m->func_code); - Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); - Py_VISIT(m->defaults_tuple); - Py_VISIT(m->defaults_kwdict); - Py_VISIT(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_VISIT(pydefaults[i]); - } - return 0; -} -static PyObject* -__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) -{ -#if PY_MAJOR_VERSION >= 3 - return PyUnicode_FromFormat("<cyfunction %U at %p>", - op->func_qualname, (void *)op); -#else - return PyString_FromFormat("<cyfunction %s at %p>", - PyString_AsString(op->func_qualname), (void *)op); -#endif -} -static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { - PyCFunctionObject* f = (PyCFunctionObject*)func; - PyCFunction meth = f->m_ml->ml_meth; - Py_ssize_t size; - switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { - case METH_VARARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) - return (*meth)(self, arg); - break; - case METH_VARARGS | METH_KEYWORDS: - return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); - case METH_NOARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 0)) - return (*meth)(self, NULL); - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - case METH_O: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 1)) { - PyObject *result, *arg0; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - arg0 = PyTuple_GET_ITEM(arg, 0); - #else - arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; - #endif - result = (*meth)(self, arg0); - #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(arg0); - #endif - return result; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - return NULL; - } - PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", - f->m_ml->ml_name); - return NULL; -} -static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { - return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); -} -static PyObject
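- /* (editor's note) the call path that follows prefers the vectorcall pointer installed at init time when CYTHON_METH_FASTCALL is available, and otherwise peels off the first positional argument as 'self' for non-static cclass methods. */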
*__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { - PyObject *result; - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; -#if CYTHON_METH_FASTCALL - __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); - if (vc) { -#if CYTHON_ASSUME_SAFE_MACROS - return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); -#else - (void) &__Pyx_PyVectorcall_FastCallDict; - return PyVectorcall_Call(func, args, kw); -#endif - } -#endif - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - Py_ssize_t argc; - PyObject *new_args; - PyObject *self; - argc = PyTuple_GET_SIZE(args); - new_args = PyTuple_GetSlice(args, 1, argc); - if (unlikely(!new_args)) - return NULL; - self = PyTuple_GetItem(args, 0); - if (unlikely(!self)) { - Py_DECREF(new_args); -#if PY_MAJOR_VERSION > 2 - PyErr_Format(PyExc_TypeError, - "unbound method %.200S() needs an argument", - cyfunc->func_qualname); -#else - PyErr_SetString(PyExc_TypeError, - "unbound method needs an argument"); -#endif - return NULL; - } - result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); - Py_DECREF(new_args); - } else { - result = __Pyx_CyFunction_Call(func, args, kw); - } - return result; -} -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) -{ - int ret = 0; - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - if (unlikely(nargs < 1)) { - PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", - ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - ret = 1; - } - if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - return ret; -} -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 0)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, NULL); -} -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 1)) { - PyErr_Format(PyExc_TypeError, - "%.200s() 
takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, args[0]); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; - PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, (size_t)nargs, kwnames); -} -#endif -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_CyFunctionType_slots[] = { - {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, - {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, - {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, - {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, - {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, - {Py_tp_methods, (void *)__pyx_CyFunction_methods}, - {Py_tp_members, (void *)__pyx_CyFunction_members}, - {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, - {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, - {0, 0}, -}; -static PyType_Spec __pyx_CyFunctionType_spec = { - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - __pyx_CyFunctionType_slots -}; -#else -static PyTypeObject __pyx_CyFunctionType_type = { - PyVarObject_HEAD_INIT(0, 0) - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, - (destructor) __Pyx_CyFunction_dealloc, -#if !CYTHON_METH_FASTCALL - 0, -#elif CYTHON_BACKPORT_VECTORCALL - (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), -#else - offsetof(PyCFunctionObject, vectorcall), -#endif - 0, - 0, -#if PY_MAJOR_VERSION < 3 - 0, -#else - 0, -#endif - (reprfunc) __Pyx_CyFunction_repr, - 0, - 0, - 0, - 0, - __Pyx_CyFunction_CallAsMethod, - 0, - 0, - 0, - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#ifdef _Py_TPFLAGS_HAVE_VECTORCALL - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - 0, - 
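- /* (editor's note) the zero entries fill PyTypeObject slots this type leaves unused; only dealloc, the vectorcall offset, repr, call, the GC hooks just below, the weaklist/dict offsets, the method/member/getset tables and the tp_descr_get hook are populated. */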
(traverseproc) __Pyx_CyFunction_traverse, - (inquiry) __Pyx_CyFunction_clear, - 0, -#if PY_VERSION_HEX < 0x030500A0 - offsetof(__pyx_CyFunctionObject, func_weakreflist), -#else - offsetof(PyCFunctionObject, m_weakreflist), -#endif - 0, - 0, - __pyx_CyFunction_methods, - __pyx_CyFunction_members, - __pyx_CyFunction_getsets, - 0, - 0, - __Pyx_PyMethod_New, - 0, - offsetof(__pyx_CyFunctionObject, func_dict), - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -#if PY_VERSION_HEX >= 0x030400a1 - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, -#endif -#if __PYX_NEED_TP_PRINT_SLOT - 0, -#endif -#if PY_VERSION_HEX >= 0x030C0000 - 0, -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 - 0, -#endif -}; -#endif -static int __pyx_CyFunction_init(PyObject *module) { -#if CYTHON_USE_TYPE_SPECS - __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); -#else - CYTHON_UNUSED_VAR(module); - __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); -#endif - if (unlikely(__pyx_CyFunctionType == NULL)) { - return -1; - } - return 0; -} -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults = PyObject_Malloc(size); - if (unlikely(!m->defaults)) - return PyErr_NoMemory(); - memset(m->defaults, 0, size); - m->defaults_pyobjects = pyobjects; - m->defaults_size = size; - return m->defaults; -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_tuple = tuple; - Py_INCREF(tuple); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_kwdict = dict; - Py_INCREF(dict); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->func_annotations = dict; - Py_INCREF(dict); -} - -/* CythonFunction */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyObject *op = __Pyx_CyFunction_Init( - PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), - ml, flags, qualname, closure, module, globals, code - ); - if (likely(op)) { - PyObject_GC_Track(op); - } - return op; -} - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - CYTHON_MAYBE_UNUSED_VAR(tstate); - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = 
PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} -#endif - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -#if PY_VERSION_HEX >= 0x030b00a6 - #ifndef Py_BUILD_CORE - #define Py_BUILD_CORE 1 - #endif - #include "internal/pycore_frame.h" -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - if (c_line) { - (void) __pyx_cfilenm; - (void) 
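- /* (editor's note) the result is deliberately discarded: under the Limited API the traceback entry added below can only carry the Python line number. */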
__Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); - } - _PyTraceback_Add(funcname, filename, py_line); -} -#else -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = NULL; - PyObject *py_funcname = NULL; - #if PY_MAJOR_VERSION < 3 - PyObject *py_srcfile = NULL; - py_srcfile = PyString_FromString(filename); - if (!py_srcfile) goto bad; - #endif - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - funcname = PyUnicode_AsUTF8(py_funcname); - if (!funcname) goto bad; - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - #endif - } - #if PY_MAJOR_VERSION < 3 - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - #else - py_code = PyCode_NewEmpty(filename, funcname, py_line); - #endif - Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline - return py_code; -bad: - Py_XDECREF(py_funcname); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_srcfile); - #endif - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject *ptype, *pvalue, *ptraceback; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) { - /* If the code object creation fails, then we should clear the - fetched exception references and propagate the new exception */ - Py_XDECREF(ptype); - Py_XDECREF(pvalue); - Py_XDECREF(ptraceback); - goto bad; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - __pyx_insert_code_object(c_line ? 
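- /* (editor's note) C lines are cached under negated keys so they can never collide with genuine Python line numbers. */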
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} -#endif - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - __Pyx_TypeName obj_type_name; - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' does not have the buffer interface", - obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - -/* MemviewSliceIsContig */ -static int -__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) -{ - int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; - if (order == 'F') { - step = 1; - start = 0; - } else { - step = -1; - start = ndim - 1; - } - for (i = 0; i < ndim; i++) { - index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) - return 0; - itemsize *= mvs.shape[index]; - } - return 1; -} - -/* OverlappingSlices */ -static void -__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, - void **out_start, void **out_end, - int ndim, size_t itemsize) -{ - char *start, *end; - int i; - start = end = slice->data; - for (i = 0; i < ndim; i++) { - Py_ssize_t stride = slice->strides[i]; - Py_ssize_t extent = slice->shape[i]; - if (extent == 0) { - *out_start = *out_end = start; - return; - } else { - if (stride > 0) - end += stride * (extent - 1); - else - start += stride * (extent - 1); - } - } - *out_start = start; - *out_end = end + itemsize; -} -static int -__pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize) -{ - void *start1, *end1, *start2, *end2; - __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); - __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); - return (start1 < end2) && (start2 < end1); -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - 
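- /* (editor's note) 'S' marks a struct dtype; the parser stack descends into nested structs here so format checking starts at the innermost first field. */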
type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparsable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
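- /* (editor's note) complex variants occupy two scalars of the base C type */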
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { - CYTHON_UNUSED_VAR(is_complex); - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably be the same as above, - but we don't have any guarantees. - */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, int is_complex) { - CYTHON_UNUSED_VAR(is_complex); - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ?
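- /* (editor's note) in the typegroup encoding 'C' is complex and 'R' real floating point */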
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if 
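- /* (editor's note) reaching the root sentinel means every struct field has been consumed; a leftover repeat count is reported as a dtype mismatch below */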
(field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case '<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if 
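- /* (editor's note) restore the enclosing struct's alignment once the nested 'T{...}' block has been fully parsed */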
(struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* TypeInfoCompare */ - static int -__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) -{ - int i; - if (!a || !b) - return 0; - if (a == b) - return 1; - if (a->size != b->size || a->typegroup != b->typegroup || - a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { - if (a->typegroup == 'H' || b->typegroup == 'H') { - return a->size == b->size; - } else { - return 0; - } - } - if (a->ndim) { - for (i = 0; i < a->ndim; i++) - if (a->arraysize[i] != b->arraysize[i]) - return 0; - } - if (a->typegroup == 'S') { - if (a->flags != b->flags) - return 0; - if (a->fields || b->fields) { - if (!(a->fields && b->fields)) - return 0; - for (i = 0; a->fields[i].type && b->fields[i].type; i++) { - __Pyx_StructField *field_a = a->fields + i; - __Pyx_StructField *field_b = b->fields + i; - if (field_a->offset != field_b->offset || - !__pyx_typeinfo_cmp(field_a->type, field_b->type)) - return 0; - } - return !a->fields[i].type && !b->fields[i].type; - } - } - return 1; -} - -/* MemviewSliceValidateAndInit */ - static int -__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) -{ - if (buf->shape[dim] <= 1) - return 1; - if (buf->strides) { - if (spec & __Pyx_MEMVIEW_CONTIG) { - if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { - if (unlikely(buf->strides[dim] != sizeof(void *))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly contiguous " - "in dimension %d.", dim); - goto fail; - } - } else if (unlikely(buf->strides[dim] != buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_FOLLOW) { - Py_ssize_t stride = buf->strides[dim]; - if (stride < 0) - stride = -stride; - if (unlikely(stride < buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and 
memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - } else { - if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not contiguous in " - "dimension %d", dim); - goto fail; - } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not indirect in " - "dimension %d", dim); - goto fail; - } else if (unlikely(buf->suboffsets)) { - PyErr_SetString(PyExc_ValueError, - "Buffer exposes suboffsets but no strides"); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_check_suboffsets(Py_buffer *buf, int dim, int ndim, int spec) -{ - CYTHON_UNUSED_VAR(ndim); - if (spec & __Pyx_MEMVIEW_DIRECT) { - if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { - PyErr_Format(PyExc_ValueError, - "Buffer not compatible with direct access " - "in dimension %d.", dim); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_PTR) { - if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly accessible " - "in dimension %d.", dim); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) -{ - int i; - if (c_or_f_flag & __Pyx_IS_F_CONTIG) { - Py_ssize_t stride = 1; - for (i = 0; i < ndim; i++) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not fortran contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { - Py_ssize_t stride = 1; - for (i = ndim - 1; i >- 1; i--) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not C contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } - return 1; -fail: - return 0; -} -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj) -{ - struct __pyx_memoryview_obj *memview, *new_memview; - __Pyx_RefNannyDeclarations - Py_buffer *buf; - int i, spec = 0, retval = -1; - __Pyx_BufFmt_Context ctx; - int from_memoryview = __pyx_memoryview_check(original_obj); - __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); - if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) - original_obj)->typeinfo)) { - memview = (struct __pyx_memoryview_obj *) original_obj; - new_memview = NULL; - } else { - memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - original_obj, buf_flags, 0, dtype); - new_memview = memview; - if (unlikely(!memview)) - goto fail; - } - buf = &memview->view; - if (unlikely(buf->ndim != ndim)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - ndim, buf->ndim); - goto fail; - } - if (new_memview) { - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; - } - if (unlikely((unsigned) buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " - "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", - buf->itemsize, - (buf->itemsize > 1) ? "s" : "", - dtype->name, - dtype->size, - (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->len > 0) { - for (i = 0; i < ndim; i++) { - spec = axes_specs[i]; - if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) - goto fail; - if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) - goto fail; - } - if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) - goto fail; - } - if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, - new_memview != NULL) == -1)) { - goto fail; - } - retval = 0; - goto no_fail; -fail: - Py_XDECREF(new_memview); - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_float, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < 
sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* MemviewSliceCopyTemplate */ - static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object) -{ - __Pyx_RefNannyDeclarations - int i; - __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; - struct __pyx_memoryview_obj *from_memview = from_mvs->memview; - Py_buffer *buf = &from_memview->view; - PyObject *shape_tuple = NULL; - PyObject *temp_int = NULL; - struct __pyx_array_obj *array_obj = NULL; - struct __pyx_memoryview_obj *memview_obj = NULL; - __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); - for (i = 0; i < ndim; i++) { - if (unlikely(from_mvs->suboffsets[i] >= 0)) { - PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " - "indirect dimensions (axis %d)", i); - goto fail; - } - } - shape_tuple = PyTuple_New(ndim); - if (unlikely(!shape_tuple)) { - goto fail; - } - __Pyx_GOTREF(shape_tuple); - for(i = 0; i < ndim; i++) { - temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); - if(unlikely(!temp_int)) { - goto fail; - } else { - PyTuple_SET_ITEM(shape_tuple, i, temp_int); - temp_int = NULL; - } - } - array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); - if (unlikely(!array_obj)) { - goto fail; - } - __Pyx_GOTREF(array_obj); - memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - (PyObject *) array_obj, contig_flag, - dtype_is_object, - from_mvs->memview->typeinfo); - if (unlikely(!memview_obj)) - goto fail; - if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) - goto fail; - if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, - dtype_is_object) < 0)) - goto fail; - goto no_fail; -fail: - __Pyx_XDECREF(new_mvs.memview); - new_mvs.memview = NULL; - new_mvs.data = NULL; -no_fail: - __Pyx_XDECREF(shape_tuple); - __Pyx_XDECREF(temp_int); - __Pyx_XDECREF(array_obj); - __Pyx_RefNannyFinishContext(); - return new_mvs; -} - -/* MemviewSliceInit */ - static int -__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference) -{ - __Pyx_RefNannyDeclarations - int i, retval=-1; - Py_buffer *buf = &memview->view; - __Pyx_RefNannySetupContext("init_memviewslice", 0); - if (unlikely(memviewslice->memview || memviewslice->data)) { - PyErr_SetString(PyExc_ValueError, - "memviewslice is already initialized!"); - goto fail; - } - if (buf->strides) { - for (i = 0; i < ndim; i++) { - memviewslice->strides[i] = buf->strides[i]; - } - } else { - Py_ssize_t stride = buf->itemsize; - for (i = ndim - 1; i >= 0; i--) { - memviewslice->strides[i] = stride; - stride *= buf->shape[i]; - } - } - for (i = 0; i < ndim; i++) { - memviewslice->shape[i] = buf->shape[i]; - if (buf->suboffsets) { - memviewslice->suboffsets[i] = buf->suboffsets[i]; - } else { - memviewslice->suboffsets[i] = -1; - } - } - memviewslice->memview = memview; - memviewslice->data = (char *)buf->buf; - if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { - Py_INCREF(memview); - } - retval = 0; - goto no_fail; -fail: - 
memviewslice->memview = 0; - memviewslice->data = 0; - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} -#ifndef Py_NO_RETURN -#define Py_NO_RETURN -#endif -static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { - va_list vargs; - char msg[200]; -#if PY_VERSION_HEX >= 0x030A0000 || defined(HAVE_STDARG_PROTOTYPES) - va_start(vargs, fmt); -#else - va_start(vargs); -#endif - vsnprintf(msg, 200, fmt, vargs); - va_end(vargs); - Py_FatalError(msg); -} -static CYTHON_INLINE int -__pyx_add_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)++; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE int -__pyx_sub_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)--; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE void -__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) -{ - __pyx_nonatomic_int_type old_acquisition_count; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - return; - } - old_acquisition_count = __pyx_add_acquisition_count(memview); - if (unlikely(old_acquisition_count <= 0)) { - if (likely(old_acquisition_count == 0)) { - if (have_gil) { - Py_INCREF((PyObject *) memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_INCREF((PyObject *) memview); - PyGILState_Release(_gilstate); - } - } else { - __pyx_fatalerror("Acquisition count is %d (line %d)", - old_acquisition_count+1, lineno); - } - } -} -static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *memslice, - int have_gil, int lineno) { - __pyx_nonatomic_int_type old_acquisition_count; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - memslice->memview = NULL; - return; - } - old_acquisition_count = __pyx_sub_acquisition_count(memview); - memslice->data = NULL; - if (likely(old_acquisition_count > 1)) { - memslice->memview = NULL; - } else if (likely(old_acquisition_count == 1)) { - if (have_gil) { - Py_CLEAR(memslice->memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_CLEAR(memslice->memview); - PyGILState_Release(_gilstate); - } - } else { - __pyx_fatalerror("Acquisition count is %d (line %d)", - old_acquisition_count-1, lineno); - } -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); 
-#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(int) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - if (unlikely(__Pyx_PyLong_IsNeg(x))) { - goto raise_neg_overflow; - } else if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_DigitCount(x)) { - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(int) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) - } else { - const digit* digits = 
__Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_SignedDigitCount(x)) { - case -2: - if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } - } -#endif - if ((sizeof(int) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); -#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } -#endif - if (likely(v)) { - int ret = -1; -#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || 
defined(_PyLong_AsByteArray) - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); -#else - PyObject *stepval = NULL, *mask = NULL, *shift = NULL; - int bits, remaining_bits, is_negative = 0; - long idigit; - int chunk_size = (sizeof(long) < 8) ? 30 : 62; - if (unlikely(!PyLong_CheckExact(v))) { - PyObject *tmp = v; - v = PyNumber_Long(v); - assert(PyLong_CheckExact(v)); - Py_DECREF(tmp); - if (unlikely(!v)) return (int) -1; - } -#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(x) == 0) - return (int) 0; - is_negative = Py_SIZE(x) < 0; -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - is_negative = result == 1; - } -#endif - if (is_unsigned && unlikely(is_negative)) { - goto raise_neg_overflow; - } else if (is_negative) { - stepval = PyNumber_Invert(v); - if (unlikely(!stepval)) - return (int) -1; - } else { - stepval = __Pyx_NewRef(v); - } - val = (int) 0; - mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; - shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; - for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) { - PyObject *tmp, *digit; - digit = PyNumber_And(stepval, mask); - if (unlikely(!digit)) goto done; - idigit = PyLong_AsLong(digit); - Py_DECREF(digit); - if (unlikely(idigit < 0)) goto done; - tmp = PyNumber_Rshift(stepval, shift); - if (unlikely(!tmp)) goto done; - Py_DECREF(stepval); stepval = tmp; - val |= ((int) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(stepval) == 0) - goto unpacking_done; - #endif - } - idigit = PyLong_AsLong(stepval); - if (unlikely(idigit < 0)) goto done; - remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 
0 : 1); - if (unlikely(idigit >= (1L << remaining_bits))) - goto raise_overflow; - val |= ((int) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - unpacking_done: - #endif - if (!is_unsigned) { - if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1)))) - goto raise_overflow; - if (is_negative) - val = ~val; - } - ret = 0; - done: - Py_XDECREF(shift); - Py_XDECREF(mask); - Py_XDECREF(stepval); -#endif - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(long) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - if (unlikely(__Pyx_PyLong_IsNeg(x))) { - goto raise_neg_overflow; - } else if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_DigitCount(x)) { - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { - return (long) (((((long)digits[1]) << 
PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(long) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_SignedDigitCount(x)) { - case -2: - if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } - } -#endif - if ((sizeof(long) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); -#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } -#endif - if (likely(v)) { - int ret = -1; -#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); -#else - PyObject *stepval = NULL, *mask = NULL, *shift = NULL; - int bits, remaining_bits, is_negative = 0; - long idigit; - int chunk_size = (sizeof(long) < 8) ? 
30 : 62; - if (unlikely(!PyLong_CheckExact(v))) { - PyObject *tmp = v; - v = PyNumber_Long(v); - assert(PyLong_CheckExact(v)); - Py_DECREF(tmp); - if (unlikely(!v)) return (long) -1; - } -#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(x) == 0) - return (long) 0; - is_negative = Py_SIZE(x) < 0; -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - is_negative = result == 1; - } -#endif - if (is_unsigned && unlikely(is_negative)) { - goto raise_neg_overflow; - } else if (is_negative) { - stepval = PyNumber_Invert(v); - if (unlikely(!stepval)) - return (long) -1; - } else { - stepval = __Pyx_NewRef(v); - } - val = (long) 0; - mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; - shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; - for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) { - PyObject *tmp, *digit; - digit = PyNumber_And(stepval, mask); - if (unlikely(!digit)) goto done; - idigit = PyLong_AsLong(digit); - Py_DECREF(digit); - if (unlikely(idigit < 0)) goto done; - tmp = PyNumber_Rshift(stepval, shift); - if (unlikely(!tmp)) goto done; - Py_DECREF(stepval); stepval = tmp; - val |= ((long) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(stepval) == 0) - goto unpacking_done; - #endif - } - idigit = PyLong_AsLong(stepval); - if (unlikely(idigit < 0)) goto done; - remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 0 : 1); - if (unlikely(idigit >= (1L << remaining_bits))) - goto raise_overflow; - val |= ((long) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - unpacking_done: - #endif - if (!is_unsigned) { - if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1)))) - goto raise_overflow; - if (is_negative) - val = ~val; - } - ret = 0; - done: - Py_XDECREF(shift); - Py_XDECREF(mask); - Py_XDECREF(stepval); -#endif - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const char neg_one = (char) -1, const_zero = (char) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(char) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (char) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - if (unlikely(__Pyx_PyLong_IsNeg(x))) { - goto raise_neg_overflow; - } else if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - 
assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_DigitCount(x)) { - case 2: - if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) >= 2 * PyLong_SHIFT)) { - return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) >= 3 * PyLong_SHIFT)) { - return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) >= 4 * PyLong_SHIFT)) { - return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - } - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(char) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(char) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - if (__Pyx_PyLong_IsCompact(x)) { - __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) - } else { - const digit* digits = __Pyx_PyLong_Digits(x); - assert(__Pyx_PyLong_DigitCount(x) > 1); - switch (__Pyx_PyLong_SignedDigitCount(x)) { - case -2: - if ((8 * sizeof(char) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { - return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { - return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * 
sizeof(char) - 1 > 3 * PyLong_SHIFT)) { - return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) { - return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) - 1 > 4 * PyLong_SHIFT)) { - return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(char) - 1 > 4 * PyLong_SHIFT)) { - return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - } - } -#endif - if ((sizeof(char) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(char) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { - char val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); -#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } -#endif - if (likely(v)) { - int ret = -1; -#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); -#else - PyObject *stepval = NULL, *mask = NULL, *shift = NULL; - int bits, remaining_bits, is_negative = 0; - long idigit; - int chunk_size = (sizeof(long) < 8) ? 
30 : 62; - if (unlikely(!PyLong_CheckExact(v))) { - PyObject *tmp = v; - v = PyNumber_Long(v); - assert(PyLong_CheckExact(v)); - Py_DECREF(tmp); - if (unlikely(!v)) return (char) -1; - } -#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(x) == 0) - return (char) 0; - is_negative = Py_SIZE(x) < 0; -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - is_negative = result == 1; - } -#endif - if (is_unsigned && unlikely(is_negative)) { - goto raise_neg_overflow; - } else if (is_negative) { - stepval = PyNumber_Invert(v); - if (unlikely(!stepval)) - return (char) -1; - } else { - stepval = __Pyx_NewRef(v); - } - val = (char) 0; - mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; - shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; - for (bits = 0; bits < (int) sizeof(char) * 8 - chunk_size; bits += chunk_size) { - PyObject *tmp, *digit; - digit = PyNumber_And(stepval, mask); - if (unlikely(!digit)) goto done; - idigit = PyLong_AsLong(digit); - Py_DECREF(digit); - if (unlikely(idigit < 0)) goto done; - tmp = PyNumber_Rshift(stepval, shift); - if (unlikely(!tmp)) goto done; - Py_DECREF(stepval); stepval = tmp; - val |= ((char) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - if (Py_SIZE(stepval) == 0) - goto unpacking_done; - #endif - } - idigit = PyLong_AsLong(stepval); - if (unlikely(idigit < 0)) goto done; - remaining_bits = ((int) sizeof(char) * 8) - bits - (is_unsigned ? 0 : 1); - if (unlikely(idigit >= (1L << remaining_bits))) - goto raise_overflow; - val |= ((char) idigit) << bits; - #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 - unpacking_done: - #endif - if (!is_unsigned) { - if (unlikely(val & (((char) 1) << (sizeof(char) * 8 - 1)))) - goto raise_overflow; - if (is_negative) - val = ~val; - } - ret = 0; - done: - Py_XDECREF(shift); - Py_XDECREF(mask); - Py_XDECREF(stepval); -#endif - Py_DECREF(v); - if (likely(!ret)) - return val; - } - return (char) -1; - } - } else { - char val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (char) -1; - val = __Pyx_PyInt_As_char(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to char"); - return (char) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to char"); - return (char) -1; -} - -/* FormatTypeName */ - #if CYTHON_COMPILING_IN_LIMITED_API -static __Pyx_TypeName -__Pyx_PyType_GetName(PyTypeObject* tp) -{ - PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, - __pyx_n_s_name_2); - if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { - PyErr_Clear(); - Py_XSETREF(name, __Pyx_NewRef(__pyx_n_s__23)); - } - return name; -} -#endif - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[5]; - int same=1, i, found_dot; - const char* rt_from_call = Py_GetVersion(); - PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - found_dot = 0; - for (i = 0; i < 4; i++) { - if (!ctversion[i]) { - same = (rt_from_call[i] < '0' || rt_from_call[i] > '9'); - break; - } - if (rt_from_call[i] != ctversion[i]) { - same = 0; - break; - } - } - if (!same) { - char rtversion[5] = {'\0'}; - char message[200]; - for (i=0; i<4; ++i) { - if (rt_from_call[i] == '.') { - if (found_dot) break; - found_dot = 1; - } else if (rt_from_call[i] < '0' || 
rt_from_call[i] > '9') { - break; - } - rtversion[i] = rt_from_call[i]; - } - PyOS_snprintf(message, sizeof(message), - "compile time version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - #if PY_MAJOR_VERSION >= 3 -static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { - if (t.is_unicode | t.is_str) { - if (t.intern) { - *str = PyUnicode_InternFromString(t.s); - } else if (t.encoding) { - *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); - } else { - *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); - } - } else { - *str = PyBytes_FromStringAndSize(t.s, t.n - 1); - } - if (!*str) - return -1; - if (PyObject_Hash(*str) == -1) - return -1; - return 0; -} -#endif -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION >= 3 - __Pyx_InitString(*t, t->p); - #else - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - #endif - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} 
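The `__Pyx_PyInt_As_int` / `__Pyx_PyInt_As_long` / `__Pyx_PyInt_As_char` converters above, together with the `__Pyx_PyNumber_IntOrLong` coercion helper defined just below, all implement one contract: coerce the argument to a Python integer, return it when it fits the target C type, and otherwise return -1 with one of the two `OverflowError` messages at the tail of each converter. A rough Python model of that contract, for orientation only (`as_c_integer` is an illustrative name, not part of the generated module):

```
import ctypes

def as_c_integer(value, ctype=ctypes.c_int, unsigned=False):
    """Range-check a Python int against a C integer type, mirroring the
    __Pyx_PyInt_As_* contract in the generated code above."""
    value = int(value)  # coerce via __int__/__index__, as __Pyx_PyNumber_IntOrLong does
    bits = 8 * ctypes.sizeof(ctype)
    if unsigned and value < 0:
        raise OverflowError("can't convert negative value to " + ctype.__name__)
    lo = 0 if unsigned else -(1 << (bits - 1))
    hi = ((1 << bits) - 1) if unsigned else ((1 << (bits - 1)) - 1)
    if not lo <= value <= hi:
        raise OverflowError("value too large to convert to " + ctype.__name__)
    return value

print(as_c_integer(123))  # 123
# as_c_integer(1 << 40) raises OverflowError wherever C int is 32 bits wide.
```

The generated C avoids this arbitrary-precision path where it can: small `PyLong` values are unpacked digit-by-digit (the `PyLong_SHIFT` branches above), `_PyLong_AsByteArray` handles the general case when available, and the chunked shift-and-mask fallback only runs on interpreters that expose neither.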
-static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { - __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). " - "The ability to return an instance of a strict subclass of int is deprecated, " - "and may be removed in a future version of Python.", - result_type_name)) { - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; - } - __Pyx_DECREF_TypeName(result_type_name); - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", - type_name, type_name, result_type_name); - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(__Pyx_PyLong_IsCompact(b))) { - return __Pyx_PyLong_CompactValue(b); - } else { - const digit* digits = __Pyx_PyLong_Digits(b); - const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) 
(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { - if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { - return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); -#if PY_MAJOR_VERSION < 3 - } else if (likely(PyInt_CheckExact(o))) { - return PyInt_AS_LONG(o); -#endif - } else { - Py_ssize_t ival; - PyObject *x; - x = PyNumber_Index(o); - if (!x) return -1; - ival = PyInt_AsLong(x); - Py_DECREF(x); - return ival; - } -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -/* #### Code section: utility_code_pragmas_end ### */ -#ifdef _MSC_VER -#pragma warning( pop ) -#endif - - - -/* #### Code section: end ### */ -#endif /* Py_PYTHON_H */ diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/__init__.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/text/__init__.py deleted file mode 100644 index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - -def cleaned_text_to_sequence(cleaned_text, tones, language): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - -def get_bert(norm_text, word2ph, language): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - lang_bert_func_map = { - 'ZH': zh_bert, - 'EN': en_bert - } - bert = lang_bert_func_map[language](norm_text, word2ph) - return bert diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/seg_toy_data.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/seg_toy_data.py deleted file mode 100644 index 7f0b7d8f4c520ec7847d69743d8e430b8795b656..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/seg_toy_data.py +++ /dev/null @@ -1,34 +0,0 @@ -prefix = 'tests/data/ocr_char_ann_toy_dataset/' - -train = dict( - type='OCRSegDataset', - img_prefix=f'{prefix}/imgs', - ann_file=f'{prefix}/instances_train.txt', - loader=dict( - type='AnnFileLoader', - repeat=100, - file_format='txt', - parser=dict( - type='LineJsonParser', keys=['file_name', 'annotations', 'text'])), - pipeline=None, - test_mode=True) - -test = dict( - type='OCRDataset', - img_prefix=f'{prefix}/imgs', - ann_file=f'{prefix}/instances_test.txt', - loader=dict( - type='AnnFileLoader', - repeat=1, - file_format='txt', - parser=dict( - type='LineStrParser', - keys=['filename', 'text'], - keys_idx=[0, 1], - separator=' ')), - pipeline=None, - test_mode=True) - -train_list = [train] - -test_list = [test] diff --git a/spaces/dorkai/text-generation-webui-main/docs/Using-LoRAs.md b/spaces/dorkai/text-generation-webui-main/docs/Using-LoRAs.md deleted file mode 100644 index fafd6cde2d87bfdf46d942ab841a74bf50facdb5..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/docs/Using-LoRAs.md +++ /dev/null @@ -1,55 +0,0 @@ -Based on https://github.com/tloen/alpaca-lora - -## Instructions - -1. Download a LoRA, for instance: - -``` -python download-model.py tloen/alpaca-lora-7b -``` - -2. Load the LoRA. 16-bit, 8-bit, and CPU modes work: - -``` -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --load-in-8bit -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --cpu -``` - -* For using LoRAs in 4-bit mode, follow [these special instructions](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode). - -* Instead of using the `--lora` command-line flag, you can also select the LoRA in the "Parameters" tab of the interface. - -## Prompt -For the Alpaca LoRA in particular, the prompt must be formatted like this: - -``` -Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: -Write a Python script that generates text using the transformers library. -### Response: -``` - -Sample output: - -``` -Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: -Write a Python script that generates text using the transformers library. 
-### Response: - -import transformers -from transformers import AutoTokenizer, AutoModelForCausalLM -tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") -model = AutoModelForCausalLM.from_pretrained("bert-base-uncased") -texts = ["Hello world", "How are you"] -for sentence in texts: -sentence = tokenizer(sentence) -print(f"Generated {len(sentence)} tokens from '{sentence}'") -output = model(sentences=sentence).predict() -print(f"Predicted {len(output)} tokens for '{sentence}':\n{output}") -``` - -## Training a LoRA - -You can train your own LoRAs from the `Training` tab. See [Training LoRAs](Training-LoRAs.md) for details. diff --git a/spaces/ehristoforu/Iro/README.md b/spaces/ehristoforu/Iro/README.md deleted file mode 100644 index 4cce30f932592821f6e2ba4f6cd932600ef1c08c..0000000000000000000000000000000000000000 --- a/spaces/ehristoforu/Iro/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Iro -emoji: 💻 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ehristoforu/NLLB-Translator/README.md b/spaces/ehristoforu/NLLB-Translator/README.md deleted file mode 100644 index e6440c1f0cfbaebe6573193e597bf5cfa934e231..0000000000000000000000000000000000000000 --- a/spaces/ehristoforu/NLLB-Translator/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: NLLB Translator -emoji: 🗺️ -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.0.26 -app_file: app.py -pinned: false -license: wtfpl -duplicated_from: Narrativaai/NLLB-Translator ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/elkraken/Video-Object-Detection/utils/loss.py b/spaces/elkraken/Video-Object-Detection/utils/loss.py deleted file mode 100644 index 2b1d968f8fee4ae7822776c006cd9e05424f4286..0000000000000000000000000000000000000000 --- a/spaces/elkraken/Video-Object-Detection/utils/loss.py +++ /dev/null @@ -1,1697 +0,0 @@ -# Loss functions - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy -from utils.torch_utils import is_parallel - - -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets - return 1.0 - 0.5 * eps, 0.5 * eps - - -class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. 
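- # How the blur works: dx = pred - true is close to +1 only when the model is
- # confident but the target is 0 (the typical signature of a missing label);
- # alpha_factor then decays the loss toward zero exactly there, while ordinary
- # errors keep near-full weight. alpha sets how sharp that decay is.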
- def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() - self.alpha = alpha - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - pred = torch.sigmoid(pred) # prob from logits - dx = pred - true # reduce only missing label effects - # dx = (pred - true).abs() # reduce missing label and false label effects - alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) - loss *= alpha_factor - return loss.mean() - - -class SigmoidBin(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale = 2.0, use_loss_regression=True, use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0): - super(SigmoidBin, self).__init__() - - self.bin_count = bin_count - self.length = bin_count + 1 - self.min = min - self.max = max - self.scale = float(max - min) - self.shift = self.scale / 2.0 - - self.use_loss_regression = use_loss_regression - self.use_fw_regression = use_fw_regression - self.reg_scale = reg_scale - self.BCE_weight = BCE_weight - - start = min + (self.scale/2.0) / self.bin_count - end = max - (self.scale/2.0) / self.bin_count - step = self.scale / self.bin_count - self.step = step - #print(f" start = {start}, end = {end}, step = {step} ") - - bins = torch.arange(start, end + 0.0001, step).float() # torch.range is deprecated; with the +0.0001 slack, arange still includes the last bin centre - self.register_buffer('bins', bins) - - - self.cp = 1.0 - 0.5 * smooth_eps - self.cn = 0.5 * smooth_eps - - self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight])) - self.MSELoss = nn.MSELoss() - - def get_length(self): - return self.length - - def forward(self, pred): - assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) - - pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale/2.0) * self.step - pred_bin = pred[..., 1:(1+self.bin_count)] - - _, bin_idx = torch.max(pred_bin, dim=-1) - bin_bias = self.bins[bin_idx] - - if self.use_fw_regression: - result = pred_reg + bin_bias - else: - result = bin_bias - result = result.clamp(min=self.min, max=self.max) - - return result - - - def training_loss(self, pred, target): - assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) - assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0]) - device = pred.device - - pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale/2.0) * self.step - pred_bin = pred[..., 1:(1+self.bin_count)] - - diff_bin_target = torch.abs(target[..., None] - self.bins) - _, bin_idx = torch.min(diff_bin_target, dim=-1) - - bin_bias = self.bins[bin_idx] - bin_bias.requires_grad = False - result = pred_reg + bin_bias - - target_bins = torch.full_like(pred_bin, self.cn, device=device) # targets - n = pred.shape[0] - target_bins[range(n), bin_idx] = self.cp - - loss_bin = self.BCEbins(pred_bin, target_bins) # BCE - - if self.use_loss_regression: - loss_regression = self.MSELoss(result, target) # MSE - loss = loss_bin + loss_regression - else: - loss = loss_bin - - out_result = result.clamp(min=self.min, max=self.max) - - return loss, out_result - - -class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e.
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - # p_t = torch.exp(-loss) - # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability - - # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py - pred_prob = torch.sigmoid(pred) # prob from logits - p_t = true * pred_prob + (1 - true) * (1 - pred_prob) - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = (1.0 - p_t) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(QFocalLoss, self).__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - - pred_prob = torch.sigmoid(pred) # prob from logits - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = torch.abs(true - pred_prob) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - -class RankSort(torch.autograd.Function): - @staticmethod - def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10): - - classification_grads=torch.zeros(logits.shape).cuda() - - #Filter fg logits - fg_labels = (targets > 0.) - fg_logits = logits[fg_labels] - fg_targets = targets[fg_labels] - fg_num = len(fg_logits) - - #Do not use bg with scores less than minimum fg logit - #since changing its score does not have an effect on precision - threshold_logit = torch.min(fg_logits)-delta_RS - relevant_bg_labels=((targets==0) & (logits>=threshold_logit)) - - relevant_bg_logits = logits[relevant_bg_labels] - relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() - sorting_error=torch.zeros(fg_num).cuda() - ranking_error=torch.zeros(fg_num).cuda() - fg_grad=torch.zeros(fg_num).cuda() - - #sort the fg logits - order=torch.argsort(fg_logits) - #Loops over each positive following the order - for ii in order: - # Difference Transforms (x_ij) - fg_relations=fg_logits-fg_logits[ii] - bg_relations=relevant_bg_logits-fg_logits[ii] - - if delta_RS > 0: - fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1) - bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) - else: - fg_relations = (fg_relations >= 0).float() - bg_relations = (bg_relations >= 0).float() - - # Rank of ii among pos and false positive number (bg with larger scores) - rank_pos=torch.sum(fg_relations) - FP_num=torch.sum(bg_relations) - - # Rank of ii among all examples - rank=rank_pos+FP_num - - # Ranking error of example ii. target_ranking_error is always 0. (Eq. 
7) - ranking_error[ii]=FP_num/rank - - # Current sorting error of example ii. (Eq. 7) - current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos - - #Find examples in the target sorted order for example ii - iou_relations = (fg_targets >= fg_targets[ii]) - target_sorted_order = iou_relations * fg_relations - - #The rank of ii among positives in sorted order - rank_pos_target = torch.sum(target_sorted_order) - - #Compute target sorting error. (Eq. 8) - #Since target ranking error is 0, this is also total target error - target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target - - #Compute sorting error on example ii - sorting_error[ii] = current_sorting_error - target_sorting_error - - #Identity Update for Ranking Error - if FP_num > eps: - #For ii the update is the ranking error - fg_grad[ii] -= ranking_error[ii] - #For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num) - relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) - - #Find the positives that are misranked (the cause of the error) - #These are the ones with smaller IoU but larger logits - missorted_examples = (~ iou_relations) * fg_relations - - #Denominator of sorting pmf - sorting_pmf_denom = torch.sum(missorted_examples) - - #Identity Update for Sorting Error - if sorting_pmf_denom > eps: - #For ii the update is the sorting error - fg_grad[ii] -= sorting_error[ii] - #For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom) - fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom)) - - #Normalize gradients by number of positives - classification_grads[fg_labels]= (fg_grad/fg_num) - classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) - - ctx.save_for_backward(classification_grads) - - return ranking_error.mean(), sorting_error.mean() - - @staticmethod - def backward(ctx, out_grad1, out_grad2): - g1, =ctx.saved_tensors - return g1*out_grad1, None, None, None - -class aLRPLoss(torch.autograd.Function): - @staticmethod - def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): - classification_grads=torch.zeros(logits.shape).cuda() - - #Filter fg logits - fg_labels = (targets == 1) - fg_logits = logits[fg_labels] - fg_num = len(fg_logits) - - #Do not use bg with scores less than minimum fg logit - #since changing its score does not have an effect on precision - threshold_logit = torch.min(fg_logits)-delta - - #Get valid bg logits - relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) - relevant_bg_logits=logits[relevant_bg_labels] - relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() - rank=torch.zeros(fg_num).cuda() - prec=torch.zeros(fg_num).cuda() - fg_grad=torch.zeros(fg_num).cuda() - - max_prec=0 - #sort the fg logits - order=torch.argsort(fg_logits) - #Loops over each positive following the order - for ii in order: - #x_ij s as score differences with fgs - fg_relations=fg_logits-fg_logits[ii] - #Apply piecewise linear function and determine relations with fgs - fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) - #Discard i=j in the summation in rank_pos - fg_relations[ii]=0 - - #x_ij s as score differences with bgs - bg_relations=relevant_bg_logits-fg_logits[ii] - #Apply piecewise linear function and determine relations with bgs - bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) - - #Compute the rank of the example within fgs and number of bgs with larger scores - rank_pos=1+torch.sum(fg_relations) - FP_num=torch.sum(bg_relations)
- #Store the total since it is also the normalizer for the aLRP regression error - rank[ii]=rank_pos+FP_num - - #Compute precision for this example to compute classification loss - prec[ii]=rank_pos/rank[ii] - #For stability, set eps to an infinitesimally small value (e.g. 1e-6), then compute grads - if FP_num > eps: - fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] - relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) - - #aLRP with grad formulation fg gradient - classification_grads[fg_labels]= fg_grad - #aLRP with grad formulation bg gradient - classification_grads[relevant_bg_labels]= relevant_bg_grad - - classification_grads /= (fg_num) - - cls_loss=1-prec.mean() - ctx.save_for_backward(classification_grads) - - return cls_loss, rank, order - - @staticmethod - def backward(ctx, out_grad1, out_grad2, out_grad3): - g1, =ctx.saved_tensors - return g1*out_grad1, None, None, None, None - - -class APLoss(torch.autograd.Function): - @staticmethod - def forward(ctx, logits, targets, delta=1.): - classification_grads=torch.zeros(logits.shape).cuda() - - #Filter fg logits - fg_labels = (targets == 1) - fg_logits = logits[fg_labels] - fg_num = len(fg_logits) - - #Do not use bg with scores less than minimum fg logit - #since changing its score does not have an effect on precision - threshold_logit = torch.min(fg_logits)-delta - - #Get valid bg logits - relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) - relevant_bg_logits=logits[relevant_bg_labels] - relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() - rank=torch.zeros(fg_num).cuda() - prec=torch.zeros(fg_num).cuda() - fg_grad=torch.zeros(fg_num).cuda() - - max_prec=0 - #sort the fg logits - order=torch.argsort(fg_logits) - #Loops over each positive following the order - for ii in order: - #x_ij s as score differences with fgs - fg_relations=fg_logits-fg_logits[ii] - #Apply piecewise linear function and determine relations with fgs - fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) - #Discard i=j in the summation in rank_pos - fg_relations[ii]=0 - - #x_ij s as score differences with bgs - bg_relations=relevant_bg_logits-fg_logits[ii] - #Apply piecewise linear function and determine relations with bgs - bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) - - #Compute the rank of the example within fgs and number of bgs with larger scores - rank_pos=1+torch.sum(fg_relations) - FP_num=torch.sum(bg_relations) - #Store the total since it is also the normalizer for the aLRP regression error - rank[ii]=rank_pos+FP_num - - #Compute precision for this example - current_prec=rank_pos/rank[ii] - - #Compute interpolated AP and store gradients for relevant bg examples - if (max_prec<=current_prec): - max_prec=current_prec - relevant_bg_grad += (bg_relations/rank[ii]) - else: - relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) - - #Store fg gradients - fg_grad[ii]=-(1-max_prec) - prec[ii]=max_prec - - #aLRP with grad formulation fg gradient - classification_grads[fg_labels]= fg_grad - #aLRP with grad formulation bg gradient - classification_grads[relevant_bg_labels]= relevant_bg_grad - - classification_grads /= fg_num - - cls_loss=1-prec.mean() - ctx.save_for_backward(classification_grads) - - return cls_loss - - @staticmethod - def backward(ctx, out_grad1): - g1, =ctx.saved_tensors - return g1*out_grad1, None, None - - -class ComputeLoss: - # Compute losses - def __init__(self, model, autobalance=False): - super(ComputeLoss, self).__init__() - device = 
next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 - #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7 - #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors': - setattr(self, k, getattr(det, k)) - - def __call__(self, p, targets): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) - tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj - - n = b.shape[0] # number of targets - if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets - - # Regression - pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio - - # Classification - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets - t[range(n), tcls[i]] = self.cp - #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype) - lcls += self.BCEcls(ps[:, 5:], t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() - - def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets - - for i in range(self.nl): - anchors = self.anchors[i] - gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain - if nt: - # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - a = t[:, 6].long() # anchor indices - indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - - return tcls, tbox, indices, anch - - -class ComputeLossOTA: - # Compute losses - def __init__(self, model, autobalance=False): - super(ComputeLossOTA, self).__init__() - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors', 'stride': - setattr(self, k, getattr(det, k)) - - def __call__(self, p, targets, imgs): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) - bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) - pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] - - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj - - n = b.shape[0] # number of targets - if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets - - # Regression - grid = torch.stack([gi, gj], dim=1) - pxy = ps[:, :2].sigmoid() * 2. - 0.5 - #pxy = ps[:, :2].sigmoid() * 3. - 1. 
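- # Box decode: sigmoid()*2. - 0.5 keeps the xy offset in (-0.5, 1.5) around the
- # assigned cell, and (sigmoid()*2)**2 bounds wh to at most 4x the matched
- # anchor, which pairs with the wh-ratio ('anchor_t') filter in build_targets.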
- pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] - selected_tbox[:, :2] -= grid - iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio - - # Classification - selected_tcls = targets[i][:, 1].long() - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets - t[range(n), selected_tcls] = self.cp - lcls += self.BCEcls(ps[:, 5:], t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., 4], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() - - def build_targets(self, p, targets, imgs): - - #indices, anch = self.find_positive(p, targets) - indices, anch = self.find_3_positive(p, targets) - #indices, anch = self.find_4_positive(p, targets) - #indices, anch = self.find_5_positive(p, targets) - #indices, anch = self.find_9_positive(p, targets) - device = torch.device(targets.device) - matching_bs = [[] for pp in p] - matching_as = [[] for pp in p] - matching_gjs = [[] for pp in p] - matching_gis = [[] for pp in p] - matching_targets = [[] for pp in p] - matching_anchs = [[] for pp in p] - - nl = len(p) - - for batch_idx in range(p[0].shape[0]): - - b_idx = targets[:, 0]==batch_idx - this_target = targets[b_idx] - if this_target.shape[0] == 0: - continue - - txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] - txyxy = xywh2xyxy(txywh) - - pxyxys = [] - p_cls = [] - p_obj = [] - from_which_layer = [] - all_b = [] - all_a = [] - all_gj = [] - all_gi = [] - all_anch = [] - - for i, pi in enumerate(p): - - b, a, gj, gi = indices[i] - idx = (b == batch_idx) - b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] - all_b.append(b) - all_a.append(a) - all_gj.append(gj) - all_gi.append(gi) - all_anch.append(anch[i][idx]) - from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device)) - - fg_pred = pi[b, a, gj, gi] - p_obj.append(fg_pred[:, 4:5]) - p_cls.append(fg_pred[:, 5:]) - - grid = torch.stack([gi, gj], dim=1) - pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. - #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] - pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. 
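- # Candidate boxes are decoded into image-pixel space (cell offset plus grid
- # position, scaled by the layer stride) so the pairwise IoU/cost below is
- # computed against the ground-truth boxes, which were scaled to pixels above.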
- pxywh = torch.cat([pxy, pwh], dim=-1) - pxyxy = xywh2xyxy(pxywh) - pxyxys.append(pxyxy) - - pxyxys = torch.cat(pxyxys, dim=0) - if pxyxys.shape[0] == 0: - continue - p_obj = torch.cat(p_obj, dim=0) - p_cls = torch.cat(p_cls, dim=0) - from_which_layer = torch.cat(from_which_layer, dim=0) - all_b = torch.cat(all_b, dim=0) - all_a = torch.cat(all_a, dim=0) - all_gj = torch.cat(all_gj, dim=0) - all_gi = torch.cat(all_gi, dim=0) - all_anch = torch.cat(all_anch, dim=0) - - pair_wise_iou = box_iou(txyxy, pxyxys) - - pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) - - top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) - dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) - - gt_cls_per_image = ( - F.one_hot(this_target[:, 1].to(torch.int64), self.nc) - .float() - .unsqueeze(1) - .repeat(1, pxyxys.shape[0], 1) - ) - - num_gt = this_target.shape[0] - cls_preds_ = ( - p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - ) - - y = cls_preds_.sqrt_() - pair_wise_cls_loss = F.binary_cross_entropy_with_logits( - torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" - ).sum(-1) - del cls_preds_ - - cost = ( - pair_wise_cls_loss - + 3.0 * pair_wise_iou_loss - ) - - matching_matrix = torch.zeros_like(cost, device=device) - - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False - ) - matching_matrix[gt_idx][pos_idx] = 1.0 - - del top_k, dynamic_ks - anchor_matching_gt = matching_matrix.sum(0) - if (anchor_matching_gt > 1).sum() > 0: - _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) - matching_matrix[:, anchor_matching_gt > 1] *= 0.0 - matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 - fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device) - matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) - - from_which_layer = from_which_layer[fg_mask_inboxes] - all_b = all_b[fg_mask_inboxes] - all_a = all_a[fg_mask_inboxes] - all_gj = all_gj[fg_mask_inboxes] - all_gi = all_gi[fg_mask_inboxes] - all_anch = all_anch[fg_mask_inboxes] - - this_target = this_target[matched_gt_inds] - - for i in range(nl): - layer_idx = from_which_layer == i - matching_bs[i].append(all_b[layer_idx]) - matching_as[i].append(all_a[layer_idx]) - matching_gjs[i].append(all_gj[layer_idx]) - matching_gis[i].append(all_gi[layer_idx]) - matching_targets[i].append(this_target[layer_idx]) - matching_anchs[i].append(all_anch[layer_idx]) - - for i in range(nl): - if matching_targets[i] != []: - matching_bs[i] = torch.cat(matching_bs[i], dim=0) - matching_as[i] = torch.cat(matching_as[i], dim=0) - matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) - matching_gis[i] = torch.cat(matching_gis[i], dim=0) - matching_targets[i] = torch.cat(matching_targets[i], dim=0) - matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) - else: - matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - - return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs - - def find_3_positive(self, p, targets): - # Build targets for compute_loss(), input 
targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - indices, anch = [], [] - gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets - - for i in range(self.nl): - anchors = self.anchors[i] - gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain - if nt: - # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. < g) & (gxi > 1.)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - a = t[:, 6].long() # anchor indices - indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices - anch.append(anchors[a]) # anchors - - return indices, anch - - -class ComputeLossBinOTA: - # Compute losses - def __init__(self, model, autobalance=False): - super(ComputeLossBinOTA, self).__init__() - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - #MSEangle = nn.MSELoss().to(device) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count': - setattr(self, k, getattr(det, k)) - - #xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device) - wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device) - #angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device) - self.wh_bin_sigmoid = wh_bin_sigmoid - - def __call__(self, p, targets, imgs): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, 
device=device), torch.zeros(1, device=device) - bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) - pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] - - - # Losses - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj - - obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 # x,y, w-bce, h-bce # xy_bin_sigmoid.get_length()*2 - - n = b.shape[0] # number of targets - if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets - - # Regression - grid = torch.stack([gi, gj], dim=1) - selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] - selected_tbox[:, :2] -= grid - - #pxy = ps[:, :2].sigmoid() * 2. - 0.5 - ##pxy = ps[:, :2].sigmoid() * 3. - 1. - #pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] - #pbox = torch.cat((pxy, pwh), 1) # predicted box - - #x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0]) - #y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1]) - w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3+self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0]) - h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3+self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1]) - - pw *= anchors[i][..., 0] - ph *= anchors[i][..., 1] - - px = ps[:, 0].sigmoid() * 2. - 0.5 - py = ps[:, 1].sigmoid() * 2. - 0.5 - - lbox += w_loss + h_loss # + x_loss + y_loss - - #print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n") - - pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device) # predicted box - - - - - iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio - - # Classification - selected_tcls = targets[i][:, 1].long() - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, (1+obj_idx):], self.cn, device=device) # targets - t[range(n), selected_tcls] = self.cp - lcls += self.BCEcls(ps[:, (1+obj_idx):], t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - obji = self.BCEobj(pi[..., obj_idx], tobj) - lobj += obji * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() - - def build_targets(self, p, targets, imgs): - - #indices, anch = self.find_positive(p, targets) - indices, anch = self.find_3_positive(p, targets) - #indices, anch = self.find_4_positive(p, targets) - #indices, anch = self.find_5_positive(p, targets) - #indices, anch = self.find_9_positive(p, targets) - - matching_bs = [[] for pp in p] - matching_as = [[] for pp in p] - matching_gjs = [[] for pp in p] - matching_gis = [[] for pp in p] - matching_targets = [[] for pp in p] - matching_anchs = [[] for pp in p] - - nl = len(p) - - for batch_idx in 
range(p[0].shape[0]): - - b_idx = targets[:, 0]==batch_idx - this_target = targets[b_idx] - if this_target.shape[0] == 0: - continue - - txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] - txyxy = xywh2xyxy(txywh) - - pxyxys = [] - p_cls = [] - p_obj = [] - from_which_layer = [] - all_b = [] - all_a = [] - all_gj = [] - all_gi = [] - all_anch = [] - - for i, pi in enumerate(p): - - obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 - - b, a, gj, gi = indices[i] - idx = (b == batch_idx) - b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] - all_b.append(b) - all_a.append(a) - all_gj.append(gj) - all_gi.append(gi) - all_anch.append(anch[i][idx]) - from_which_layer.append(torch.ones(size=(len(b),)) * i) - - fg_pred = pi[b, a, gj, gi] - p_obj.append(fg_pred[:, obj_idx:(obj_idx+1)]) - p_cls.append(fg_pred[:, (obj_idx+1):]) - - grid = torch.stack([gi, gj], dim=1) - pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. - #pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. - pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3+self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i] - ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3+self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i] - - pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1) - pxyxy = xywh2xyxy(pxywh) - pxyxys.append(pxyxy) - - pxyxys = torch.cat(pxyxys, dim=0) - if pxyxys.shape[0] == 0: - continue - p_obj = torch.cat(p_obj, dim=0) - p_cls = torch.cat(p_cls, dim=0) - from_which_layer = torch.cat(from_which_layer, dim=0) - all_b = torch.cat(all_b, dim=0) - all_a = torch.cat(all_a, dim=0) - all_gj = torch.cat(all_gj, dim=0) - all_gi = torch.cat(all_gi, dim=0) - all_anch = torch.cat(all_anch, dim=0) - - pair_wise_iou = box_iou(txyxy, pxyxys) - - pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) - - top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) - dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) - - gt_cls_per_image = ( - F.one_hot(this_target[:, 1].to(torch.int64), self.nc) - .float() - .unsqueeze(1) - .repeat(1, pxyxys.shape[0], 1) - ) - - num_gt = this_target.shape[0] - cls_preds_ = ( - p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - ) - - y = cls_preds_.sqrt_() - pair_wise_cls_loss = F.binary_cross_entropy_with_logits( - torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" - ).sum(-1) - del cls_preds_ - - cost = ( - pair_wise_cls_loss - + 3.0 * pair_wise_iou_loss - ) - - matching_matrix = torch.zeros_like(cost) - - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False - ) - matching_matrix[gt_idx][pos_idx] = 1.0 - - del top_k, dynamic_ks - anchor_matching_gt = matching_matrix.sum(0) - if (anchor_matching_gt > 1).sum() > 0: - _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) - matching_matrix[:, anchor_matching_gt > 1] *= 0.0 - matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 - fg_mask_inboxes = matching_matrix.sum(0) > 0.0 - matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) - - from_which_layer = from_which_layer[fg_mask_inboxes] - all_b = all_b[fg_mask_inboxes] - all_a = all_a[fg_mask_inboxes] - all_gj = all_gj[fg_mask_inboxes] - all_gi = all_gi[fg_mask_inboxes] - all_anch = all_anch[fg_mask_inboxes] - - this_target = this_target[matched_gt_inds] - - for i in range(nl): - layer_idx = from_which_layer == i - 
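- # Route every SimOTA-matched candidate back to the detection layer it came
- # from, so each level's loss is computed only over its own assignments.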
matching_bs[i].append(all_b[layer_idx]) - matching_as[i].append(all_a[layer_idx]) - matching_gjs[i].append(all_gj[layer_idx]) - matching_gis[i].append(all_gi[layer_idx]) - matching_targets[i].append(this_target[layer_idx]) - matching_anchs[i].append(all_anch[layer_idx]) - - for i in range(nl): - if matching_targets[i] != []: - matching_bs[i] = torch.cat(matching_bs[i], dim=0) - matching_as[i] = torch.cat(matching_as[i], dim=0) - matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) - matching_gis[i] = torch.cat(matching_gis[i], dim=0) - matching_targets[i] = torch.cat(matching_targets[i], dim=0) - matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) - else: - matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - - return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs - - def find_3_positive(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - indices, anch = [], [] - gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets - - for i in range(self.nl): - anchors = self.anchors[i] - gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain - if nt: - # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - a = t[:, 6].long() # anchor indices - indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices - anch.append(anchors[a]) # anchors - - return indices, anch - - -class ComputeLossAuxOTA: - # Compute losses - def __init__(self, model, autobalance=False): - super(ComputeLossAuxOTA, self).__init__() - device = next(model.parameters()).device # get model device - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors', 'stride': - setattr(self, k, getattr(det, k)) - - def __call__(self, p, targets, imgs): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) - bs_aux, as_aux_, gjs_aux, gis_aux, targets_aux, anchors_aux = self.build_targets2(p[:self.nl], targets, imgs) - bs, as_, gjs, gis, targets, anchors = self.build_targets(p[:self.nl], targets, imgs) - pre_gen_gains_aux = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] - pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] - - - # Losses - for i in range(self.nl): # layer index, layer predictions - pi = p[i] - pi_aux = p[i+self.nl] - b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx - b_aux, a_aux, gj_aux, gi_aux = bs_aux[i], as_aux_[i], gjs_aux[i], gis_aux[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj - tobj_aux = torch.zeros_like(pi_aux[..., 0], device=device) # target obj - - n = b.shape[0] # number of targets - if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets - - # Regression - grid = torch.stack([gi, gj], dim=1) - pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1) # predicted box - selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] - selected_tbox[:, :2] -= grid - iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio - - # Classification - selected_tcls = targets[i][:, 1].long() - if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets - t[range(n), selected_tcls] = self.cp - lcls += self.BCEcls(ps[:, 5:], t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - n_aux = b_aux.shape[0] # number of targets - if n_aux: - ps_aux = pi_aux[b_aux, a_aux, gj_aux, gi_aux] # prediction subset corresponding to targets - grid_aux = torch.stack([gi_aux, gj_aux], dim=1) - pxy_aux = ps_aux[:, :2].sigmoid() * 2. - 0.5 - #pxy_aux = ps_aux[:, :2].sigmoid() * 3. - 1. - pwh_aux = (ps_aux[:, 2:4].sigmoid() * 2) ** 2 * anchors_aux[i] - pbox_aux = torch.cat((pxy_aux, pwh_aux), 1) # predicted box - selected_tbox_aux = targets_aux[i][:, 2:6] * pre_gen_gains_aux[i] - selected_tbox_aux[:, :2] -= grid_aux - iou_aux = bbox_iou(pbox_aux.T, selected_tbox_aux, x1y1x2y2=False, CIoU=True) # iou(prediction, target) - lbox += 0.25 * (1.0 - iou_aux).mean() # iou loss - - # Objectness - tobj_aux[b_aux, a_aux, gj_aux, gi_aux] = (1.0 - self.gr) + self.gr * iou_aux.detach().clamp(0).type(tobj_aux.dtype) # iou ratio - - # Classification - selected_tcls_aux = targets_aux[i][:, 1].long() - if self.nc > 1: # cls loss (only if multiple classes) - t_aux = torch.full_like(ps_aux[:, 5:], self.cn, device=device) # targets - t_aux[range(n_aux), selected_tcls_aux] = self.cp - lcls += 0.25 * self.BCEcls(ps_aux[:, 5:], t_aux) # BCE - - obji = self.BCEobj(pi[..., 4], tobj) - obji_aux = self.BCEobj(pi_aux[..., 4], tobj_aux) - lobj += obji * self.balance[i] + 0.25 * obji_aux * self.balance[i] # obj loss - if self.autobalance: - self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() - - if self.autobalance: - self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - bs = tobj.shape[0] # batch size - - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() - - def build_targets(self, p, targets, imgs): - - indices, anch = self.find_3_positive(p, targets) - - matching_bs = [[] for pp in p] - matching_as = [[] for pp in p] - matching_gjs = [[] for pp in p] - matching_gis = [[] for pp in p] - matching_targets = [[] for pp in p] - matching_anchs = [[] for pp in p] - - nl = len(p) - - for batch_idx in range(p[0].shape[0]): - - b_idx = targets[:, 0]==batch_idx - this_target = targets[b_idx] - if this_target.shape[0] == 0: - continue - - txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] - txyxy = xywh2xyxy(txywh) - - pxyxys = [] - p_cls = [] - p_obj = [] - from_which_layer = [] - all_b = [] - all_a = [] - all_gj = [] - all_gi = [] - all_anch = [] - - for i, pi in enumerate(p): - - b, a, gj, gi = indices[i] - idx = (b == batch_idx) - b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] - all_b.append(b) - all_a.append(a) - all_gj.append(gj) - all_gi.append(gi) - all_anch.append(anch[i][idx]) - 
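- # Record the source layer of each candidate; after the cost-based matching
- # below, this index is what splits the kept matches back out per level.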
from_which_layer.append(torch.ones(size=(len(b),)) * i) - - fg_pred = pi[b, a, gj, gi] - p_obj.append(fg_pred[:, 4:5]) - p_cls.append(fg_pred[:, 5:]) - - grid = torch.stack([gi, gj], dim=1) - pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. - #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] - pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. - pxywh = torch.cat([pxy, pwh], dim=-1) - pxyxy = xywh2xyxy(pxywh) - pxyxys.append(pxyxy) - - pxyxys = torch.cat(pxyxys, dim=0) - if pxyxys.shape[0] == 0: - continue - p_obj = torch.cat(p_obj, dim=0) - p_cls = torch.cat(p_cls, dim=0) - from_which_layer = torch.cat(from_which_layer, dim=0) - all_b = torch.cat(all_b, dim=0) - all_a = torch.cat(all_a, dim=0) - all_gj = torch.cat(all_gj, dim=0) - all_gi = torch.cat(all_gi, dim=0) - all_anch = torch.cat(all_anch, dim=0) - - pair_wise_iou = box_iou(txyxy, pxyxys) - - pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) - - top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) - dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) - - gt_cls_per_image = ( - F.one_hot(this_target[:, 1].to(torch.int64), self.nc) - .float() - .unsqueeze(1) - .repeat(1, pxyxys.shape[0], 1) - ) - - num_gt = this_target.shape[0] - cls_preds_ = ( - p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - ) - - y = cls_preds_.sqrt_() - pair_wise_cls_loss = F.binary_cross_entropy_with_logits( - torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" - ).sum(-1) - del cls_preds_ - - cost = ( - pair_wise_cls_loss - + 3.0 * pair_wise_iou_loss - ) - - matching_matrix = torch.zeros_like(cost) - - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False - ) - matching_matrix[gt_idx][pos_idx] = 1.0 - - del top_k, dynamic_ks - anchor_matching_gt = matching_matrix.sum(0) - if (anchor_matching_gt > 1).sum() > 0: - _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) - matching_matrix[:, anchor_matching_gt > 1] *= 0.0 - matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 - fg_mask_inboxes = matching_matrix.sum(0) > 0.0 - matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) - - from_which_layer = from_which_layer[fg_mask_inboxes] - all_b = all_b[fg_mask_inboxes] - all_a = all_a[fg_mask_inboxes] - all_gj = all_gj[fg_mask_inboxes] - all_gi = all_gi[fg_mask_inboxes] - all_anch = all_anch[fg_mask_inboxes] - - this_target = this_target[matched_gt_inds] - - for i in range(nl): - layer_idx = from_which_layer == i - matching_bs[i].append(all_b[layer_idx]) - matching_as[i].append(all_a[layer_idx]) - matching_gjs[i].append(all_gj[layer_idx]) - matching_gis[i].append(all_gi[layer_idx]) - matching_targets[i].append(this_target[layer_idx]) - matching_anchs[i].append(all_anch[layer_idx]) - - for i in range(nl): - if matching_targets[i] != []: - matching_bs[i] = torch.cat(matching_bs[i], dim=0) - matching_as[i] = torch.cat(matching_as[i], dim=0) - matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) - matching_gis[i] = torch.cat(matching_gis[i], dim=0) - matching_targets[i] = torch.cat(matching_targets[i], dim=0) - matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) - else: - matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gis[i] = 
torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - - return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs - - def build_targets2(self, p, targets, imgs): - - indices, anch = self.find_5_positive(p, targets) - - matching_bs = [[] for pp in p] - matching_as = [[] for pp in p] - matching_gjs = [[] for pp in p] - matching_gis = [[] for pp in p] - matching_targets = [[] for pp in p] - matching_anchs = [[] for pp in p] - - nl = len(p) - - for batch_idx in range(p[0].shape[0]): - - b_idx = targets[:, 0]==batch_idx - this_target = targets[b_idx] - if this_target.shape[0] == 0: - continue - - txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] - txyxy = xywh2xyxy(txywh) - - pxyxys = [] - p_cls = [] - p_obj = [] - from_which_layer = [] - all_b = [] - all_a = [] - all_gj = [] - all_gi = [] - all_anch = [] - - for i, pi in enumerate(p): - - b, a, gj, gi = indices[i] - idx = (b == batch_idx) - b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] - all_b.append(b) - all_a.append(a) - all_gj.append(gj) - all_gi.append(gi) - all_anch.append(anch[i][idx]) - from_which_layer.append(torch.ones(size=(len(b),)) * i) - - fg_pred = pi[b, a, gj, gi] - p_obj.append(fg_pred[:, 4:5]) - p_cls.append(fg_pred[:, 5:]) - - grid = torch.stack([gi, gj], dim=1) - pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. - #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] - pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. - pxywh = torch.cat([pxy, pwh], dim=-1) - pxyxy = xywh2xyxy(pxywh) - pxyxys.append(pxyxy) - - pxyxys = torch.cat(pxyxys, dim=0) - if pxyxys.shape[0] == 0: - continue - p_obj = torch.cat(p_obj, dim=0) - p_cls = torch.cat(p_cls, dim=0) - from_which_layer = torch.cat(from_which_layer, dim=0) - all_b = torch.cat(all_b, dim=0) - all_a = torch.cat(all_a, dim=0) - all_gj = torch.cat(all_gj, dim=0) - all_gi = torch.cat(all_gi, dim=0) - all_anch = torch.cat(all_anch, dim=0) - - pair_wise_iou = box_iou(txyxy, pxyxys) - - pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) - - top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) - dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) - - gt_cls_per_image = ( - F.one_hot(this_target[:, 1].to(torch.int64), self.nc) - .float() - .unsqueeze(1) - .repeat(1, pxyxys.shape[0], 1) - ) - - num_gt = this_target.shape[0] - cls_preds_ = ( - p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() - ) - - y = cls_preds_.sqrt_() - pair_wise_cls_loss = F.binary_cross_entropy_with_logits( - torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" - ).sum(-1) - del cls_preds_ - - cost = ( - pair_wise_cls_loss - + 3.0 * pair_wise_iou_loss - ) - - matching_matrix = torch.zeros_like(cost) - - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False - ) - matching_matrix[gt_idx][pos_idx] = 1.0 - - del top_k, dynamic_ks - anchor_matching_gt = matching_matrix.sum(0) - if (anchor_matching_gt > 1).sum() > 0: - _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) - matching_matrix[:, anchor_matching_gt > 1] *= 0.0 - matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 - fg_mask_inboxes = matching_matrix.sum(0) > 0.0 - matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) - 
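- # Keep only candidates assigned to at least one ground truth; argmax over the
- # now one-hot columns of matching_matrix maps each survivor to its assigned GT.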
- from_which_layer = from_which_layer[fg_mask_inboxes] - all_b = all_b[fg_mask_inboxes] - all_a = all_a[fg_mask_inboxes] - all_gj = all_gj[fg_mask_inboxes] - all_gi = all_gi[fg_mask_inboxes] - all_anch = all_anch[fg_mask_inboxes] - - this_target = this_target[matched_gt_inds] - - for i in range(nl): - layer_idx = from_which_layer == i - matching_bs[i].append(all_b[layer_idx]) - matching_as[i].append(all_a[layer_idx]) - matching_gjs[i].append(all_gj[layer_idx]) - matching_gis[i].append(all_gi[layer_idx]) - matching_targets[i].append(this_target[layer_idx]) - matching_anchs[i].append(all_anch[layer_idx]) - - for i in range(nl): - if matching_targets[i] != []: - matching_bs[i] = torch.cat(matching_bs[i], dim=0) - matching_as[i] = torch.cat(matching_as[i], dim=0) - matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) - matching_gis[i] = torch.cat(matching_gis[i], dim=0) - matching_targets[i] = torch.cat(matching_targets[i], dim=0) - matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) - else: - matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) - - return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs - - def find_5_positive(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - indices, anch = [], [] - gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices - - g = 1.0 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets - - for i in range(self.nl): - anchors = self.anchors[i] - gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain - if nt: - # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - a = t[:, 6].long() # anchor indices - indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices - anch.append(anchors[a]) # anchors - - return indices, anch - - def find_3_positive(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) - na, nt = self.na, targets.shape[0] # number of anchors, targets - indices, anch = [], [] - gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices - - g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets - - for i in range(self.nl): - anchors = self.anchors[i] - gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - t = targets * gain - if nt: - # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) - t = t[j] # filter - - # Offsets - gxy = t[:, 2:4] # grid xy - gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. < g) & (gxi > 1.)).T - j = torch.stack((torch.ones_like(j), j, k, l, m)) - t = t.repeat((5, 1, 1))[j] - offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] - else: - t = targets[0] - offsets = 0 - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - a = t[:, 6].long() # anchor indices - indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices - anch.append(anchors[a]) # anchors - - return indices, anch diff --git a/spaces/emc348/faces-through-time/training/projectors/w_plus_projector.py b/spaces/emc348/faces-through-time/training/projectors/w_plus_projector.py deleted file mode 100644 index d30a59e27bcdb4cdcf19bbafc688f1260439c9f2..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/training/projectors/w_plus_projector.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
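# Editor's note (hedged usage sketch, not part of the original file): the `project`
# function defined below inverts a single image into the generator's W+ space. A
# typical call might look like the following, where `G` (a StyleGAN2-ADA generator
# loaded from a network pickle) and `target_tensor` ([C, H, W], range [0, 255])
# are assumed to exist:
#
#     w_pivot = project(
#         G,
#         target_tensor,
#         device=torch.device('cuda'),
#         w_name='img_0000',            # used only for wandb logging
#         num_steps=1000,
#         verbose=True,
#     )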
- -"""Project given image to the latent space of pretrained network pickle.""" - -import copy -import wandb -import numpy as np -import torch -import torch.nn.functional as F -from tqdm import tqdm -from configs import global_config, hyperparameters -import dnnlib -from utils.log_utils import log_image_from_w - - -def project( - G, - target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution - *, - num_steps=1000, - w_avg_samples=10000, - initial_learning_rate=0.01, - initial_noise_factor=0.05, - lr_rampdown_length=0.25, - lr_rampup_length=0.05, - noise_ramp_length=0.75, - regularize_noise_weight=1e5, - verbose=False, - device: torch.device, - use_wandb=False, - initial_w=None, - image_log_step=global_config.image_rec_result_log_snapshot, - w_name: str, -): - assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution) - - def logprint(*args): - if verbose: - print(*args) - - G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore - - # Compute w stats. - logprint(f"Computing W midpoint and stddev using {w_avg_samples} samples...") - z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) - w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C] - w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] - w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] - w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device) - w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 - - start_w = initial_w if initial_w is not None else w_avg - - # Setup noise inputs. - noise_bufs = { - name: buf - for (name, buf) in G.synthesis.named_buffers() - if "noise_const" in name - } - - # Load VGG16 feature detector. - url = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt" - with dnnlib.util.open_url(url) as f: - vgg16 = torch.jit.load(f).eval().to(device) - - # Features for target image. - target_images = target.unsqueeze(0).to(device).to(torch.float32) - if target_images.shape[2] > 256: - target_images = F.interpolate(target_images, size=(256, 256), mode="area") - target_features = vgg16(target_images, resize_images=False, return_lpips=True) - - start_w = np.repeat(start_w, G.mapping.num_ws, axis=1) - w_opt = torch.tensor( - start_w, dtype=torch.float32, device=device, requires_grad=True - ) # pylint: disable=not-callable - - optimizer = torch.optim.Adam( - [w_opt] + list(noise_bufs.values()), - betas=(0.9, 0.999), - lr=hyperparameters.first_inv_lr, - ) - - # Init noise. - for buf in noise_bufs.values(): - buf[:] = torch.randn_like(buf) - buf.requires_grad = True - - for step in tqdm(range(num_steps)): - - # Learning rate schedule. - t = step / num_steps - w_noise_scale = ( - w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 - ) - lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) - lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) - lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) - lr = initial_learning_rate * lr_ramp - for param_group in optimizer.param_groups: - param_group["lr"] = lr - - # Synth images from opt_w. - w_noise = torch.randn_like(w_opt) * w_noise_scale - ws = w_opt + w_noise - - synth_images = G.synthesis(ws, noise_mode="const", force_fp32=True) - - # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. 
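            # Editor's note (not part of the original file): G.synthesis returns images
            # in [-1, 1]; the next line maps them to the [0, 255] range that the
            # VGG16/LPIPS feature extractor expects, matching how `target_images`
            # (documented as dynamic range [0, 255]) was prepared above.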
- synth_images = (synth_images + 1) * (255 / 2) - if synth_images.shape[2] > 256: - synth_images = F.interpolate(synth_images, size=(256, 256), mode="area") - - # Features for synth images. - synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) - dist = (target_features - synth_features).square().sum() - - # Noise regularization. - reg_loss = 0.0 - for v in noise_bufs.values(): - noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() - while True: - reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 - reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 - if noise.shape[2] <= 8: - break - noise = F.avg_pool2d(noise, kernel_size=2) - loss = dist + reg_loss * regularize_noise_weight - - if step % image_log_step == 0: - with torch.no_grad(): - if use_wandb: - global_config.training_step += 1 - wandb.log( - {f"first projection _{w_name}": loss.detach().cpu()}, - step=global_config.training_step, - ) - log_image_from_w(w_opt, G, w_name) - - # Step - optimizer.zero_grad(set_to_none=True) - loss.backward() - optimizer.step() - logprint( - f"step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}" - ) - - # Normalize noise. - with torch.no_grad(): - for buf in noise_bufs.values(): - buf -= buf.mean() - buf *= buf.square().mean().rsqrt() - - del G - return w_opt diff --git a/spaces/enzostvs/stable-diffusion-tpu/components/react-query/providers.tsx b/spaces/enzostvs/stable-diffusion-tpu/components/react-query/providers.tsx deleted file mode 100644 index 9d155bea99dab75c0f296b07826fe95acd4d1410..0000000000000000000000000000000000000000 --- a/spaces/enzostvs/stable-diffusion-tpu/components/react-query/providers.tsx +++ /dev/null @@ -1,11 +0,0 @@ -"use client"; -import * as React from "react"; -import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; - -export default function Providers({ children }: { children: React.ReactNode }) { - const [queryClient] = React.useState(() => new QueryClient()); - - return ( - {children} - ); -} diff --git a/spaces/eson/kplug/app.py b/spaces/eson/kplug/app.py deleted file mode 100644 index c2d7b6d7655e9a2687a191ad4b1a8d60d828ccb2..0000000000000000000000000000000000000000 --- a/spaces/eson/kplug/app.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -# author: xusong -# time: 2022/8/23 16:06 - -""" -https://gradio.app/docs/#tabbedinterface-header - -## 更多任务 -- 抽取式摘要 -- 检索式对话 、 抽取式问答 -- -""" - -import gradio_patch -import gradio as gr -from demo_sum import sum_iface -from demo_mlm import mlm_iface -from demo_corrector import corr_iface -from demo_chatbot_jddc import jddc_iface - -demo = gr.TabbedInterface([sum_iface, mlm_iface, corr_iface, jddc_iface], ["生成式摘要", "文本填词", "句子纠错", "对话机器人"]) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/facebook/MusicGen/tests/modules/test_codebooks_patterns.py b/spaces/facebook/MusicGen/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
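# Editor's note (hedged illustration, not part of the original file): for the
# delayed pattern exercised below with its default delays [0, 1, ..., n_q - 1],
# the token of codebook q at timestep t is emitted at sequence step s = t + q + 1
# (the extra step is the initial empty step), which is exactly what the test's
# `assert code.t == max(0, s - code.q - 1)` checks. For n_q = 3 the layout is:
#
#     step s:  0    1    2    3    4   ...
#     q = 0:   -   t=0  t=1  t=2  t=3
#     q = 1:   -    -   t=0  t=1  t=2
#     q = 2:   -    -    -   t=0  t=1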
- -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = 
provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # 
expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/facebook/StyleNeRF/torch_utils/ops/filtered_lrelu.cpp b/spaces/facebook/StyleNeRF/torch_utils/ops/filtered_lrelu.cpp deleted file mode 100644 index ff4149b8b46b54d2f400ae10e44d19f20503ba1f..0000000000000000000000000000000000000000 --- a/spaces/facebook/StyleNeRF/torch_utils/ops/filtered_lrelu.cpp +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "filtered_lrelu.h" - -//------------------------------------------------------------------------ - -static std::tuple filtered_lrelu( - torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, - int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) -{ - // Set CUDA device. 
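    // Editor's note (hedged summary, not part of the original file): this entry point
    // fuses three stages into the main kernel: 2D upsampling by `up` with filter `fu`,
    // a bias + leaky-ReLU nonlinearity controlled by `gain`, `slope` and `clamp`, and
    // 2D downsampling by `down` with filter `fd`. The optional sign tensor (`si` in,
    // `so` out) packs 2 bits per activation (4 activations per uint8 byte, hence the
    // `sw >> 2` sizing below) so the backward pass can avoid storing activations.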
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); - TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); - TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); - TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); - TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); - TORCH_CHECK(fu.numel() > 0, "fu is empty"); - TORCH_CHECK(fd.numel() > 0, "fd is empty"); - TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); - TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); - - // Figure out how much shared memory is available on the device. - int maxSharedBytes = 0; - AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); - int sharedKB = maxSharedBytes >> 10; - - // Populate enough launch parameters to check if a CUDA kernel exists. - filtered_lrelu_kernel_params p; - p.up = up; - p.down = down; - p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. - p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); - filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); - if (!test_spec.exec) - { - // No kernel found - return empty tensors and indicate missing kernel with return code of -1. - return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); - } - - // Input/output element size. - int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; - - // Input sizes. - int64_t xw = (int)x.size(3); - int64_t xh = (int)x.size(2); - int64_t fut_w = (int)fu.size(-1) - 1; - int64_t fut_h = (int)fu.size(0) - 1; - int64_t fdt_w = (int)fd.size(-1) - 1; - int64_t fdt_h = (int)fd.size(0) - 1; - - // Logical size of upsampled buffer. - int64_t cw = xw * up + (px0 + px1) - fut_w; - int64_t ch = xh * up + (py0 + py1) - fut_h; - TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); - TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); - - // Compute output size and allocate. - int64_t yw = (cw - fdt_w + (down - 1)) / down; - int64_t yh = (ch - fdt_h + (down - 1)) / down; - TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); - TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); - - // Allocate sign tensor. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - int64_t sw_active = 0; // Active width of sign tensor. - if (writeSigns) - { - sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. 
- int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. - int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. - TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); - s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - else if (readSigns) - sw_active = s.size(3) << 2; - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); - } - - // Populate rest of CUDA kernel parameters. - p.x = x.data_ptr(); - p.y = y.data_ptr(); - p.b = b.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.fu = fu.data_ptr(); - p.fd = fd.data_ptr(); - p.pad0 = make_int2(px0, py0); - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.flip = (flip_filters) ? 1 : 0; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. - p.sOfs = make_int2(sx, sy); - p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. - - // x, y, b strides are in bytes. - p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); - p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); - p.bStride = sz * b.stride(0); - - // fu, fd strides are in elements. - p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); - p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); - - // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. - bool index64b = false; - if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; - if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; - if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; - if (s.numel() > INT_MAX) index64b = true; - - // Choose CUDA kernel. - filtered_lrelu_kernel_spec spec = { 0 }; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] - { - if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. 
- { - // Choose kernel based on index type, datatype and sign read/write modes. - if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - } - }); - TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = spec.numWarps * 32; - int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; - int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; - int gz = p.yShape.z * p.yShape.w; - - // Repeat multiple horizontal tiles in a CTA? - if (spec.xrep) - { - p.tilesXrep = spec.xrep; - p.tilesXdim = gx; - - gx = (gx + p.tilesXrep - 1) / p.tilesXrep; - std::swap(gx, gy); - } - else - { - p.tilesXrep = 0; - p.tilesXdim = 0; - } - - // Launch filter setup kernel. - AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); - - // Copy kernels to constant memory. - if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - - // Set cache and shared memory configurations for main kernel. - AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); - if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? - AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); - AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); - - // Launch main kernel. - const int maxSubGz = 65535; // CUDA maximum for block z dimension. - for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. - { - p.blockZofs = zofs; - int subGz = std::min(maxSubGz, gz - zofs); - AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); - } - - // Done. - return std::make_tuple(y, so, 0); -} - -//------------------------------------------------------------------------ - -static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); - - // Output signs if we don't have sign input. 
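    // Editor's note (hedged summary, not part of the original file): unlike the fused
    // `filtered_lrelu` above, this routine does no filtering and adds no bias. It
    // applies the gain/slope/clamp nonlinearity to `x` in place and, when `writeSigns`
    // is set, records activation signs into a freshly allocated uint8 tensor; the
    // trailing underscore in the exported name `filtered_lrelu_act_` marks the
    // in-place behavior.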
- torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - if (writeSigns) - { - int64_t sw = x.size(3); - sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. - s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); - } - - // Initialize CUDA kernel parameters. - filtered_lrelu_act_kernel_params p; - p.x = x.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. - p.sOfs = make_int2(sx, sy); - - // Choose CUDA kernel. - void* func = 0; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] - { - if (writeSigns) - func = choose_filtered_lrelu_act_kernel(); - else if (readSigns) - func = choose_filtered_lrelu_act_kernel(); - else - func = choose_filtered_lrelu_act_kernel(); - }); - TORCH_CHECK(func, "internal error - CUDA kernel not found"); - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = 128; // 4 warps per block. - - // Logical size of launch = writeSigns ? p.s : p.x - uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; - uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; - uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. - gx = (gx - 1) / bx + 1; - - // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. - const uint32_t gmax = 65535; - gy = std::min(gy, gmax); - gz = std::min(gz, gmax); - - // Launch. - AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); - return so; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. - m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. -} - -//------------------------------------------------------------------------ diff --git a/spaces/falterWliame/Face_Mask_Detection/Descargar Biblioteca Electronica Libronix 2013 Gratis En 107.md b/spaces/falterWliame/Face_Mask_Detection/Descargar Biblioteca Electronica Libronix 2013 Gratis En 107.md deleted file mode 100644 index d91f545401f61bec3efbf7e61ab4adb61a5ff84a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Descargar Biblioteca Electronica Libronix 2013 Gratis En 107.md +++ /dev/null @@ -1,21 +0,0 @@ - -

    How to download Biblioteca Electrónica Libronix 2013 free in 107?

    -

    Biblioteca Electrónica Libronix 2013 is a software package that gives you access to thousands of digital books on different subjects, such as theology, history, philosophy, literature, and more. With this program you can read, study, search, and compare texts quickly and easily. You can also customize your library with the collections that interest you most and synchronize it with other devices.

    -

    If you want to download Biblioteca Electrónica Libronix 2013 free in 107, just follow these steps:

    -

    Download Biblioteca Electronica Libronix 2013 Free In 107


    DOWNLOAD ››› https://urlca.com/2uDc7k



    -
      -
    1. Go to the official Libronix website: https://www.logos.com/es/libronix
    2. -
    3. Click the "Descargar ahora" (Download now) button and choose the "Libronix 2013" option.
    4. -
    5. Run the downloaded file and follow the installation instructions.
    6. -
    7. Once the program is installed, open it and create a user account or sign in with an existing one.
    8. -
    9. In the program's main window, click the "Herramientas" (Tools) menu and then "Biblioteca" (Library).
    10. -
    11. In the "Recursos disponibles" (Available resources) section, select the collections you want to download and click the "Descargar" (Download) button.
    12. -
    13. Wait for the download to finish and enjoy your electronic library.
    14. -
    -

    That is how easy it is to download Biblioteca Electrónica Libronix 2013 free in 107. Remember that you can update the program and its resources periodically to get access to the latest releases. You can also explore other features of the program, such as the e-book reader, the advanced search, the text analyzer, and more. With Libronix, you will have a complete library at your fingertips.

    - -

    Libronix is software developed by Logos Bible Software, a leading company in the publishing and distribution of Christian digital books. Libronix offers an integrated platform that combines content from more than 100 recognized publishers and authors with high-quality study and research tools. With Libronix, you can access a library of more than 20,000 resources, including Bibles, commentaries, dictionaries, encyclopedias, atlases, journals, and more.

    -

    Libronix is compatible with Windows, Mac, and Linux, and also has versions for mobile devices such as Android and iOS. In addition, Libronix includes a cloud service that lets you synchronize your library across devices and access it from anywhere. You can also share your notes, highlights, and quotations with other Libronix users and take part in online study groups.

    -

    Libronix is ideal software for students, teachers, pastors, and anyone interested in studying the Bible and Christian culture. With Libronix, you can deepen your knowledge of the Word of God and enrich your spiritual life. Libronix is more than a program; it is a community of learning and growth.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Jimmy Tonik Custom Nude Set Megagolkes.md b/spaces/falterWliame/Face_Mask_Detection/Jimmy Tonik Custom Nude Set Megagolkes.md deleted file mode 100644 index f0b74b2d0a418351ccc65816b5689848be04aa10..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Jimmy Tonik Custom Nude Set Megagolkes.md +++ /dev/null @@ -1,10 +0,0 @@ -

    jimmy tonik custom nude set megagolkes


    Download File >>> https://urlca.com/2uDd5E



    -✨OBERBLE✨ -The police station chief's office is dark. The windows are tightly curtained. And there are two candles burning on the desk, which illuminate only a small piece of countertop and a chiseled silhouette. -Mr. Jeong? It's raining outside. But it's warm in the office. So we're just sitting there in a sweater and pants. ------ -✨OBSERB✨ 8a78ff9644
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Lm567 Proteus Download For 82.md b/spaces/falterWliame/Face_Mask_Detection/Lm567 Proteus Download For 82.md deleted file mode 100644 index a61e9f0d507510cd419ea43c67fec9e5e2f7ca92..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Lm567 Proteus Download For 82.md +++ /dev/null @@ -1,48 +0,0 @@ -

    lm567 proteus download for 82


    DOWNLOAD ✺✺✺ https://urlca.com/2uDcb1



    -
    -[unrecoverable mojibake removed] 4fefd39f24
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Playway To English 3 Teachers.epub __LINK__.md b/spaces/falterWliame/Face_Mask_Detection/Playway To English 3 Teachers.epub __LINK__.md deleted file mode 100644 index 0bcc0a5abd2abcc7371e88018e47acf4c759fc79..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Playway To English 3 Teachers.epub __LINK__.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Playway To English 3 Teacher's.epub


    Download Zip 🗸🗸🗸 https://urlca.com/2uDc65



    -
    -Document viewer online [e-book - pdf - epub]: Playway to English Level 1 Pupil's Book, Playway ... Playway to English Level 3 Teacher's Book, 2nd Edition [pdf] free. 1fdad05405
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Bounce Tales A Platformer Game Inspired by the Nokia Classic.md b/spaces/fatiXbelha/sd/Bounce Tales A Platformer Game Inspired by the Nokia Classic.md deleted file mode 100644 index b531aa1dcbb988abae2c0f591f19b73921dff9b2..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Bounce Tales A Platformer Game Inspired by the Nokia Classic.md +++ /dev/null @@ -1,90 +0,0 @@ -
    -

    Nokia Bounce Ball Game Free Download for Android

    -

    Do you remember playing Bounce on your old Nokia phone? It was one of the most popular games that came pre-installed on many Nokia models. It was a simple but addictive game where you had to control a red ball and collect rings, avoid obstacles, and reach the end of each level. If you miss this game and want to relive the nostalgia, you can now download it for free on your Android devices. In this article, we will tell you how to do that and what the features and benefits of playing Nokia Bounce Ball game on your smartphone or tablet are.

    -

    Introduction

    -

    Nokia Bounce Ball game is a fan-made remake of the original Bounce game that was developed by Nokia for its Java platform. The game has been updated with improved graphics, sounds, and controls to suit modern Android devices. The game consists of 11 original levels from the classic Bounce game, as well as many new episodes with more levels and challenges. You can download the game from the Google Play Store or from other sources like APKCombo. To install the game, you need an Android device running at least Android 4.4 with 96 MB of free space. You also need to allow the app to access your location, app activity, device ID, and other data for data safety purposes.

    -

    nokia bounce ball game free download for android


    Download Zip ►►► https://urllie.com/2uNDXO



    -

    Features of Nokia Bounce Ball Game

    -

    Original and new episodes

    -

    The game has two modes: Original and New. In the Original mode, you can play the 11 levels that were in the classic Bounce game. These levels are based on different themes like forest, cave, factory, etc. You have to collect all the rings in each level and avoid hitting spikes, enemies, or falling into pits. You also have to use springs, trampolines, magnets, and other objects to bounce your way through. In the New mode, you can play many more episodes that have been added by the developers. These episodes have different names like Adventure, Jungle, Space, etc. They have more levels with more variety and difficulty. You can unlock new episodes by completing previous ones or by watching ads.

    -

    Classic and modern graphics

    -

    The game has a retro style that resembles the old Nokia games. The graphics are colorful and pixelated, but they are also smooth and clear on different screen sizes. The game also has the original sounds and effects that add to the nostalgia factor: you can hear the familiar bounce sound when you jump, or the ring sound when you collect a ring, and you can adjust the sound volume or mute it from the settings menu. At the same time, the game has a modern touch that suits Android devices: you can choose between portrait and landscape mode, depending on your preference, and pause or resume the game anytime by tapping on the screen.

    -

    Simple and intuitive controls

    -

    The game has easy and intuitive controls that let you play with one hand or two. You can use either virtual buttons or swipe gestures to move left or right and to jump or bounce higher, and you can change the sensitivity of the controls from the settings menu. There is also an option to switch between left-handed and right-handed mode, depending on which hand you use to play, as well as a tutorial that explains the basic controls and objectives of the game.

    -

    Benefits of Nokia Bounce Ball Game

    -

    Fun and addictive gameplay

    -

    Nokia Bounce Ball game is a fun and addictive game that will keep you entertained for hours. It has simple but challenging gameplay that requires skill, timing, and strategy: you have to bounce your ball through various obstacles and past enemies while collecting rings and bonuses, without losing lives or running out of time. The game has different levels of difficulty, from easy to hard, that will test your abilities and make you want to beat your own score, and a leaderboard that shows your rank and achievements among other players.

    -

    Nostalgic value

    -

    Nokia Bounce Ball game will bring back memories of the old Nokia phones and games. It has a retro style that mimics the original Bounce, which was popular in the early 2000s, along with the original sounds and effects that make you feel like you are playing on a Nokia device. The game is a tribute to the classic Bounce that many people loved and enjoyed, and a treat for anyone who wants to experience the old Nokia games on a modern Android device.

    -

    Free and safe to use

    -

    Nokia Bounce Ball game is free to download and play on your Android devices. It does not require any registration or subscription, and it has no in-app purchases or hidden fees, so you can play as much as you want without spending any money. The game also does not contain any harmful or malicious content that could damage your device or data; it only asks for your location, app activity, device ID, and other data for data safety purposes. You can also opt out of sharing your data or delete your data from the settings menu.

    -


    -

    Conclusion

    -

    Nokia Bounce Ball game is a fan-made remake of the original Bounce game that Nokia developed for its Java platform, updated with improved graphics, sounds, and controls for modern Android devices. It offers the 11 original levels from the classic Bounce game plus many new episodes with more levels and challenges, fun and addictive gameplay that will keep you entertained for hours, and a nostalgic value that will bring back memories of the old Nokia phones and games. It is also free and safe to use on your Android devices. If you want to download and play Nokia Bounce Ball game on your smartphone or tablet, you can do so by following the links below:

    - -

    Have fun bouncing your way through Nokia Bounce Ball game!

    -

    FAQs

    -

    Q: How many levels are there in Nokia Bounce Ball game?

    -

    A: There are 11 original levels from the classic Bounce game, as well as many new episodes with more levels and challenges.

    -

    Q: How can I unlock new episodes in Nokia Bounce Ball game?

    -

    A: You can unlock new episodes by completing previous ones or by watching ads.

    -

    Q: How can I switch between portrait and landscape mode in Nokia Bounce Ball game?

    -

    A: You can switch between portrait and landscape mode by tapping the screen icon in the top right corner of the screen.

    -

    Q: How can I change the sensitivity of the controls in Nokia Bounce Ball game?

    -

    A: You can change the sensitivity of the controls by tapping the settings icon in the top left corner of the screen.

    -

    Q: How can I delete my data from Nokia Bounce Ball game?

    -

    A: You can delete your data from Nokia Bounce Ball game by tapping the settings icon in the top left corner of the screen, then tapping "Delete Data".

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py b/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py deleted file mode 100644 index 23ad81e082c4b6390b67b164d0ceb84bb0635684..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r2060" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 64 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/fbrynpk/image-caption-generator/README.md b/spaces/fbrynpk/image-caption-generator/README.md deleted file mode 100644 index ef80e1b563d32dec5c1f1567b3477a589a4b01c0..0000000000000000000000000000000000000000 --- a/spaces/fbrynpk/image-caption-generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Caption Generator -emoji: 🐨 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fclong/summary/fengshen/models/deberta_v2/modeling_deberta_v2.py b/spaces/fclong/summary/fengshen/models/deberta_v2/modeling_deberta_v2.py deleted file mode 100644 index d7437a1160cabb7f1446ee3c62bc6fa5a02a59ba..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/deberta_v2/modeling_deberta_v2.py +++ /dev/null @@ -1,1617 +0,0 @@ -# coding=utf-8 -# Copyright 2020 Microsoft and the Hugging Face Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" PyTorch DeBERTa-v2 model.""" - -import math -from collections.abc import Sequence -from typing import Optional, Tuple, Union - -import numpy as np -import torch -from torch import nn -from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss - -from transformers.activations import ACT2FN -from transformers.modeling_outputs import ( - BaseModelOutput, - MaskedLMOutput, - MultipleChoiceModelOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from transformers.modeling_utils import PreTrainedModel -from transformers.pytorch_utils import softmax_backward_data -from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging -from transformers import DebertaV2Config - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "DebertaV2Config" -_TOKENIZER_FOR_DOC = "DebertaV2Tokenizer" -_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge" - -DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "microsoft/deberta-v2-xlarge", - "microsoft/deberta-v2-xxlarge", - "microsoft/deberta-v2-xlarge-mnli", - "microsoft/deberta-v2-xxlarge-mnli", -] - - -# Copied from transformers.models.deberta.modeling_deberta.ContextPooler -class ContextPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) - self.dropout = StableDropout(config.pooler_dropout) - self.config = config - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - - context_token = hidden_states[:, 0] - context_token = self.dropout(context_token) - pooled_output = self.dense(context_token) - pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) - return pooled_output - - @property - def output_dim(self): - return self.config.hidden_size - - -# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2 -class XSoftmax(torch.autograd.Function): - """ - Masked Softmax which is optimized for saving memory - - Args: - input (`torch.tensor`): The input tensor that will apply softmax. - mask (`torch.IntTensor`): - The mask matrix where 0 indicate that element will be ignored in the softmax calculation. 
- dim (int): The dimension that will apply softmax - - Example: - - ```python - >>> import torch - >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax - - >>> # Make a tensor - >>> x = torch.randn([4, 20, 100]) - - >>> # Create a mask - >>> mask = (x > 0).int() - - >>> # Specify the dimension to apply softmax - >>> dim = -1 - - >>> y = XSoftmax.apply(x, mask, dim) - ```""" - - @staticmethod - def forward(self, input, mask, dim): - self.dim = dim - rmask = ~(mask.to(torch.bool)) - - output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) - output = torch.softmax(output, self.dim) - output.masked_fill_(rmask, 0) - self.save_for_backward(output) - return output - - @staticmethod - def backward(self, grad_output): - (output,) = self.saved_tensors - inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output) - return inputGrad, None, None - - @staticmethod - def symbolic(g, self, mask, dim): - import torch.onnx.symbolic_helper as sym_help - from torch.onnx.symbolic_opset9 import masked_fill, softmax - - mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) - r_mask = g.op( - "Cast", - g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), - to_i=sym_help.cast_pytorch_to_onnx["Byte"], - ) - output = masked_fill(g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.dtype).min))) - output = softmax(g, output, dim) - return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.uint8))) - - -# Copied from transformers.models.deberta.modeling_deberta.DropoutContext -class DropoutContext(object): - def __init__(self): - self.dropout = 0 - self.mask = None - self.scale = 1 - self.reuse_mask = True - - -# Copied from transformers.models.deberta.modeling_deberta.get_mask -def get_mask(input, local_context): - if not isinstance(local_context, DropoutContext): - dropout = local_context - mask = None - else: - dropout = local_context.dropout - dropout *= local_context.scale - mask = local_context.mask if local_context.reuse_mask else None - - if dropout > 0 and mask is None: - mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) - - if isinstance(local_context, DropoutContext): - if local_context.mask is None: - local_context.mask = mask - - return mask, dropout - - -# Copied from transformers.models.deberta.modeling_deberta.XDropout -class XDropout(torch.autograd.Function): - """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" - - @staticmethod - def forward(ctx, input, local_ctx): - mask, dropout = get_mask(input, local_ctx) - ctx.scale = 1.0 / (1 - dropout) - if dropout > 0: - ctx.save_for_backward(mask) - return input.masked_fill(mask, 0) * ctx.scale - else: - return input - - @staticmethod - def backward(ctx, grad_output): - if ctx.scale > 1: - (mask,) = ctx.saved_tensors - return grad_output.masked_fill(mask, 0) * ctx.scale, None - else: - return grad_output, None - - -# Copied from transformers.models.deberta.modeling_deberta.StableDropout -class StableDropout(nn.Module): - """ - Optimized dropout module for stabilizing the training - - Args: - drop_prob (float): the dropout probabilities - """ - - def __init__(self, drop_prob): - super().__init__() - self.drop_prob = drop_prob - self.count = 0 - self.context_stack = None - - def forward(self, x): - """ - Call the module - - Args: - x (`torch.tensor`): The input tensor to apply dropout - """ 
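For readers skimming the deleted file, the forward pass of `XSoftmax` above reduces to the eager sketch below: push masked scores to the dtype minimum, softmax, then re-zero the masked slots. This sketch is illustrative only and is not part of the original file; per its docstring, the custom `autograd.Function` version exists to save memory rather than to change the math.

```python
import torch

def masked_softmax(scores: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    # 0 in `mask` means "ignore this element", matching the XSoftmax docstring
    rmask = ~mask.to(torch.bool)
    # push masked scores to the lowest representable value so softmax is ~0 there
    filled = scores.masked_fill(rmask, torch.finfo(scores.dtype).min)
    probs = torch.softmax(filled, dim=dim)
    # zero the masked slots exactly, as XSoftmax.forward does
    return probs.masked_fill(rmask, 0.0)

x = torch.randn(4, 20, 100)
mask = (x > 0).int()
y = masked_softmax(x, mask)  # matches XSoftmax.apply(x, mask, -1) in forward
```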
- if self.training and self.drop_prob > 0: - return XDropout.apply(x, self.get_context()) - return x - - def clear_context(self): - self.count = 0 - self.context_stack = None - - def init_context(self, reuse_mask=True, scale=1): - if self.context_stack is None: - self.context_stack = [] - self.count = 0 - for c in self.context_stack: - c.reuse_mask = reuse_mask - c.scale = scale - - def get_context(self): - if self.context_stack is not None: - if self.count >= len(self.context_stack): - self.context_stack.append(DropoutContext()) - ctx = self.context_stack[self.count] - ctx.dropout = self.drop_prob - self.count += 1 - return ctx - else: - return self.drop_prob - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm -class DebertaV2SelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2 -class DebertaV2Attention(nn.Module): - def __init__(self, config): - super().__init__() - self.self = DisentangledSelfAttention(config) - self.output = DebertaV2SelfOutput(config) - self.config = config - - def forward( - self, - hidden_states, - attention_mask, - output_attentions=False, - query_states=None, - relative_pos=None, - rel_embeddings=None, - ): - self_output = self.self( - hidden_states, - attention_mask, - output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - self_output, att_matrix = self_output - if query_states is None: - query_states = hidden_states - attention_output = self.output(self_output, query_states) - - if output_attentions: - return (attention_output, att_matrix) - else: - return attention_output - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2 -class DebertaV2Intermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm -class DebertaV2Output(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer 
with Deberta->DebertaV2 -class DebertaV2Layer(nn.Module): - def __init__(self, config): - super().__init__() - self.attention = DebertaV2Attention(config) - self.intermediate = DebertaV2Intermediate(config) - self.output = DebertaV2Output(config) - - def forward( - self, - hidden_states, - attention_mask, - query_states=None, - relative_pos=None, - rel_embeddings=None, - output_attentions=False, - ): - attention_output = self.attention( - hidden_states, - attention_mask, - output_attentions=output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - attention_output, att_matrix = attention_output - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - if output_attentions: - return (layer_output, att_matrix) - else: - return layer_output - - -class ConvLayer(nn.Module): - def __init__(self, config): - super().__init__() - kernel_size = getattr(config, "conv_kernel_size", 3) - groups = getattr(config, "conv_groups", 1) - self.conv_act = getattr(config, "conv_act", "tanh") - self.conv = nn.Conv1d( - config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups - ) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config - - def forward(self, hidden_states, residual_states, input_mask): - out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous() - rmask = (1 - input_mask).bool() - out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0) - out = ACT2FN[self.conv_act](self.dropout(out)) - - layer_norm_input = residual_states + out - output = self.LayerNorm(layer_norm_input).to(layer_norm_input) - - if input_mask is None: - output_states = output - else: - if input_mask.dim() != layer_norm_input.dim(): - if input_mask.dim() == 4: - input_mask = input_mask.squeeze(1).squeeze(1) - input_mask = input_mask.unsqueeze(2) - - input_mask = input_mask.to(output.dtype) - output_states = output * input_mask - - return output_states - - -class DebertaV2Encoder(nn.Module): - """Modified BertEncoder with relative position bias support""" - - def __init__(self, config): - super().__init__() - - self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)]) - self.relative_attention = getattr(config, "relative_attention", False) - - if self.relative_attention: - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - - self.position_buckets = getattr(config, "position_buckets", -1) - pos_ebd_size = self.max_relative_positions * 2 - - if self.position_buckets > 0: - pos_ebd_size = self.position_buckets * 2 - - self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size) - - self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")] - - if "layer_norm" in self.norm_rel_ebd: - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True) - - self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None - self.gradient_checkpointing = False - - def get_rel_embedding(self): - rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None - if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd): - rel_embeddings 
= self.LayerNorm(rel_embeddings) - return rel_embeddings - - def get_attention_mask(self, attention_mask): - if attention_mask.dim() <= 2: - extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) - attention_mask = attention_mask.byte() - elif attention_mask.dim() == 3: - attention_mask = attention_mask.unsqueeze(1) - - return attention_mask - - def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): - if self.relative_attention and relative_pos is None: - q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) - relative_pos = build_relative_position( - q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions - ) - return relative_pos - - def forward( - self, - hidden_states, - attention_mask, - output_hidden_states=True, - output_attentions=False, - query_states=None, - relative_pos=None, - return_dict=True, - ): - if attention_mask.dim() <= 2: - input_mask = attention_mask - else: - input_mask = (attention_mask.sum(-2) > 0).byte() - attention_mask = self.get_attention_mask(attention_mask) - relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[0] - else: - next_kv = hidden_states - rel_embeddings = self.get_rel_embedding() - output_states = next_kv - for i, layer_module in enumerate(self.layer): - - if output_hidden_states: - all_hidden_states = all_hidden_states + (output_states,) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - output_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - next_kv, - attention_mask, - query_states, - relative_pos, - rel_embeddings, - ) - else: - output_states = layer_module( - next_kv, - attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - ) - - if output_attentions: - output_states, att_m = output_states - - if i == 0 and self.conv is not None: - output_states = self.conv(hidden_states, output_states, input_mask) - - if query_states is not None: - query_states = output_states - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None - else: - next_kv = output_states - - if output_attentions: - all_attentions = all_attentions + (att_m,) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (output_states,) - - if not return_dict: - return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions - ) - - -def make_log_bucket_position(relative_pos, bucket_size, max_position): - sign = np.sign(relative_pos) - mid = bucket_size // 2 - abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos)) - log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid - bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(np.int) - return bucket_pos - - -def build_relative_position(query_size, 
key_size, bucket_size=-1, max_position=-1): - """ - Build relative position according to the query and key - - We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key - \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q - - P_k\\) - - Args: - query_size (int): the length of query - key_size (int): the length of key - bucket_size (int): the size of position bucket - max_position (int): the maximum allowed absolute position - - Return: - `torch.LongTensor`: A tensor with shape [1, query_size, key_size] - - """ - q_ids = np.arange(0, query_size) - k_ids = np.arange(0, key_size) - rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1)) - if bucket_size > 0 and max_position > 0: - rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position) - rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long) - rel_pos_ids = rel_pos_ids[:query_size, :] - rel_pos_ids = rel_pos_ids.unsqueeze(0) - return rel_pos_ids - - -@torch.jit.script -# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand -def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): - return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]) - - -@torch.jit.script -# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand -def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): - return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)]) - - -@torch.jit.script -# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand -def pos_dynamic_expand(pos_index, p2c_att, key_layer): - return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) - - -class DisentangledSelfAttention(nn.Module): - """ - Disentangled self-attention module - - Parameters: - config (`DebertaV2Config`): - A model config class instance with the configuration to build a new model. 
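Two notes on the relative-position helpers above. First, `make_log_bucket_position` calls `.astype(np.int)`; that alias was deprecated in NumPy 1.20 and removed in 1.24, so on current NumPy it raises an `AttributeError`, and `astype(int)` is the drop-in fix. Second, the documented formula \\(R_{q \\rightarrow k} = P_q - P_k\\) is easy to sanity-check by hand; the sketch below is illustrative and not part of the deleted file:

```python
import numpy as np

query_size, key_size = 3, 3
q_ids = np.arange(query_size)
k_ids = np.arange(key_size)
# R[q, k] = P_q - P_k, exactly as in build_relative_position above
rel_pos = q_ids[:, None] - np.tile(k_ids, (query_size, 1))
print(rel_pos)
# [[ 0 -1 -2]
#  [ 1  0 -1]
#  [ 2  1  0]]
```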
The schema is similar to - *BertConfig*, for more details, please refer [`DebertaV2Config`] - - """ - - def __init__(self, config): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - self.num_attention_heads = config.num_attention_heads - _attention_head_size = config.hidden_size // config.num_attention_heads - self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) - self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) - self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) - - self.share_att_key = getattr(config, "share_att_key", False) - self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] - self.relative_attention = getattr(config, "relative_attention", False) - - if self.relative_attention: - self.position_buckets = getattr(config, "position_buckets", -1) - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - self.pos_ebd_size = self.max_relative_positions - if self.position_buckets > 0: - self.pos_ebd_size = self.position_buckets - - self.pos_dropout = StableDropout(config.hidden_dropout_prob) - - if not self.share_att_key: - if "c2p" in self.pos_att_type: - self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True) - if "p2c" in self.pos_att_type: - self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = StableDropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x, attention_heads): - new_x_shape = x.size()[:-1] + (attention_heads, -1) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1)) - - def forward( - self, - hidden_states, - attention_mask, - output_attentions=False, - query_states=None, - relative_pos=None, - rel_embeddings=None, - ): - """ - Call the module - - Args: - hidden_states (`torch.FloatTensor`): - Input states to the module usually the output from previous layer, it will be the Q,K and V in - *Attention(Q,K,V)* - - attention_mask (`torch.ByteTensor`): - An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum - sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j* - th token. - - output_attentions (`bool`, optional): - Whether return the attention matrix. - - query_states (`torch.FloatTensor`, optional): - The *Q* state in *Attention(Q,K,V)*. - - relative_pos (`torch.LongTensor`): - The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with - values ranging in [*-max_relative_positions*, *max_relative_positions*]. - - rel_embeddings (`torch.FloatTensor`): - The embedding of relative distances. It's a tensor of shape [\\(2 \\times - \\text{max_relative_positions}\\), *hidden_size*]. 
- - - """ - if query_states is None: - query_states = hidden_states - query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads) - key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads) - value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads) - - rel_att = None - # Take the dot product between "query" and "key" to get the raw attention scores. - scale_factor = 1 - if "c2p" in self.pos_att_type: - scale_factor += 1 - if "p2c" in self.pos_att_type: - scale_factor += 1 - scale = math.sqrt(query_layer.size(-1) * scale_factor) - attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale - if self.relative_attention: - rel_embeddings = self.pos_dropout(rel_embeddings) - rel_att = self.disentangled_attention_bias( - query_layer, key_layer, relative_pos, rel_embeddings, scale_factor - ) - - if rel_att is not None: - attention_scores = attention_scores + rel_att - attention_scores = attention_scores - attention_scores = attention_scores.view( - -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1) - ) - - # bsz x height x length x dimension - attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) - attention_probs = self.dropout(attention_probs) - context_layer = torch.bmm( - attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer - ) - context_layer = ( - context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1)) - .permute(0, 2, 1, 3) - .contiguous() - ) - new_context_layer_shape = context_layer.size()[:-2] + (-1,) - context_layer = context_layer.view(new_context_layer_shape) - if output_attentions: - return (context_layer, attention_probs) - else: - return context_layer - - def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): - if relative_pos is None: - q = query_layer.size(-2) - relative_pos = build_relative_position( - q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions - ) - if relative_pos.dim() == 2: - relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) - elif relative_pos.dim() == 3: - relative_pos = relative_pos.unsqueeze(1) - # bsz x height x query x key - elif relative_pos.dim() != 4: - raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}") - - att_span = self.pos_ebd_size - relative_pos = relative_pos.long().to(query_layer.device) - - rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0) - if self.share_att_key: - pos_query_layer = self.transpose_for_scores( - self.query_proj(rel_embeddings), self.num_attention_heads - ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) - pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) - else: - if "c2p" in self.pos_att_type: - pos_key_layer = self.transpose_for_scores( - self.pos_key_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) - if "p2c" in self.pos_att_type: - pos_query_layer = self.transpose_for_scores( - self.pos_query_proj(rel_embeddings), self.num_attention_heads - ).repeat( - query_layer.size(0) // self.num_attention_heads, 1, 1 - ) # .split(self.all_head_size, dim=-1) - - score = 0 - # content->position - if "c2p" in self.pos_att_type: - scale = math.sqrt(pos_key_layer.size(-1) * scale_factor) - c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2)) - c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) - c2p_att = torch.gather( - c2p_att, - dim=-1, - index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]), - ) - score += c2p_att / scale - - # position->content - if "p2c" in self.pos_att_type: - scale = math.sqrt(pos_query_layer.size(-1) * scale_factor) - if key_layer.size(-2) != query_layer.size(-2): - r_pos = build_relative_position( - key_layer.size(-2), - key_layer.size(-2), - bucket_size=self.position_buckets, - max_position=self.max_relative_positions, - ).to(query_layer.device) - r_pos = r_pos.unsqueeze(0) - else: - r_pos = relative_pos - - p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) - p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2)) - p2c_att = torch.gather( - p2c_att, - dim=-1, - index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]), - ).transpose(-1, -2) - score += p2c_att / scale - - return score - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm -class DebertaV2Embeddings(nn.Module): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config): - super().__init__() - pad_token_id = getattr(config, "pad_token_id", 0) - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) - - self.position_biased_input = getattr(config, "position_biased_input", True) - if not self.position_biased_input: - self.position_embeddings = None - else: - self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) - - if config.type_vocab_size > 0: - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) - - if self.embedding_size != config.hidden_size: - self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - 
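The index arithmetic in `disentangled_attention_bias` above is the subtle part: relative distances are shifted by `att_span` and clamped into `[0, 2*att_span - 1]` so they address rows of the `rel_embeddings[0 : att_span * 2]` slice. A minimal illustration of the content-to-position index (mine, not from the file):

```python
import torch

att_span = 4  # stands in for self.pos_ebd_size in the module above
relative_pos = torch.tensor([-5, -4, -1, 0, 3, 4])  # query-to-key distances
# content->position index: shift then saturate, as in the c2p branch above
c2p_pos = torch.clamp(relative_pos + att_span, 0, 2 * att_span - 1)
print(c2p_pos)  # tensor([0, 0, 3, 4, 7, 7]); out-of-range distances saturate
# the p2c branch applies the same clamp to the negated distances:
# clamp(-relative_pos + att_span, 0, 2 * att_span - 1)
```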
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] - - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - - if self.position_embeddings is not None: - position_embeddings = self.position_embeddings(position_ids.long()) - else: - position_embeddings = torch.zeros_like(inputs_embeds) - - embeddings = inputs_embeds - if self.position_biased_input: - embeddings += position_embeddings - if self.config.type_vocab_size > 0: - token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings += token_type_embeddings - - if self.embedding_size != self.config.hidden_size: - embeddings = self.embed_proj(embeddings) - - embeddings = self.LayerNorm(embeddings) - - # if mask is not None: - # if mask.dim() != embeddings.dim(): - # if mask.dim() == 4: - # mask = mask.squeeze(1).squeeze(1) - # mask = mask.unsqueeze(2) - # mask = mask.to(embeddings.dtype) - - # embeddings = embeddings * mask - - embeddings = self.dropout(embeddings) - return embeddings - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2 -class DebertaV2PreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = DebertaV2Config - base_model_prefix = "deberta" - _keys_to_ignore_on_load_missing = ["position_ids"] - _keys_to_ignore_on_load_unexpected = ["position_embeddings"] - supports_gradient_checkpointing = True - - def _init_weights(self, module): - """Initialize the weights.""" - if isinstance(module, nn.Linear): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, DebertaV2Encoder): - module.gradient_checkpointing = value - - -DEBERTA_START_DOCSTRING = r""" - The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled - Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build - on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two - improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data. - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior.``` - - - Parameters: - config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -DEBERTA_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`DebertaV2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert *input_ids* indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", - DEBERTA_START_DOCSTRING, -) -# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2 -class DebertaV2Model(DebertaV2PreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.embeddings = DebertaV2Embeddings(config) - self.encoder = DebertaV2Encoder(config) - self.z_steps = 0 - self.config = config - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, new_embeddings): - self.embeddings.word_embeddings = new_embeddings - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - raise NotImplementedError("The prune function is not implemented in DeBERTa model.") - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - embedding_output = self.embeddings( - input_ids=input_ids, - token_type_ids=token_type_ids, - position_ids=position_ids, - mask=attention_mask, - inputs_embeds=inputs_embeds, - ) - - encoder_outputs = self.encoder( - embedding_output, - attention_mask, - output_hidden_states=True, - output_attentions=output_attentions, - return_dict=return_dict, - ) - encoded_layers = encoder_outputs[1] - - if self.z_steps > 1: - hidden_states = encoded_layers[-2] - layers = [self.encoder.layer[-1] for _ in range(self.z_steps)] - query_states = encoded_layers[-1] - rel_embeddings = self.encoder.get_rel_embedding() - attention_mask = self.encoder.get_attention_mask(attention_mask) - rel_pos = self.encoder.get_rel_pos(embedding_output) - for layer in layers[1:]: - query_states = layer( - hidden_states, - attention_mask, - output_attentions=False, - query_states=query_states, - relative_pos=rel_pos, - rel_embeddings=rel_embeddings, - ) - encoded_layers.append(query_states) - - sequence_output = encoded_layers[-1] - - if not return_dict: - return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :] - - return BaseModelOutput( - last_hidden_state=sequence_output, - hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) -# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2 -class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel): - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = 
[r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, config): - super().__init__(config) - - self.deberta = DebertaV2Model(config) - self.cls = DebertaV2OnlyMLMHead(config) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MaskedLMOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, MaskedLMOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - """ - - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.deberta( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) - - masked_lm_loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() # -100 index = padding token - masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (prediction_scores,) + outputs[1:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - - return MaskedLMOutput( - loss=masked_lm_loss, - logits=prediction_scores, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta -class DebertaV2PredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta -class DebertaV2LMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = DebertaV2PredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an 
output-only bias for each token. - self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - -# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta -class DebertaV2OnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = DebertaV2LMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -@add_start_docstrings( - """ - DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the - pooled output) e.g. for GLUE tasks. - """, - DEBERTA_START_DOCSTRING, -) -# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2 -class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel): - def __init__(self, config): - super().__init__(config) - - num_labels = getattr(config, "num_labels", 2) - self.num_labels = num_labels - - self.deberta = DebertaV2Model(config) - self.pooler = ContextPooler(config) - output_dim = self.pooler.output_dim - - self.classifier = nn.Linear(output_dim, num_labels) - drop_out = getattr(config, "cls_dropout", None) - drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = StableDropout(drop_out) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.deberta.get_input_embeddings() - - def set_input_embeddings(self, new_embeddings): - self.deberta.set_input_embeddings(new_embeddings) - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=SequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, SequenceClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.deberta( - input_ids, - token_type_ids=token_type_ids, - attention_mask=attention_mask, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - encoder_layer = outputs[0] - pooled_output = self.pooler(encoder_layer) - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - - loss = None - if labels is not None: - if self.config.problem_type is None: - if self.num_labels == 1: - # regression task - loss_fn = nn.MSELoss() - logits = logits.view(-1).to(labels.dtype) - loss = loss_fn(logits, labels.view(-1)) - elif labels.dim() == 1 or labels.size(-1) == 1: - label_index = (labels >= 0).nonzero() - labels = labels.long() - if label_index.size(0) > 0: - labeled_logits = torch.gather( - logits, 0, label_index.expand(label_index.size(0), logits.size(1)) - ) - labels = torch.gather(labels, 0, label_index.view(-1)) - loss_fct = CrossEntropyLoss() - loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1)) - else: - loss = torch.tensor(0).to(logits) - else: - log_softmax = nn.LogSoftmax(-1) - loss = -((log_softmax(logits) * labels).sum(-1)).mean() - elif self.config.problem_type == "regression": - loss_fct = MSELoss() - if self.num_labels == 1: - loss = loss_fct(logits.squeeze(), labels.squeeze()) - else: - loss = loss_fct(logits, labels) - elif self.config.problem_type == "single_label_classification": - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - elif self.config.problem_type == "multi_label_classification": - loss_fct = BCEWithLogitsLoss() - loss = loss_fct(logits, labels) - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return SequenceClassifierOutput( - loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions - ) - - -@add_start_docstrings( - """ - DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for - Named-Entity-Recognition (NER) tasks. 
- """, - DEBERTA_START_DOCSTRING, -) -# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2 -class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel): - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.deberta = DebertaV2Model(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, TokenClassifierOutput]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.deberta( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TokenClassifierOutput( - loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions - ) - - -@add_start_docstrings( - """ - DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear - layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
- """, - DEBERTA_START_DOCSTRING, -) -# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2 -class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel): - _keys_to_ignore_on_load_unexpected = [r"pooler"] - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.deberta = DebertaV2Model(config) - self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=QuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - token_type_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - start_positions: Optional[torch.Tensor] = None, - end_positions: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, QuestionAnsweringModelOutput]: - r""" - start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. 
- """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - outputs = self.deberta( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1).contiguous() - end_logits = end_logits.squeeze(-1).contiguous() - - total_loss = None - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, split add a dimension - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - # sometimes the start/end positions are outside our model inputs, we ignore these terms - ignored_index = start_logits.size(1) - start_positions = start_positions.clamp(0, ignored_index) - end_positions = end_positions.clamp(0, ignored_index) - - loss_fct = CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, start_positions) - end_loss = loss_fct(end_logits, end_positions) - total_loss = (start_loss + end_loss) / 2 - - if not return_dict: - output = (start_logits, end_logits) + outputs[1:] - return ((total_loss,) + output) if total_loss is not None else output - - return QuestionAnsweringModelOutput( - loss=total_loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a - softmax) e.g. for RocStories/SWAG tasks. - """, - DEBERTA_START_DOCSTRING, -) -class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel): - def __init__(self, config): - super().__init__(config) - - num_labels = getattr(config, "num_labels", 2) - self.num_labels = num_labels - - self.deberta = DebertaV2Model(config) - self.pooler = ContextPooler(config) - output_dim = self.pooler.output_dim - - self.classifier = nn.Linear(output_dim, 1) - drop_out = getattr(config, "cls_dropout", None) - drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = StableDropout(drop_out) - - self.init_weights() - - def get_input_embeddings(self): - return self.deberta.get_input_embeddings() - - def set_input_embeddings(self, new_embeddings): - self.deberta.set_input_embeddings(new_embeddings) - - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=MultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - inputs_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): - r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): - Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., - num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See - `input_ids` above) - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] - - flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None - flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None - flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None - flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None - flat_inputs_embeds = ( - inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) - if inputs_embeds is not None - else None - ) - - outputs = self.deberta( - flat_input_ids, - position_ids=flat_position_ids, - token_type_ids=flat_token_type_ids, - attention_mask=flat_attention_mask, - inputs_embeds=flat_inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - encoder_layer = outputs[0] - pooled_output = self.pooler(encoder_layer) - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, num_choices) - - loss = None - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - - if not return_dict: - output = (reshaped_logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return MultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/string_decoder.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/string_decoder.d.ts deleted file mode 100644 index a5858041116333dfaa018a9e5cf89e3a9fc78d71..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/string_decoder.d.ts +++ /dev/null @@ -1,67 +0,0 @@ -/** - * The `string_decoder` module provides an API for decoding `Buffer` objects into - * strings in a manner that preserves encoded multi-byte UTF-8 and UTF-16 - * characters. It can be accessed using: - * - * ```js - * const { StringDecoder } = require('string_decoder'); - * ``` - * - * The following example shows the basic use of the `StringDecoder` class. - * - * ```js - * const { StringDecoder } = require('string_decoder'); - * const decoder = new StringDecoder('utf8'); - * - * const cent = Buffer.from([0xC2, 0xA2]); - * console.log(decoder.write(cent)); - * - * const euro = Buffer.from([0xE2, 0x82, 0xAC]); - * console.log(decoder.write(euro)); - * ``` - * - * When a `Buffer` instance is written to the `StringDecoder` instance, an - * internal buffer is used to ensure that the decoded string does not contain - * any incomplete multibyte characters. These are held in the buffer until the - * next call to `stringDecoder.write()` or until `stringDecoder.end()` is called. 
- * - * In the following example, the three UTF-8 encoded bytes of the European Euro - * symbol (`€`) are written over three separate operations: - * - * ```js - * const { StringDecoder } = require('string_decoder'); - * const decoder = new StringDecoder('utf8'); - * - * decoder.write(Buffer.from([0xE2])); - * decoder.write(Buffer.from([0x82])); - * console.log(decoder.end(Buffer.from([0xAC]))); - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/string_decoder.js) - */ -declare module 'string_decoder' { - class StringDecoder { - constructor(encoding?: BufferEncoding); - /** - * Returns a decoded string, ensuring that any incomplete multibyte characters at - * the end of the `Buffer`, or `TypedArray`, or `DataView` are omitted from the - * returned string and stored in an internal buffer for the next call to`stringDecoder.write()` or `stringDecoder.end()`. - * @since v0.1.99 - * @param buffer A `Buffer`, or `TypedArray`, or `DataView` containing the bytes to decode. - */ - write(buffer: Buffer): string; - /** - * Returns any remaining input stored in the internal buffer as a string. Bytes - * representing incomplete UTF-8 and UTF-16 characters will be replaced with - * substitution characters appropriate for the character encoding. - * - * If the `buffer` argument is provided, one final call to `stringDecoder.write()`is performed before returning the remaining input. - * After `end()` is called, the `stringDecoder` object can be reused for new input. - * @since v0.9.3 - * @param buffer A `Buffer`, or `TypedArray`, or `DataView` containing the bytes to decode. - */ - end(buffer?: Buffer): string; - } -} -declare module 'node:string_decoder' { - export * from 'string_decoder'; -} diff --git a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/data/aug.py b/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/data/aug.py deleted file mode 100644 index b1246250924e79511b58cd3d7ab79de8012f8949..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/data/aug.py +++ /dev/null @@ -1,84 +0,0 @@ -from albumentations import DualIAATransform, to_tuple -import imgaug.augmenters as iaa - -class IAAAffine2(DualIAATransform): - """Place a regular grid of points on the input and randomly move the neighbourhood of these point around - via affine transformations. - - Note: This class introduce interpolation artifacts to mask if it has values other than {0;1} - - Args: - p (float): probability of applying the transform. Default: 0.5. 
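    For instance (a sketch based on the constructor that follows),
    ``IAAAffine2(scale=(0.9, 1.1), rotate=10, p=1.0)`` applies a mild random
    affine jitter to both the image and its mask.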
-
-    Targets:
-        image, mask
-    """
-
-    def __init__(
-        self,
-        scale=(0.7, 1.3),
-        translate_percent=None,
-        translate_px=None,
-        rotate=0.0,
-        shear=(-0.1, 0.1),
-        order=1,
-        cval=0,
-        mode="reflect",
-        always_apply=False,
-        p=0.5,
-    ):
-        super(IAAAffine2, self).__init__(always_apply, p)
-        self.scale = dict(x=scale, y=scale)
-        self.translate_percent = to_tuple(translate_percent, 0)
-        self.translate_px = to_tuple(translate_px, 0)
-        self.rotate = to_tuple(rotate)
-        self.shear = dict(x=shear, y=shear)
-        self.order = order
-        self.cval = cval
-        self.mode = mode
-
-    @property
-    def processor(self):
-        return iaa.Affine(
-            self.scale,
-            self.translate_percent,
-            self.translate_px,
-            self.rotate,
-            self.shear,
-            self.order,
-            self.cval,
-            self.mode,
-        )
-
-    def get_transform_init_args_names(self):
-        return ("scale", "translate_percent", "translate_px", "rotate", "shear", "order", "cval", "mode")
-
-
-class IAAPerspective2(DualIAATransform):
-    """Perform a random four point perspective transform of the input.
-
-    Note: This class introduces interpolation artifacts to the mask if it has values other than {0;1}.
-
-    Args:
-        scale ((float, float)): standard deviation of the normal distributions. These are used to sample
-            the random distances of the subimage's corners from the full image's corners. Default: (0.05, 0.1).
-        p (float): probability of applying the transform. Default: 0.5.
-
-    Targets:
-        image, mask
-    """
-
-    def __init__(self, scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5,
-                 order=1, cval=0, mode="replicate"):
-        super(IAAPerspective2, self).__init__(always_apply, p)
-        self.scale = to_tuple(scale, 1.0)
-        self.keep_size = keep_size
-        self.cval = cval
-        self.mode = mode
-
-    @property
-    def processor(self):
-        return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size, mode=self.mode, cval=self.cval)
-
-    def get_transform_init_args_names(self):
-        return ("scale", "keep_size")
diff --git a/spaces/fgpzen/remove-photo-object/README.md b/spaces/fgpzen/remove-photo-object/README.md
deleted file mode 100644
index fe19898cabac7a393215e57625fbae53bb6197f8..0000000000000000000000000000000000000000
--- a/spaces/fgpzen/remove-photo-object/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Remove Photo Object
-emoji: ⚡
-colorFrom: pink
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.10.0
-python_version: 3.9.5
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/finaspirant/SearchWithVoice/app.py b/spaces/finaspirant/SearchWithVoice/app.py
deleted file mode 100644
index b7d2baa9ca5b659b67ab870879b470fa0c9849fb..0000000000000000000000000000000000000000
--- a/spaces/finaspirant/SearchWithVoice/app.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os
-
-import openai
-from dotenv import load_dotenv
-load_dotenv()
-openai.api_key = os.environ["OPENAI_API_KEY"]
-
-import gradio as gr
-from langchain.llms import OpenAI
-from interface import AudioInterface
-interface = AudioInterface()
-
-def process(filepath):
-    print(filepath)
-    audio = open(filepath, "rb")
-    transcript = openai.Audio.transcribe("whisper-1", audio)
-    llm = OpenAI(temperature=1)
-    # Query the LLM once and reuse the answer for both speech and text output.
-    response = llm(transcript["text"])
-    interface.speak(response)
-    return response
-
-demo = gr.Interface(
-    fn=process,
-    inputs=gr.Audio(source="microphone", type="filepath"),
-    outputs="text")
-demo.launch()
-
-
-"""
-
-from dotenv import load_dotenv
-load_dotenv()
-
-from interface import AudioInterface
-from agents import
SmartChatAgent - -interface = AudioInterface() -agent = SmartChatAgent() - -while True: - text = interface.listen() - response = agent.run(text) - interface.speak(response) - - -""" \ No newline at end of file diff --git a/spaces/freddyaboulton/all_demos_3/demos/blocks_layout/run.py b/spaces/freddyaboulton/all_demos_3/demos/blocks_layout/run.py deleted file mode 100644 index 2759b2d4fe6bfcebec4c863b19e750ab5ac11688..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/all_demos_3/demos/blocks_layout/run.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr - - -demo = gr.Blocks() - -with demo: - with gr.Row(): - gr.Image(interactive=True) - gr.Image() - with gr.Row(): - gr.Textbox(label="Text") - gr.Number(label="Count") - gr.Radio(choices=["One", "Two"]) - with gr.Row(): - with gr.Row(): - with gr.Column(): - gr.Textbox(label="Text") - gr.Number(label="Count") - gr.Radio(choices=["One", "Two"]) - gr.Image() - with gr.Column(): - gr.Image(interactive=True) - gr.Image() - gr.Image() - gr.Textbox(label="Text") - gr.Number(label="Count") - gr.Radio(choices=["One", "Two"]) - - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/freddyaboulton/gradio_foliumtest/src/backend/gradio_foliumtest/templates/example/index.js b/spaces/freddyaboulton/gradio_foliumtest/src/backend/gradio_foliumtest/templates/example/index.js deleted file mode 100644 index 66c53e723c2e89d6205a5648d7b96faae1ae5543..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/gradio_foliumtest/src/backend/gradio_foliumtest/templates/example/index.js +++ /dev/null @@ -1,88 +0,0 @@ -const { - SvelteComponent: f, - append: u, - attr: d, - detach: g, - element: o, - init: v, - insert: r, - noop: c, - safe_not_equal: y, - set_data: m, - text: b, - toggle_class: i -} = window.__gradio__svelte__internal; -function w(a) { - let e, n; - return { - c() { - e = o("div"), n = b( - /*value*/ - a[0] - ), d(e, "class", "svelte-1gecy8w"), i( - e, - "table", - /*type*/ - a[1] === "table" - ), i( - e, - "gallery", - /*type*/ - a[1] === "gallery" - ), i( - e, - "selected", - /*selected*/ - a[2] - ); - }, - m(t, l) { - r(t, e, l), u(e, n); - }, - p(t, [l]) { - l & /*value*/ - 1 && m( - n, - /*value*/ - t[0] - ), l & /*type*/ - 2 && i( - e, - "table", - /*type*/ - t[1] === "table" - ), l & /*type*/ - 2 && i( - e, - "gallery", - /*type*/ - t[1] === "gallery" - ), l & /*selected*/ - 4 && i( - e, - "selected", - /*selected*/ - t[2] - ); - }, - i: c, - o: c, - d(t) { - t && g(e); - } - }; -} -function h(a, e, n) { - let { value: t } = e, { type: l } = e, { selected: _ = !1 } = e; - return a.$$set = (s) => { - "value" in s && n(0, t = s.value), "type" in s && n(1, l = s.type), "selected" in s && n(2, _ = s.selected); - }, [t, l, _]; -} -class E extends f { - constructor(e) { - super(), v(this, e, h, w, y, { value: 0, type: 1, selected: 2 }); - } -} -export { - E as default -}; diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py deleted file mode 100644 index 6365908ddf6070086de2ffc0afada46ed2f32256..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/hooks/lr_updater.py +++ /dev/null @@ -1,670 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
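# A minimal usage sketch (an assumption, not from this file: mmcv-style
# runners typically build one of the hooks defined below from a config dict
# whose `policy` key is expanded to the matching `<Policy>LrUpdaterHook`):
#
#     lr_config = dict(
#         policy='CosineAnnealing',   # selects CosineAnnealingLrUpdaterHook
#         warmup='linear',            # optional warmup, see LrUpdaterHook
#         warmup_iters=500,
#         warmup_ratio=0.1,
#         min_lr_ratio=0.01)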
-import numbers -from math import cos, pi - -import annotator.uniformer.mmcv as mmcv -from .hook import HOOKS, Hook - - -class LrUpdaterHook(Hook): - """LR Scheduler in MMCV. - - Args: - by_epoch (bool): LR changes epoch by epoch - warmup (string): Type of warmup used. It can be None(use no warmup), - 'constant', 'linear' or 'exp' - warmup_iters (int): The number of iterations or epochs that warmup - lasts - warmup_ratio (float): LR used at the beginning of warmup equals to - warmup_ratio * initial_lr - warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters - means the number of epochs that warmup lasts, otherwise means the - number of iteration that warmup lasts - """ - - def __init__(self, - by_epoch=True, - warmup=None, - warmup_iters=0, - warmup_ratio=0.1, - warmup_by_epoch=False): - # validate the "warmup" argument - if warmup is not None: - if warmup not in ['constant', 'linear', 'exp']: - raise ValueError( - f'"{warmup}" is not a supported type for warming up, valid' - ' types are "constant" and "linear"') - if warmup is not None: - assert warmup_iters > 0, \ - '"warmup_iters" must be a positive integer' - assert 0 < warmup_ratio <= 1.0, \ - '"warmup_ratio" must be in range (0,1]' - - self.by_epoch = by_epoch - self.warmup = warmup - self.warmup_iters = warmup_iters - self.warmup_ratio = warmup_ratio - self.warmup_by_epoch = warmup_by_epoch - - if self.warmup_by_epoch: - self.warmup_epochs = self.warmup_iters - self.warmup_iters = None - else: - self.warmup_epochs = None - - self.base_lr = [] # initial lr for all param groups - self.regular_lr = [] # expected lr if no warming up is performed - - def _set_lr(self, runner, lr_groups): - if isinstance(runner.optimizer, dict): - for k, optim in runner.optimizer.items(): - for param_group, lr in zip(optim.param_groups, lr_groups[k]): - param_group['lr'] = lr - else: - for param_group, lr in zip(runner.optimizer.param_groups, - lr_groups): - param_group['lr'] = lr - - def get_lr(self, runner, base_lr): - raise NotImplementedError - - def get_regular_lr(self, runner): - if isinstance(runner.optimizer, dict): - lr_groups = {} - for k in runner.optimizer.keys(): - _lr_group = [ - self.get_lr(runner, _base_lr) - for _base_lr in self.base_lr[k] - ] - lr_groups.update({k: _lr_group}) - - return lr_groups - else: - return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr] - - def get_warmup_lr(self, cur_iters): - - def _get_warmup_lr(cur_iters, regular_lr): - if self.warmup == 'constant': - warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr] - elif self.warmup == 'linear': - k = (1 - cur_iters / self.warmup_iters) * (1 - - self.warmup_ratio) - warmup_lr = [_lr * (1 - k) for _lr in regular_lr] - elif self.warmup == 'exp': - k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) - warmup_lr = [_lr * k for _lr in regular_lr] - return warmup_lr - - if isinstance(self.regular_lr, dict): - lr_groups = {} - for key, regular_lr in self.regular_lr.items(): - lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr) - return lr_groups - else: - return _get_warmup_lr(cur_iters, self.regular_lr) - - def before_run(self, runner): - # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved, - # it will be set according to the optimizer params - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - for group in optim.param_groups: - group.setdefault('initial_lr', group['lr']) - _base_lr = [ - group['initial_lr'] for group in optim.param_groups - ] - 
self.base_lr.update({k: _base_lr}) - else: - for group in runner.optimizer.param_groups: - group.setdefault('initial_lr', group['lr']) - self.base_lr = [ - group['initial_lr'] for group in runner.optimizer.param_groups - ] - - def before_train_epoch(self, runner): - if self.warmup_iters is None: - epoch_len = len(runner.data_loader) - self.warmup_iters = self.warmup_epochs * epoch_len - - if not self.by_epoch: - return - - self.regular_lr = self.get_regular_lr(runner) - self._set_lr(runner, self.regular_lr) - - def before_train_iter(self, runner): - cur_iter = runner.iter - if not self.by_epoch: - self.regular_lr = self.get_regular_lr(runner) - if self.warmup is None or cur_iter >= self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - elif self.by_epoch: - if self.warmup is None or cur_iter > self.warmup_iters: - return - elif cur_iter == self.warmup_iters: - self._set_lr(runner, self.regular_lr) - else: - warmup_lr = self.get_warmup_lr(cur_iter) - self._set_lr(runner, warmup_lr) - - -@HOOKS.register_module() -class FixedLrUpdaterHook(LrUpdaterHook): - - def __init__(self, **kwargs): - super(FixedLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - return base_lr - - -@HOOKS.register_module() -class StepLrUpdaterHook(LrUpdaterHook): - """Step LR scheduler with min_lr clipping. - - Args: - step (int | list[int]): Step to decay the LR. If an int value is given, - regard it as the decay interval. If a list is given, decay LR at - these steps. - gamma (float, optional): Decay LR ratio. Default: 0.1. - min_lr (float, optional): Minimum LR value to keep. If LR after decay - is lower than `min_lr`, it will be clipped to this value. If None - is given, we don't perform lr clipping. Default: None. 
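    For instance (following the ``get_lr`` implementation below), with
    ``step=[8, 11]`` and ``gamma=0.1`` in epoch mode, the base lr is kept for
    epochs 0-7, multiplied by 0.1 for epochs 8-10, and by 0.01 from epoch 11
    onward.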
- """ - - def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): - if isinstance(step, list): - assert mmcv.is_list_of(step, int) - assert all([s > 0 for s in step]) - elif isinstance(step, int): - assert step > 0 - else: - raise TypeError('"step" must be a list or integer') - self.step = step - self.gamma = gamma - self.min_lr = min_lr - super(StepLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - - # calculate exponential term - if isinstance(self.step, int): - exp = progress // self.step - else: - exp = len(self.step) - for i, s in enumerate(self.step): - if progress < s: - exp = i - break - - lr = base_lr * (self.gamma**exp) - if self.min_lr is not None: - # clip to a minimum value - lr = max(lr, self.min_lr) - return lr - - -@HOOKS.register_module() -class ExpLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, **kwargs): - self.gamma = gamma - super(ExpLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * self.gamma**progress - - -@HOOKS.register_module() -class PolyLrUpdaterHook(LrUpdaterHook): - - def __init__(self, power=1., min_lr=0., **kwargs): - self.power = power - self.min_lr = min_lr - super(PolyLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - coeff = (1 - progress / max_progress)**self.power - return (base_lr - self.min_lr) * coeff + self.min_lr - - -@HOOKS.register_module() -class InvLrUpdaterHook(LrUpdaterHook): - - def __init__(self, gamma, power=1., **kwargs): - self.gamma = gamma - self.power = power - super(InvLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - progress = runner.epoch if self.by_epoch else runner.iter - return base_lr * (1 + self.gamma * progress)**(-self.power) - - -@HOOKS.register_module() -class CosineAnnealingLrUpdaterHook(LrUpdaterHook): - - def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): - """Flat + Cosine lr schedule. - - Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501 - - Args: - start_percent (float): When to start annealing the learning rate - after the percentage of the total training steps. - The value should be in range [0, 1). - Default: 0.75 - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. 
- """ - - def __init__(self, - start_percent=0.75, - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - if start_percent < 0 or start_percent > 1 or not isinstance( - start_percent, float): - raise ValueError( - 'expected float between 0 and 1 start_percent, but ' - f'got {start_percent}') - self.start_percent = start_percent - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) - - def get_lr(self, runner, base_lr): - if self.by_epoch: - start = round(runner.max_epochs * self.start_percent) - progress = runner.epoch - start - max_progress = runner.max_epochs - start - else: - start = round(runner.max_iters * self.start_percent) - progress = runner.iter - start - max_progress = runner.max_iters - start - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - if progress < 0: - return base_lr - else: - return annealing_cos(base_lr, target_lr, progress / max_progress) - - -@HOOKS.register_module() -class CosineRestartLrUpdaterHook(LrUpdaterHook): - """Cosine annealing with restarts learning rate scheme. - - Args: - periods (list[int]): Periods for each cosine anneling cycle. - restart_weights (list[float], optional): Restart weights at each - restart iteration. Default: [1]. - min_lr (float, optional): The minimum lr. Default: None. - min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. - Either `min_lr` or `min_lr_ratio` should be specified. - Default: None. - """ - - def __init__(self, - periods, - restart_weights=[1], - min_lr=None, - min_lr_ratio=None, - **kwargs): - assert (min_lr is None) ^ (min_lr_ratio is None) - self.periods = periods - self.min_lr = min_lr - self.min_lr_ratio = min_lr_ratio - self.restart_weights = restart_weights - assert (len(self.periods) == len(self.restart_weights) - ), 'periods and restart_weights should have the same length.' - super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) - - self.cumulative_periods = [ - sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) - ] - - def get_lr(self, runner, base_lr): - if self.by_epoch: - progress = runner.epoch - else: - progress = runner.iter - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - idx = get_position_from_periods(progress, self.cumulative_periods) - current_weight = self.restart_weights[idx] - nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] - current_periods = self.periods[idx] - - alpha = min((progress - nearest_restart) / current_periods, 1) - return annealing_cos(base_lr, target_lr, alpha, current_weight) - - -def get_position_from_periods(iteration, cumulative_periods): - """Get the position from a period list. - - It will return the index of the right-closest number in the period list. - For example, the cumulative_periods = [100, 200, 300, 400], - if iteration == 50, return 0; - if iteration == 210, return 2; - if iteration == 300, return 3. - - Args: - iteration (int): Current iteration. - cumulative_periods (list[int]): Cumulative period list. - - Returns: - int: The position of the right-closest number in the period list. 
- """ - for i, period in enumerate(cumulative_periods): - if iteration < period: - return i - raise ValueError(f'Current iteration {iteration} exceeds ' - f'cumulative_periods {cumulative_periods}') - - -@HOOKS.register_module() -class CyclicLrUpdaterHook(LrUpdaterHook): - """Cyclic LR Scheduler. - - Implement the cyclical learning rate policy (CLR) described in - https://arxiv.org/pdf/1506.01186.pdf - - Different from the original paper, we use cosine annealing rather than - triangular policy inside a cycle. This improves the performance in the - 3D detection area. - - Args: - by_epoch (bool): Whether to update LR by epoch. - target_ratio (tuple[float]): Relative ratio of the highest LR and the - lowest LR to the initial LR. - cyclic_times (int): Number of cycles during training - step_ratio_up (float): The ratio of the increasing process of LR in - the total cycle. - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. Default: 'cos'. - """ - - def __init__(self, - by_epoch=False, - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, - anneal_strategy='cos', - **kwargs): - if isinstance(target_ratio, float): - target_ratio = (target_ratio, target_ratio / 1e5) - elif isinstance(target_ratio, tuple): - target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ - if len(target_ratio) == 1 else target_ratio - else: - raise ValueError('target_ratio should be either float ' - f'or tuple, got {type(target_ratio)}') - - assert len(target_ratio) == 2, \ - '"target_ratio" must be list or tuple of two floats' - assert 0 <= step_ratio_up < 1.0, \ - '"step_ratio_up" must be in range [0,1)' - - self.target_ratio = target_ratio - self.cyclic_times = cyclic_times - self.step_ratio_up = step_ratio_up - self.lr_phases = [] # init lr_phases - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - - assert not by_epoch, \ - 'currently only support "by_epoch" = False' - super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) - - def before_run(self, runner): - super(CyclicLrUpdaterHook, self).before_run(runner) - # initiate lr_phases - # total lr_phases are separated as up and down - max_iter_per_phase = runner.max_iters // self.cyclic_times - iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) - self.lr_phases.append( - [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) - self.lr_phases.append([ - iter_up_phase, max_iter_per_phase, max_iter_per_phase, - self.target_ratio[0], self.target_ratio[1] - ]) - - def get_lr(self, runner, base_lr): - curr_iter = runner.iter - for (start_iter, end_iter, max_iter_per_phase, start_ratio, - end_ratio) in self.lr_phases: - curr_iter %= max_iter_per_phase - if start_iter <= curr_iter < end_iter: - progress = curr_iter - start_iter - return self.anneal_func(base_lr * start_ratio, - base_lr * end_ratio, - progress / (end_iter - start_iter)) - - -@HOOKS.register_module() -class OneCycleLrUpdaterHook(LrUpdaterHook): - """One Cycle LR Scheduler. - - The 1cycle learning rate policy changes the learning rate after every - batch. 
The one cycle learning rate policy is described in - https://arxiv.org/pdf/1708.07120.pdf - - Args: - max_lr (float or list): Upper learning rate boundaries in the cycle - for each parameter group. - total_steps (int, optional): The total number of steps in the cycle. - Note that if a value is not provided here, it will be the max_iter - of runner. Default: None. - pct_start (float): The percentage of the cycle (in number of steps) - spent increasing the learning rate. - Default: 0.3 - anneal_strategy (str): {'cos', 'linear'} - Specifies the annealing strategy: 'cos' for cosine annealing, - 'linear' for linear annealing. - Default: 'cos' - div_factor (float): Determines the initial learning rate via - initial_lr = max_lr/div_factor - Default: 25 - final_div_factor (float): Determines the minimum learning rate via - min_lr = initial_lr/final_div_factor - Default: 1e4 - three_phase (bool): If three_phase is True, use a third phase of the - schedule to annihilate the learning rate according to - final_div_factor instead of modifying the second phase (the first - two phases will be symmetrical about the step indicated by - pct_start). - Default: False - """ - - def __init__(self, - max_lr, - total_steps=None, - pct_start=0.3, - anneal_strategy='cos', - div_factor=25, - final_div_factor=1e4, - three_phase=False, - **kwargs): - # validate by_epoch, currently only support by_epoch = False - if 'by_epoch' not in kwargs: - kwargs['by_epoch'] = False - else: - assert not kwargs['by_epoch'], \ - 'currently only support "by_epoch" = False' - if not isinstance(max_lr, (numbers.Number, list, dict)): - raise ValueError('the type of max_lr must be the one of list or ' - f'dict, but got {type(max_lr)}') - self._max_lr = max_lr - if total_steps is not None: - if not isinstance(total_steps, int): - raise ValueError('the type of total_steps must be int, but' - f'got {type(total_steps)}') - self.total_steps = total_steps - # validate pct_start - if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): - raise ValueError('expected float between 0 and 1 pct_start, but ' - f'got {pct_start}') - self.pct_start = pct_start - # validate anneal_strategy - if anneal_strategy not in ['cos', 'linear']: - raise ValueError('anneal_strategy must be one of "cos" or ' - f'"linear", instead got {anneal_strategy}') - elif anneal_strategy == 'cos': - self.anneal_func = annealing_cos - elif anneal_strategy == 'linear': - self.anneal_func = annealing_linear - self.div_factor = div_factor - self.final_div_factor = final_div_factor - self.three_phase = three_phase - self.lr_phases = [] # init lr_phases - super(OneCycleLrUpdaterHook, self).__init__(**kwargs) - - def before_run(self, runner): - if hasattr(self, 'total_steps'): - total_steps = self.total_steps - else: - total_steps = runner.max_iters - if total_steps < runner.max_iters: - raise ValueError( - 'The total steps must be greater than or equal to max ' - f'iterations {runner.max_iters} of runner, but total steps ' - f'is {total_steps}.') - - if isinstance(runner.optimizer, dict): - self.base_lr = {} - for k, optim in runner.optimizer.items(): - _max_lr = format_param(k, optim, self._max_lr) - self.base_lr[k] = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(optim.param_groups, self.base_lr[k]): - group.setdefault('initial_lr', lr) - else: - k = type(runner.optimizer).__name__ - _max_lr = format_param(k, runner.optimizer, self._max_lr) - self.base_lr = [lr / self.div_factor for lr in _max_lr] - for group, lr in zip(runner.optimizer.param_groups, 
self.base_lr):
-                group.setdefault('initial_lr', lr)
-
-        if self.three_phase:
-            self.lr_phases.append(
-                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
-            self.lr_phases.append([
-                float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
-            ])
-            self.lr_phases.append(
-                [total_steps - 1, 1, 1 / self.final_div_factor])
-        else:
-            self.lr_phases.append(
-                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
-            self.lr_phases.append(
-                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])
-
-    def get_lr(self, runner, base_lr):
-        curr_iter = runner.iter
-        start_iter = 0
-        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
-            if curr_iter <= end_iter:
-                pct = (curr_iter - start_iter) / (end_iter - start_iter)
-                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
-                                      pct)
-                break
-            start_iter = end_iter
-        return lr
-
-
-def annealing_cos(start, end, factor, weight=1):
-    """Calculate annealing cos learning rate.
-
-    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
-    percentage goes from 0.0 to 1.0.
-
-    Args:
-        start (float): The starting learning rate of the cosine annealing.
-        end (float): The ending learning rate of the cosine annealing.
-        factor (float): The coefficient of `pi` when calculating the current
-            percentage. Range from 0.0 to 1.0.
-        weight (float, optional): The combination factor of `start` and `end`
-            when calculating the actual starting learning rate. Defaults to 1.
-    """
-    cos_out = cos(pi * factor) + 1
-    return end + 0.5 * weight * (start - end) * cos_out
-
-
-def annealing_linear(start, end, factor):
-    """Calculate annealing linear learning rate.
-
-    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.
-
-    Args:
-        start (float): The starting learning rate of the linear annealing.
-        end (float): The ending learning rate of the linear annealing.
-        factor (float): The annealing percentage. Range from 0.0 to 1.0.
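    For instance, ``annealing_linear(0.1, 0.0, 0.25)`` returns
    ``0.1 + (0.0 - 0.1) * 0.25 = 0.075``.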
- """ - return start + (end - start) * factor - - -def format_param(name, optim, param): - if isinstance(param, numbers.Number): - return [param] * len(optim.param_groups) - elif isinstance(param, (list, tuple)): # multi param groups - if len(param) != len(optim.param_groups): - raise ValueError(f'expected {len(optim.param_groups)} ' - f'values for {name}, got {len(param)}') - return param - else: # multi optimizers - if name not in param: - raise KeyError(f'{name} is not found in {param.keys()}') - return param[name] diff --git a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mobilenet.py b/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mobilenet.py deleted file mode 100644 index 878732ef0ec36b6bd8a0d7c651a5e81e54731d5e..0000000000000000000000000000000000000000 --- a/spaces/ghlee94/MEDIAR/segmentation_models_pytorch/encoders/mobilenet.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Each encoder should have following attributes and methods and be inherited from `_base.EncoderMixin` - -Attributes: - - _out_channels (list of int): specify number of channels for each encoder feature tensor - _depth (int): specify number of stages in decoder (in other words number of downsampling operations) - _in_channels (int): default number of input channels in first Conv2d layer for encoder (usually 3) - -Methods: - - forward(self, x: torch.Tensor) - produce list of features of different spatial resolutions, each feature is a 4D torch.tensor of - shape NCHW (features should be sorted in descending order according to spatial resolution, starting - with resolution same as input `x` tensor). - - Input: `x` with shape (1, 3, 64, 64) - Output: [f0, f1, f2, f3, f4, f5] - features with corresponding shapes - [(1, 3, 64, 64), (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8), - (1, 512, 4, 4), (1, 1024, 2, 2)] (C - dim may differ) - - also should support number of features according to specified depth, e.g. if depth = 5, - number of feature tensors = 6 (one with same resolution as input and 5 downsampled), - depth = 3 -> number of feature tensors = 4 (one with same resolution as input and 3 downsampled). 
-""" - -import torchvision -import torch.nn as nn - -from ._base import EncoderMixin - - -class MobileNetV2Encoder(torchvision.models.MobileNetV2, EncoderMixin): - def __init__(self, out_channels, depth=5, **kwargs): - super().__init__(**kwargs) - self._depth = depth - self._out_channels = out_channels - self._in_channels = 3 - del self.classifier - - def get_stages(self): - return [ - nn.Identity(), - self.features[:2], - self.features[2:4], - self.features[4:7], - self.features[7:14], - self.features[14:], - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict, **kwargs): - state_dict.pop("classifier.1.bias", None) - state_dict.pop("classifier.1.weight", None) - super().load_state_dict(state_dict, **kwargs) - - -mobilenet_encoders = { - "mobilenet_v2": { - "encoder": MobileNetV2Encoder, - "pretrained_settings": { - "imagenet": { - "mean": [0.485, 0.456, 0.406], - "std": [0.229, 0.224, 0.225], - "url": "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth", - "input_space": "RGB", - "input_range": [0, 1], - }, - }, - "params": {"out_channels": (3, 16, 24, 32, 96, 1280),}, - }, -} diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Advance Steel 2014 X64 (32X64bit) (Product Key And Xforce Keygen) Serial Key Keygen.md b/spaces/gotiQspiryo/whisper-ui/examples/Advance Steel 2014 X64 (32X64bit) (Product Key And Xforce Keygen) Serial Key Keygen.md deleted file mode 100644 index 70cc8b12ef0a05256c3f46e22d554babd425a3a2..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Advance Steel 2014 X64 (32X64bit) (Product Key And Xforce Keygen) Serial Key Keygen.md +++ /dev/null @@ -1,82 +0,0 @@ - -

    Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen

    -

    If you are a structural engineer or a designer who works with steel structures, you may have heard of Advance Steel. This is a software that allows you to create and detail 3D models of steel structures, such as buildings, bridges, or industrial plants. You can also generate fabrication drawings, bills of materials, NC files, and reports from your models.

    -

    Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen


    Download File ✒ ✒ ✒ https://urlgoal.com/2uyMeH



    -

    But how can you get Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen? This is a combination of codes and tools that can help you activate and use the software for free. In this article, we will show you how to do that from a reliable and safe source. We will also give you some tips and tricks to use the software more effectively. So, let's get started!

    -

    How to Get Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen

    -

    There are many websites that offer Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen, but not all of them are trustworthy or secure. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal data. Therefore, you need to be careful and choose a reputable source to get the codes and tools.

    -

    One of the best websites that we recommend is Civil MDC, which is a website that provides various software and books for civil engineers. Here are the steps to get Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen from this website:

    -
    1. Go to https://civilmdc.com/2020/03/10/x-force-keygenerator-autodesk-products-2014-all/ and scroll down until you see the download link.
    2. Click on the link and you will be redirected to a file hosting website called Mediafire.
    3. Click on the green button that says "Download" and wait for a few seconds until the download starts.
    4. Save the file to your computer and extract it using WinRAR or any other software that can open ZIP files.
    5. Open the extracted folder and double-click on the file that says "x-force_2014_x32.exe" or "x-force_2014_x64.exe" depending on your system.
    6. You will see a window that shows the X-force key generator. This is a tool that can generate product keys and serial keys for various Autodesk products, including Advance Steel 2014.
    7. Select "Advance Steel 2014" from the drop-down menu and click on "Generate". You will see a product key and a serial key in the boxes below.
    8. Copy the product key and the serial key and save them somewhere safe.
    9. Now you need to install Advance Steel 2014 on your computer. You can download it from https://www.autodesk.com/products/advance-steel/free-trial.
    10. Run the installer and follow the instructions. When asked to enter the product key and the serial number, paste the ones that you generated with the X-force key generator.
    11. Finish the installation and restart your computer.
    12. You have successfully installed Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen on your computer. You can now use it for free.
    -

    Tips and Tricks for Using Advance Steel 2014

    -

    Now that you have installed Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen on your computer, you may want to know some tips and tricks to use it more effectively. Here are some of them:

    -
    • You can use templates to create your models faster and easier. You can choose from various templates that are available in the software or create your own custom templates.
    • You can use macros to automate repetitive tasks or complex operations. You can record your own macros or use the ones that are provided by the software or by other users.
    • You can use connections to join different parts of your model together. You can choose from various types of connections, such as bolts, welds, cuts, or joints. You can also create your own custom connections.
    • You can use labels to annotate your model with text, dimensions, symbols, or images. You can customize your labels with different fonts, colors, styles, or formats.
    • You can use views to display different aspects of your model in different windows. You can create plan views, elevation views, section views, detail views, or perspective views. You can also adjust the scale, orientation, visibility, or appearance of your views.
    • You can use drawings to generate fabrication drawings, bills of materials, NC files, or reports from your model. You can customize your drawings with different layouts, titles, borders, scales, dimensions, annotations, or symbols.
    -

    -

    -

    What are the Features of Advance Steel 2014?

    -

    Advance Steel 2014 is a software that has many features that can help you create and detail 3D models of steel structures. Here are some of the features of this software:

    -
    • You can use parametric steel connections to join different parts of your model together. You can choose from over 300 types of connections, such as base plates, end plates, clip angles, haunches, or gussets. You can also modify or create your own custom connections.
    • You can use intelligent structural objects to create your model faster and easier. You can choose from various types of objects, such as beams, columns, braces, plates, bolts, welds, or grating. You can also modify or create your own custom objects.
    • You can use automatic tools to generate fabrication drawings, bills of materials, NC files, or reports from your model. You can customize your output with different templates, styles, formats, or symbols.
    • You can use bi-directional links to synchronize your model with other software, such as AutoCAD, Revit, Robot Structural Analysis, or Navisworks. You can also import or export your model in various formats, such as DWG, DXF, IFC, CIS/2, SDNF, or DSTV.
    • You can use collaboration tools to work with other users on the same project. You can use worksharing to divide your model into different parts and assign them to different users. You can also use revision control to track and manage the changes made to your model.
    -

    What are the Advantages of Using Advance Steel 2014?

    -

    Using Advance Steel 2014 has many advantages that can help you improve your productivity and quality of work. Here are some of the advantages of using this software:

    -
    • You can save time and money by creating and detailing your model in one software instead of using multiple software.
    • You can reduce errors and rework by using parametric steel connections and intelligent structural objects that automatically update when you change your model.
    • You can increase accuracy and consistency by using automatic tools that generate fabrication drawings, bills of materials, NC files, or reports from your model.
    • You can enhance coordination and communication by using bi-directional links that synchronize your model with other software or by using collaboration tools that allow you to work with other users on the same project.
    • You can achieve better results and satisfaction by using a software that is designed specifically for steel structures and that has a user-friendly interface and a comprehensive support system.
    -

    -

    How to Use Advance Steel 2014?

    -

    After you have installed Advance Steel 2014 on your computer, you can start using it to create and detail your steel structures. Here are the basic steps to use this software:

    -
    1. Launch Advance Steel 2014 and create a new project or open an existing one.
    2. Create your model using parametric steel connections and intelligent structural objects. You can also import or link your model from other software, such as AutoCAD or Revit.
    3. Modify or edit your model using various tools, such as move, copy, rotate, mirror, align, or stretch. You can also use macros to automate some tasks or operations.
    4. Check your model for errors or clashes using the model browser or the clash detection tool. You can also use the design check tool to verify your model against various codes or standards.
    5. Generate fabrication drawings, bills of materials, NC files, or reports from your model using automatic tools. You can also customize your output using different templates, styles, formats, or symbols.
    6. Save and export your model and output in various formats, such as DWG, DXF, IFC, CIS/2, SDNF, or DSTV. You can also share your model and output with other users using worksharing or revision control.
    - -

    What are the Alternatives to Advance Steel 2014?

    -

    If you are looking for other software that can help you create and detail steel structures, you may want to consider some of the alternatives to Advance Steel 2014. Here are some of the alternatives to this software:

    -
    • Tekla Structures: This is a software that allows you to create and detail 3D models of any type of structure, such as steel, concrete, timber, or composite. You can also generate fabrication drawings, bills of materials, NC files, or reports from your models.
    • ProStructures: This is a software that allows you to create and detail 3D models of steel and concrete structures. You can also generate fabrication drawings, bills of materials, NC files, or reports from your models.
    • Advance Design: This is a software that allows you to create and analyze 3D models of steel structures. You can also generate fabrication drawings, bills of materials, NC files, or reports from your models.
    • SCIA Engineer: This is a software that allows you to create and analyze 3D models of any type of structure, such as steel, concrete, timber, or composite. You can also generate fabrication drawings, bills of materials, NC files, or reports from your models.
    - -

    Conclusion

    - -

    Advance Steel 2014 is a powerful software that allows you to create and detail 3D models of steel structures. You can also generate fabrication drawings, bills of materials, NC files, or reports from your models. You can get Advance Steel 2014 x64 (32X64bit) (Product key and Xforce keygen) Serial Key keygen from Civil MDC website for free and enjoy it on your computer. We hope this article has helped you with that and given you some useful tips and tricks for using the software. If you liked this article, please share it with your friends and leave us a comment below. Thank you for reading!

    -
    -
    \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Autofit 2008 Hun.md b/spaces/gotiQspiryo/whisper-ui/examples/Autofit 2008 Hun.md deleted file mode 100644 index 71d021e6a955e5ce15bbc5c23fc21aa05ea0f7f7..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Autofit 2008 Hun.md +++ /dev/null @@ -1,42 +0,0 @@ -

    autofit 2008 hun


    Download Zip https://urlgoal.com/2uyMtu



    -
    -doesn't click on a computer screen; the number 1234 doesn't look bad, and the text image wasn't too high. Isn't that a shame? - - Maybe you can still adjust something on your little list screen? - - Hehe - - :-) - - What a ridiculous, tasty, typist-like text. - - Yes, back then we had time for that borrowing and writing; maybe it would have been postponed entirely. - - Yes, but actually we needed a computer to do that. - - Then you have to use a screen with a USB port or an SSD. - - No side note. - - All those computer freaks with an 8" screen are super-duper valuable. - - Yes, no, but it is the same monitor. - - You do need to have that screen, though; it is ugly. - - And you have an 8" screen on which you can scan screens. - - I had a 7" monitor, but it was ugly there :-D - - Iddit! - - By then I had my third computer, and now yet another new one; I even bought black furniture. - - I'm off now, greetings to everyone. - - Okay, that's how it stands now; we won't approach that one any more, give up! - - Thank you O
    -
    -
    -

    diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Automation Studio P6 13 The Software that Covers Hydraulic Pneumatic Electrical PLC HMI and Communication Technologies.md b/spaces/gotiQspiryo/whisper-ui/examples/Automation Studio P6 13 The Software that Covers Hydraulic Pneumatic Electrical PLC HMI and Communication Technologies.md deleted file mode 100644 index 66902703bf607359ad2618a4b350ba0a198cb9d1..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Automation Studio P6 13 The Software that Covers Hydraulic Pneumatic Electrical PLC HMI and Communication Technologies.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    Studio Display also includes a studio-quality, three-microphone array with an especially low noise floor for crystal-clear calls and voice recordings. It also features a high-fidelity six-speaker sound system, the best ever created for Mac, delivering an unbelievable listening experience. Four force-cancelling woofers minimize distortion and produce bold, articulate bass, and two high-performance tweeters create accurate mids and crisp highs. The speakers also support spatial audio for music and video with Dolby Atmos, creating a truly cinematic viewing experience. Altogether, Studio Display has the best combination of camera and audio ever in a desktop display.

    -

    Automation Studio P6 SR9 v6.0.0.10932 is an outstanding engineering software program which is developed to design, doc, educate after which keep the electrical automation. By utilizing this software program completely different corporations can work collaboratively on any undertaking and share their key info among the many clients, suppliers and the coworkers. You can even obtain Altair FluxMotor 2018.

    -

    Automation Studio P6 13


    Download File >>> https://urlgoal.com/2uyN6f



    -

    The final release of Visual Studio 2013 became available for download on October 17, 2013, along with .NET 4.5.1.[190] Visual Studio 2013 officially launched on November 13, 2013, at a virtual launch event keynoted by S. Somasegar and hosted on events.visualstudio.com.[191] "Visual Studio 2013 Update 1" (Visual Studio 2013.1) was released on January 20, 2014.[192]Visual Studio 2013.1 is a targeted update that addresses some key areas of customer feedback.[193]"Visual Studio 2013 Update 2" (Visual Studio 2013.2) was released on May 12, 2014.[194]Visual Studio 2013 Update 3 was released on August 4, 2014. With this update, Visual Studio provides an option to disable the all-caps menus, which was introduced in VS2012.[195]"Visual Studio 2013 Update 4" (Visual Studio 2013.4) was released on November 12, 2014.[196]"Visual Studio 2013 Update 5" (Visual Studio 2013.5) was released on July 20, 2015.[197]

    -

    Famic Technologies Inc., Montreal, a provider of software engineering and industrial automation, has released a comprehensive line of new features and functionalities for its simulation solution software for hydraulic, pneumatic, electrical and HMI users. As always, Automation Studio version 6.2 is available for both industry and educational institutions.

    -

    Since 1986, Famic Technologies has been making trade oriented software solutions including: Automation Studio for machine knowledge management, Automation Studio Live Manifold for hydraulic manifold design, quoting and prototyping, and Andon Studio, a process management optimization software solution. It also creates, markets, and supports CAD and simulation software solutions for hydraulics, pneumatics, electrical, automation and controls.

    -
    -
    \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Call Of Duty Ghosts Multiplayer Crack V36 213 The Ultimate Guide for Gamers.md b/spaces/gotiQspiryo/whisper-ui/examples/Call Of Duty Ghosts Multiplayer Crack V36 213 The Ultimate Guide for Gamers.md deleted file mode 100644 index c06ca1c06ae1cf1c81353d319f9c9e7b58da80cd..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Call Of Duty Ghosts Multiplayer Crack V36 213 The Ultimate Guide for Gamers.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Call Of Duty Ghosts Multiplayer Crack V36 213


    Download >>>>> https://urlgoal.com/2uyMKN



    -
    -
    -

    diff --git a/spaces/gradio/HuBERT/examples/speech_recognition/criterions/cross_entropy_acc.py b/spaces/gradio/HuBERT/examples/speech_recognition/criterions/cross_entropy_acc.py deleted file mode 100644 index 7c4d8ba3802a2da9467c42b0aa18653c7bbb2ec9..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/speech_recognition/criterions/cross_entropy_acc.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from __future__ import absolute_import, division, print_function, unicode_literals - -import logging -import math - -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.criterions import FairseqCriterion, register_criterion - - -@register_criterion("cross_entropy_acc") -class CrossEntropyWithAccCriterion(FairseqCriterion): - def __init__(self, task, sentence_avg): - super().__init__(task) - self.sentence_avg = sentence_avg - - def compute_loss(self, model, net_output, target, reduction, log_probs): - # N, T -> N * T - target = target.view(-1) - lprobs = model.get_normalized_probs(net_output, log_probs=log_probs) - if not hasattr(lprobs, "batch_first"): - logging.warning( - "ERROR: we need to know whether " - "batch first for the net output; " - "you need to set batch_first attribute for the return value of " - "model.get_normalized_probs. Now, we assume this is true, but " - "in the future, we will raise exception instead. " - ) - batch_first = getattr(lprobs, "batch_first", True) - if not batch_first: - lprobs = lprobs.transpose(0, 1) - - # N, T, D -> N * T, D - lprobs = lprobs.view(-1, lprobs.size(-1)) - loss = F.nll_loss( - lprobs, target, ignore_index=self.padding_idx, reduction=reduction - ) - return lprobs, loss - - def get_logging_output(self, sample, target, lprobs, loss): - target = target.view(-1) - mask = target != self.padding_idx - correct = torch.sum( - lprobs.argmax(1).masked_select(mask) == target.masked_select(mask) - ) - total = torch.sum(mask) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else sample["ntokens"] - ) - - logging_output = { - "loss": utils.item(loss.data), # * sample['ntokens'], - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - "correct": utils.item(correct.data), - "total": utils.item(total.data), - "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(), - } - - return sample_size, logging_output - - def forward(self, model, sample, reduction="sum", log_probs=True): - """Computes the cross entropy with accuracy metric for the given sample. - - This is similar to CrossEntropyCriterion in fairseq, but also - computes accuracy metrics as part of logging - - Args: - logprobs (Torch.tensor) of shape N, T, D i.e. - batchsize, timesteps, dimensions - targets (Torch.tensor) of shape N, T i.e batchsize, timesteps - - Returns: - tuple: With three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - - TODO: - * Currently this Criterion will only work with LSTMEncoderModels or - FairseqModels which have decoder, or Models which return TorchTensor - as net_output. - We need to make a change to support all FairseqEncoder models. 
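        Note (a worked example of the accuracy bookkeeping above): if a batch
        contains 50 non-padding target tokens and 45 of them are predicted
        correctly, ``get_logging_output`` records ``correct=45`` and
        ``total=50``, and ``aggregate_logging_outputs`` then reports
        ``acc = 45 * 100.0 / 50 = 90.0``.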
- """ - net_output = model(**sample["net_input"]) - target = model.get_targets(sample, net_output) - lprobs, loss = self.compute_loss( - model, net_output, target, reduction, log_probs - ) - sample_size, logging_output = self.get_logging_output( - sample, target, lprobs, loss - ) - return loss, sample_size, logging_output - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - correct_sum = sum(log.get("correct", 0) for log in logging_outputs) - total_sum = sum(log.get("total", 0) for log in logging_outputs) - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - nframes = sum(log.get("nframes", 0) for log in logging_outputs) - agg_output = { - "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0, - # if args.sentence_avg, then sample_size is nsentences, then loss - # is per-sentence loss; else sample_size is ntokens, the loss - # becomes per-output token loss - "ntokens": ntokens, - "nsentences": nsentences, - "nframes": nframes, - "sample_size": sample_size, - "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0, - "correct": correct_sum, - "total": total_sum, - # total is the number of validate tokens - } - if sample_size != ntokens: - agg_output["nll_loss"] = loss_sum / ntokens / math.log(2) - # loss: per output token loss - # nll_loss: per sentence loss - return agg_output diff --git a/spaces/gradio/HuBERT/scripts/convert_model.lua b/spaces/gradio/HuBERT/scripts/convert_model.lua deleted file mode 100644 index 61b92139294fb90a25989ebd2ee52a765fb278a2..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/scripts/convert_model.lua +++ /dev/null @@ -1,108 +0,0 @@ --- Copyright (c) Facebook, Inc. and its affiliates. --- --- This source code is licensed under the MIT license found in the --- LICENSE file in the root directory of this source tree. --- --- Usage: convert_model.lua -require 'torch' -local fairseq = require 'fairseq' - -model = torch.load(arg[1]) - -function find_weight_norm(container, module) - for _, wn in ipairs(container:listModules()) do - if torch.type(wn) == 'nn.WeightNorm' and wn.modules[1] == module then - return wn - end - end -end - -function push_state(dict, key, module) - if torch.type(module) == 'nn.Linear' then - local wn = find_weight_norm(model.module, module) - assert(wn) - dict[key .. '.weight_v'] = wn.v:float() - dict[key .. '.weight_g'] = wn.g:float() - elseif torch.type(module) == 'nn.TemporalConvolutionTBC' then - local wn = find_weight_norm(model.module, module) - assert(wn) - local v = wn.v:float():view(wn.viewOut):transpose(2, 3) - dict[key .. '.weight_v'] = v - dict[key .. '.weight_g'] = wn.g:float():view(module.weight:size(3), 1, 1) - else - dict[key .. '.weight'] = module.weight:float() - end - if module.bias then - dict[key .. 
'.bias'] = module.bias:float() - end -end - -encoder_dict = {} -decoder_dict = {} -combined_dict = {} - -function encoder_state(encoder) - luts = encoder:findModules('nn.LookupTable') - push_state(encoder_dict, 'embed_tokens', luts[1]) - push_state(encoder_dict, 'embed_positions', luts[2]) - - fcs = encoder:findModules('nn.Linear') - assert(#fcs >= 2) - local nInputPlane = fcs[1].weight:size(1) - push_state(encoder_dict, 'fc1', table.remove(fcs, 1)) - push_state(encoder_dict, 'fc2', table.remove(fcs, #fcs)) - - for i, module in ipairs(encoder:findModules('nn.TemporalConvolutionTBC')) do - push_state(encoder_dict, 'convolutions.' .. tostring(i - 1), module) - if nInputPlane ~= module.weight:size(3) / 2 then - push_state(encoder_dict, 'projections.' .. tostring(i - 1), table.remove(fcs, 1)) - end - nInputPlane = module.weight:size(3) / 2 - end - assert(#fcs == 0) -end - -function decoder_state(decoder) - luts = decoder:findModules('nn.LookupTable') - push_state(decoder_dict, 'embed_tokens', luts[1]) - push_state(decoder_dict, 'embed_positions', luts[2]) - - fcs = decoder:findModules('nn.Linear') - local nInputPlane = fcs[1].weight:size(1) - push_state(decoder_dict, 'fc1', table.remove(fcs, 1)) - push_state(decoder_dict, 'fc2', fcs[#fcs - 1]) - push_state(decoder_dict, 'fc3', fcs[#fcs]) - - table.remove(fcs, #fcs) - table.remove(fcs, #fcs) - - for i, module in ipairs(decoder:findModules('nn.TemporalConvolutionTBC')) do - if nInputPlane ~= module.weight:size(3) / 2 then - push_state(decoder_dict, 'projections.' .. tostring(i - 1), table.remove(fcs, 1)) - end - nInputPlane = module.weight:size(3) / 2 - - local prefix = 'attention.' .. tostring(i - 1) - push_state(decoder_dict, prefix .. '.in_projection', table.remove(fcs, 1)) - push_state(decoder_dict, prefix .. '.out_projection', table.remove(fcs, 1)) - push_state(decoder_dict, 'convolutions.' .. tostring(i - 1), module) - end - assert(#fcs == 0) -end - - -_encoder = model.module.modules[2] -_decoder = model.module.modules[3] - -encoder_state(_encoder) -decoder_state(_decoder) - -for k, v in pairs(encoder_dict) do - combined_dict['encoder.' .. k] = v -end -for k, v in pairs(decoder_dict) do - combined_dict['decoder.' .. k] = v -end - - -torch.save('state_dict.t7', combined_dict) diff --git a/spaces/gwang-kim/DATID-3D/eg3d/gui_utils/text_utils.py b/spaces/gwang-kim/DATID-3D/eg3d/gui_utils/text_utils.py deleted file mode 100644 index e64a34d1287d58960141fa06a8e76446cd9cebc8..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/gui_utils/text_utils.py +++ /dev/null @@ -1,125 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -import functools -from typing import Optional - -import dnnlib -import numpy as np -import PIL.Image -import PIL.ImageFont -import scipy.ndimage - -from . 
import gl_utils - -#---------------------------------------------------------------------------- - -def get_default_font(): - url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf' # Open Sans regular - return dnnlib.util.open_url(url, return_filename=True) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=None) -def get_pil_font(font=None, size=32): - if font is None: - font = get_default_font() - return PIL.ImageFont.truetype(font=font, size=size) - -#---------------------------------------------------------------------------- - -def get_array(string, *, dropshadow_radius: int=None, **kwargs): - if dropshadow_radius is not None: - offset_x = int(np.ceil(dropshadow_radius*2/3)) - offset_y = int(np.ceil(dropshadow_radius*2/3)) - return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - return _get_array_priv(string, **kwargs) - -@functools.lru_cache(maxsize=10000) -def _get_array_priv( - string: str, *, - size: int = 32, - max_width: Optional[int]=None, - max_height: Optional[int]=None, - min_size=10, - shrink_coef=0.8, - dropshadow_radius: int=None, - offset_x: int=None, - offset_y: int=None, - **kwargs -): - cur_size = size - array = None - while True: - if dropshadow_radius is not None: - # separate implementation for dropshadow text rendering - array = _get_array_impl_dropshadow(string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - array = _get_array_impl(string, size=cur_size, **kwargs) - height, width, _ = array.shape - if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size): - break - cur_size = max(int(cur_size * shrink_coef), min_size) - return array - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int=None): - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = [np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - if outline > 0: - mask = np.pad(mask, int(np.ceil(outline * outline_pad)), mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, outline) - alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int=None, **kwargs): - assert (offset_x > 0) and (offset_y > 0) - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = 
[np.array(line, dtype=np.uint8).reshape([line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - - mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)), mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, radius) - alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4 - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)], mode='constant')[:-offset_y, :-offset_x] - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -#---------------------------------------------------------------------------- - -@functools.lru_cache(maxsize=10000) -def get_texture(string, bilinear=True, mipmap=True, **kwargs): - return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap) - -#---------------------------------------------------------------------------- diff --git a/spaces/haakohu/deep_privacy2/app.py b/spaces/haakohu/deep_privacy2/app.py deleted file mode 100644 index a2f9977de9694d8440dfbf71a5be8c3d7d32ee4e..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio -import os -from tops.config import instantiate -import gradio.inputs -os.system("pip install --upgrade pip") -os.system("pip install ftfy regex tqdm") -os.system("pip install --no-deps git+https://github.com/openai/CLIP.git") -os.system("pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose") -os.system("pip install --no-deps git+https://github.com/hukkelas/DSFD-Pytorch-Inference") -os.environ["TORCH_HOME"] = "torch_home" -from dp2 import utils -from gradio_demos.modules import ExampleDemo, WebcamDemo - -cfg_face = utils.load_config("configs/anonymizers/FB_cse.py") - -anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False) - -anonymizer_face.initialize_tracker(fps=1) - - -with gradio.Blocks() as demo: - gradio.Markdown("#
DeepPrivacy2 - Realistic Image Anonymization") - gradio.Markdown("### Håkon Hukkelås, Rudolf Mester, Frank Lindseth") - gradio.Markdown("See more information at: https://github.com/hukkelas/deep_privacy2") - gradio.Markdown("For a demo of face anonymization, see: https://huggingface.co/spaces/haakohu/deep_privacy2_face
    ") - with gradio.Tab("Full-body Anonymization (webcam)"): - ExampleDemo(anonymizer_face) - with gradio.Tab("Full-body Anonymization (file upload)"): - ExampleDemo(anonymizer_face, source="upload") - -demo.launch() diff --git a/spaces/haakohu/deep_privacy2/dp2/data/transforms/functional.py b/spaces/haakohu/deep_privacy2/dp2/data/transforms/functional.py deleted file mode 100644 index 1ee57f27ad07e597098ce1de967c3a50a1d06d0a..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/dp2/data/transforms/functional.py +++ /dev/null @@ -1,57 +0,0 @@ -import torchvision.transforms.functional as F -import torch -import pickle -from tops import download_file, assert_shape -from typing import Dict -from functools import lru_cache - -global symmetry_transform - - -@lru_cache(maxsize=1) -def get_symmetry_transform(symmetry_url): - file_name = download_file(symmetry_url) - with open(file_name, "rb") as fp: - symmetry = pickle.load(fp) - return torch.from_numpy(symmetry["vertex_transforms"]).long() - - -hflip_handled_cases = set([ - "keypoints", "img", "mask", "border", "semantic_mask", "vertices", "E_mask", "embed_map", "condition", - "embedding", "vertx2cat", "maskrcnn_mask", "__key__"]) - - -def hflip(container: Dict[str, torch.Tensor], flip_map=None) -> Dict[str, torch.Tensor]: - container["img"] = F.hflip(container["img"]) - if "condition" in container: - container["condition"] = F.hflip(container["condition"]) - if "embedding" in container: - container["embedding"] = F.hflip(container["embedding"]) - assert all([key in hflip_handled_cases for key in container]), container.keys() - if "keypoints" in container: - assert flip_map is not None - if container["keypoints"].ndim == 3: - keypoints = container["keypoints"][:, flip_map, :] - keypoints[:, :, 0] = 1 - keypoints[:, :, 0] - else: - assert_shape(container["keypoints"], (None, 3)) - keypoints = container["keypoints"][flip_map, :] - keypoints[:, 0] = 1 - keypoints[:, 0] - container["keypoints"] = keypoints - if "mask" in container: - container["mask"] = F.hflip(container["mask"]) - if "border" in container: - container["border"] = F.hflip(container["border"]) - if "semantic_mask" in container: - container["semantic_mask"] = F.hflip(container["semantic_mask"]) - if "vertices" in container: - symmetry_transform = get_symmetry_transform( - "https://dl.fbaipublicfiles.com/densepose/meshes/symmetry/symmetry_smpl_27554.pkl") - container["vertices"] = F.hflip(container["vertices"]) - symmetry_transform_ = symmetry_transform.to(container["vertices"].device) - container["vertices"] = symmetry_transform_[container["vertices"].long()] - if "E_mask" in container: - container["E_mask"] = F.hflip(container["E_mask"]) - if "maskrcnn_mask" in container: - container["maskrcnn_mask"] = F.hflip(container["maskrcnn_mask"]) - return container diff --git a/spaces/hadasak/SciTrends/README.md b/spaces/hadasak/SciTrends/README.md deleted file mode 100644 index 27c646f36fc5a73e8827942560f8c5fe243ba780..0000000000000000000000000000000000000000 --- a/spaces/hadasak/SciTrends/README.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: SciTrends -app_file: main.py -sdk: gradio -sdk_version: 3.47.1 ---- diff --git a/spaces/hands012/gpt-academic/request_llm/bridge_chatglm.py b/spaces/hands012/gpt-academic/request_llm/bridge_chatglm.py deleted file mode 100644 index 100783d248c4cd6dcbdb091181ac21f0f66af670..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/request_llm/bridge_chatglm.py +++ /dev/null @@ -1,161 +0,0 @@ - -from transformers 
import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "ChatGLM is not loaded yet. Loading will take some time. Note that, depending on the configuration in `config.py`, ChatGLM consumes a lot of memory (CPU) or VRAM (GPU), which may cause low-end computers to freeze ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.chatglm_model = None - self.chatglm_tokenizer = None - self.info = "" - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import sentencepiece - self.info = "Dependency check passed" - self.success = True - except: - self.info = "Missing ChatGLM dependencies. To use ChatGLM, besides the basic pip dependencies, you also need to run `pip install -r request_llm/requirements_chatglm.txt` to install ChatGLM's dependencies." - self.success = False - - def ready(self): - return self.chatglm_model is not None - - def run(self): - # runs in the child process - # on the first run, load the parameters - retry = 0 - while True: - try: - if self.chatglm_model is None: - self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) - device, = get_conf('LOCAL_MODEL_DEVICE') - if device=='cpu': - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() - else: - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() - self.chatglm_model = self.chatglm_model.eval() - break - else: - break - except: - retry += 1 - if retry > 3: - self.child.send('[Local Message] Call ChatGLM fail: could not load ChatGLM parameters.') - raise RuntimeError("Could not load ChatGLM parameters!") - - while True: - # wait for the next task - kwargs = self.child.recv() - # message received, start the request - try: - for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs): - self.child.send(response) - # # receive a possible termination command along the way (if any) - # if self.child.poll(): - # command = self.child.recv() - # if command == '[Terminate]': break - except: - from toolbox import trimmed_format_exc - self.child.send('[Local Message] Call ChatGLM fail.'
+ '\n```\n' + trimmed_format_exc() + '\n```\n') - # request finished, start the next loop - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # runs in the main process - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global glm_handle -glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - Multi-threaded method - For an explanation of this function, see request_llm/bridge_all.py - """ - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info - if not glm_handle.success: - error = glm_handle.info - glm_handle = None - raise RuntimeError(error) - - # chatglm has no sys_prompt interface, so the prompt is added to the history - history_feedin = [] - history_feedin.append(["What can I do?", sys_prompt]) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # watchdog patience; 5 seconds is enough - response = "" - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("Program terminated.") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - Single-threaded method - For an explanation of this function, see request_llm/bridge_all.py - """ - chatbot.append((inputs, "")) - - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not glm_handle.success: - glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # hot-reload the prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # get the preprocessing function (if any) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - # process the chat history - history_feedin = [] - history_feedin.append(["What can I do?", system_prompt] ) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # start receiving chatglm's reply - response = "[Local Message]: Waiting for ChatGLM response ..." - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # summarize the output - if response == "[Local Message]: Waiting for ChatGLM response ...": - response = "[Local Message]: ChatGLM response error ..."
- history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/hank1996/yolopv2/utils/metrics.py b/spaces/hank1996/yolopv2/utils/metrics.py deleted file mode 100644 index f7fd0fa13a40aa197b96c40273f8b7da3a98dccc..0000000000000000000000000000000000000000 --- a/spaces/hank1996/yolopv2/utils/metrics.py +++ /dev/null @@ -1,223 +0,0 @@ - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import torch - -from . import general - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - save_dir: Plot save directory - # Returns - The average precision as computed in py-faster-rcnn. - """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes = np.unique(target_cls) - nc = unique_classes.shape[0] # number of classes, number of detections - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels - n_p = i.sum() # number of predictions - - if n_p == 0 or n_l == 0: - continue - else: - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + 1e-16) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) - if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - - # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) - if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') - - i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') - - -def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves - # Arguments - recall: The recall curve (list) - precision: The precision curve (list) - # Returns - Average precision, precision curve, recall curve - """ - - # Append sentinel values to beginning and end - mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) - mpre = np.concatenate(([1.], precision, [0.])) - - # Compute the precision envelope - mpre = 
np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec - - -class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix - def __init__(self, nc, conf=0.25, iou_thres=0.45): - self.matrix = np.zeros((nc + 1, nc + 1)) - self.nc = nc # number of classes - self.conf = conf - self.iou_thres = iou_thres - - def process_batch(self, detections, labels): - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - None, updates confusion matrix accordingly - """ - detections = detections[detections[:, 4] > self.conf] - gt_classes = labels[:, 0].int() - detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) - - x = torch.where(iou > self.iou_thres) - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - else: - matches = np.zeros((0, 3)) - - n = matches.shape[0] > 0 - m0, m1, _ = matches.transpose().astype(np.int16) - for i, gc in enumerate(gt_classes): - j = m0 == i - if n and sum(j) == 1: - self.matrix[gc, detection_classes[m1[j]]] += 1 # correct - else: - self.matrix[self.nc, gc] += 1 # background FP - - if n: - for i, dc in enumerate(detection_classes): - if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # background FN - - def matrix(self): - return self.matrix - - def plot(self, save_dir='', names=()): - try: - import seaborn as sn - - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) - - fig = plt.figure(figsize=(12, 9), tight_layout=True) - sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size - labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) - fig.axes[0].set_xlabel('True') - fig.axes[0].set_ylabel('Predicted') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) - except Exception as e: - pass - - def print(self): - for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) - - -# Plots ---------------------------------------------------------------------------------------------------------------- - -def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): - # Precision-recall curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - py = np.stack(py, axis=1) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - 
for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) - else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) - - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) - - -def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): - # Metric-confidence curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) - else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) - - y = py.mean(0) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) - diff --git a/spaces/hanstyle/tts/face_detection/detection/__init__.py b/spaces/hanstyle/tts/face_detection/detection/__init__.py deleted file mode 100644 index 1a6b0402dae864a3cc5dc2a90a412fd842a0efc7..0000000000000000000000000000000000000000 --- a/spaces/hanstyle/tts/face_detection/detection/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .core import FaceDetector \ No newline at end of file diff --git a/spaces/haonanzhang/ChatGPT-BOT/modules/overwrites.py b/spaces/haonanzhang/ChatGPT-BOT/modules/overwrites.py deleted file mode 100644 index bfcd4d01b7d7bec1184a8d09113933bca860530b..0000000000000000000000000000000000000000 --- a/spaces/haonanzhang/ChatGPT-BOT/modules/overwrites.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. 
- """ - if y is None or y == []: - return [] - user, bot = y[-1] - if not detect_converted_mark(user): - user = convert_asis(user) - if not detect_converted_mark(bot): - bot = convert_mdtext(bot) - y[-1] = (user, bot) - return y - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/ema.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/ema.py deleted file mode 100644 index 771d72dfbbdf5eee210cb805242054492a270ae2..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/ema.py +++ /dev/null @@ -1,46 +0,0 @@ -from copy import deepcopy -from collections import OrderedDict -import torch - - -class ModelEma: - def __init__(self, model, decay=0.9999, device=''): - self.ema = deepcopy(model) - self.ema.eval() - self.decay = decay - self.device = device - if device: - self.ema.to(device=device) - self.ema_is_dp = hasattr(self.ema, 'module') - for p in self.ema.parameters(): - p.requires_grad_(False) - - def load_checkpoint(self, checkpoint): - if isinstance(checkpoint, str): - checkpoint = torch.load(checkpoint) - - assert isinstance(checkpoint, dict) - if 'model_ema' in checkpoint: - new_state_dict = OrderedDict() - for k, v in checkpoint['model_ema'].items(): - if self.ema_is_dp: - name = k if k.startswith('module') else 'module.' + k - else: - name = k.replace('module.', '') if k.startswith('module') else k - new_state_dict[name] = v - self.ema.load_state_dict(new_state_dict) - - def state_dict(self): - return self.ema.state_dict() - - def update(self, model): - pre_module = hasattr(model, 'module') and not self.ema_is_dp - with torch.no_grad(): - curr_msd = model.state_dict() - for k, ema_v in self.ema.state_dict().items(): - k = 'module.' + k if pre_module else k - model_v = curr_msd[k].detach() - if self.device: - model_v = model_v.to(device=self.device) - ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) - diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h deleted file mode 100644 index 9c86c8d55cd24fb5322657b9d2f676fc3e1373ba..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -#pragma once -#include <torch/types.h> - -namespace detectron2 { - -at::Tensor nms_rotated_cpu( - const at::Tensor& dets, - const at::Tensor& scores, - const float iou_threshold); - -#ifdef WITH_CUDA -at::Tensor nms_rotated_cuda( - const at::Tensor& dets, - const at::Tensor& scores, - const float iou_threshold); -#endif - -// Interface for Python -// inline is needed to prevent multiple function definitions when this header is -// included by different cpps -inline at::Tensor nms_rotated( - const at::Tensor& dets, - const at::Tensor& scores, - const float iou_threshold) { - assert(dets.device().is_cuda() == scores.device().is_cuda()); - if (dets.device().is_cuda()) { -#ifdef WITH_CUDA - return nms_rotated_cuda( - dets.contiguous(), scores.contiguous(), iou_threshold); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - - return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); -} - -} // namespace detectron2 diff --git a/spaces/hk59775634/OpenAI-Manager/README.md b/spaces/hk59775634/OpenAI-Manager/README.md deleted file mode 100644 index 10d9892e1d029158a2dcc63d9ffd9b18a933e722..0000000000000000000000000000000000000000 --- a/spaces/hk59775634/OpenAI-Manager/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: OpenAI Manager -emoji: 📈 -colorFrom: red -colorTo: pink -sdk: static -pinned: false -license: bsd-3-clause -duplicated_from: junchenmo/OpenAI-Manager ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/utils.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/utils.py deleted file mode 100644 index 6c118b18f2f6719231e43a1dea3402e22928b6a9..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/utils.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- -import json -import os -import pickle -import shutil -from collections import OrderedDict -from multiprocessing import Pool - -import numpy as np -from batchgenerators.utilities.file_and_folder_operations import join, isdir, maybe_mkdir_p, subfiles, subdirs, isfile -from nnunet.configuration import default_num_threads -from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer -from nnunet.experiment_planning.common_utils import split_4d_nifti -from nnunet.paths import nnUNet_raw_data, nnUNet_cropped_data, preprocessing_output_dir -from nnunet.preprocessing.cropping import ImageCropper - - -def split_4d(input_folder, num_processes=default_num_threads, overwrite_task_output_id=None): - assert isdir(join(input_folder, "imagesTr")) and isdir(join(input_folder, "labelsTr")) and \ - isfile(join(input_folder, "dataset.json")), \ - "The input folder must be a valid Task folder from the Medical Segmentation Decathlon with at least the " \ - "imagesTr and labelsTr subfolders and the dataset.json file" - - while input_folder.endswith("/"): - input_folder = input_folder[:-1] - - full_task_name = input_folder.split("/")[-1] - - assert full_task_name.startswith("Task"), "The input folder must point to a folder that starts with TaskXX_" - - first_underscore = full_task_name.find("_") - assert first_underscore == 6, "Input folder start with TaskXX with XX being a 3-digit id: 00, 01, 02 etc" - - input_task_id = int(full_task_name[4:6]) - if overwrite_task_output_id is None: - overwrite_task_output_id = input_task_id - - task_name = full_task_name[7:] - - output_folder = join(nnUNet_raw_data, "Task%03.0d_" % overwrite_task_output_id + task_name) - - if isdir(output_folder): - shutil.rmtree(output_folder) - - files = [] - output_dirs = [] - - maybe_mkdir_p(output_folder) - for subdir in ["imagesTr", "imagesTs"]: - curr_out_dir = join(output_folder, subdir) - if not isdir(curr_out_dir): - os.mkdir(curr_out_dir) - curr_dir = join(input_folder, subdir) - nii_files = [join(curr_dir, i) for i in os.listdir(curr_dir) if i.endswith(".nii.gz")] - nii_files.sort() - for n in nii_files: - files.append(n) - output_dirs.append(curr_out_dir) - - shutil.copytree(join(input_folder, "labelsTr"), join(output_folder, "labelsTr")) - - p = Pool(num_processes) - p.starmap(split_4d_nifti, zip(files, output_dirs)) - p.close() - p.join() - shutil.copy(join(input_folder, "dataset.json"), output_folder) - - -def create_lists_from_splitted_dataset(base_folder_splitted): - lists = [] - - json_file = join(base_folder_splitted, "dataset.json") - with open(json_file) as jsn: - d = json.load(jsn) - training_files = d['training'] - num_modalities = len(d['modality'].keys()) - for tr in training_files: - cur_pat = [] - for mod in range(num_modalities): - cur_pat.append(join(base_folder_splitted, "imagesTr", tr['image'].split("/")[-1][:-7] + - "_%04.0d.nii.gz" % mod)) - cur_pat.append(join(base_folder_splitted, "labelsTr", tr['label'].split("/")[-1])) - lists.append(cur_pat) - return lists, {int(i): d['modality'][str(i)] for i in d['modality'].keys()} - - -def create_lists_from_splitted_dataset_folder(folder): - """ - does not rely on dataset.json - :param folder: - :return: - """ - caseIDs = get_caseIDs_from_splitted_dataset_folder(folder) - list_of_lists = [] - for f in caseIDs: - list_of_lists.append(subfiles(folder, prefix=f, suffix=".nii.gz", join=True, sort=True)) - return list_of_lists - - -def get_caseIDs_from_splitted_dataset_folder(folder): - files = subfiles(folder, suffix=".nii.gz", join=False) - # all files must 
be .nii.gz and have 4 digit modality index - files = [i[:-12] for i in files] - # only unique patient ids - files = np.unique(files) - return files - - -def crop(task_string, override=False, num_threads=default_num_threads): - cropped_out_dir = join(nnUNet_cropped_data, task_string) - maybe_mkdir_p(cropped_out_dir) - - if override and isdir(cropped_out_dir): - shutil.rmtree(cropped_out_dir) - maybe_mkdir_p(cropped_out_dir) - - splitted_4d_output_dir_task = join(nnUNet_raw_data, task_string) - lists, _ = create_lists_from_splitted_dataset(splitted_4d_output_dir_task) - - imgcrop = ImageCropper(num_threads, cropped_out_dir) - imgcrop.run_cropping(lists, overwrite_existing=override) - shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), cropped_out_dir) - - -def analyze_dataset(task_string, override=False, collect_intensityproperties=True, num_processes=default_num_threads): - cropped_out_dir = join(nnUNet_cropped_data, task_string) - dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=override, num_processes=num_processes) - _ = dataset_analyzer.analyze_dataset(collect_intensityproperties) - - -def plan_and_preprocess(task_string, processes_lowres=default_num_threads, processes_fullres=3, no_preprocessing=False): - from nnunet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D - from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner - - preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string) - cropped_out_dir = join(nnUNet_cropped_data, task_string) - maybe_mkdir_p(preprocessing_output_dir_this_task_train) - - shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task_train) - shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), preprocessing_output_dir_this_task_train) - - exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train) - exp_planner.plan_experiment() - if not no_preprocessing: - exp_planner.run_preprocessing((processes_lowres, processes_fullres)) - - exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train) - exp_planner.plan_experiment() - if not no_preprocessing: - exp_planner.run_preprocessing(processes_fullres) - - # write which class is in which slice to all training cases (required to speed up 2D Dataloader) - # This is done for all data so that if we wanted to use them with 2D we could do so - - if not no_preprocessing: - p = Pool(default_num_threads) - - # if there is more than one my_data_identifier (different brnaches) then this code will run for all of them if - # they start with the same string. not problematic, but not pretty - stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True) - if i.split("/")[-1].find("stage") != -1] - for s in stages: - print(s.split("/")[-1]) - list_of_npz_files = subfiles(s, True, None, ".npz", True) - list_of_pkl_files = [i[:-4]+".pkl" for i in list_of_npz_files] - all_classes = [] - for pk in list_of_pkl_files: - with open(pk, 'rb') as f: - props = pickle.load(f) - all_classes_tmp = np.array(props['classes']) - all_classes.append(all_classes_tmp[all_classes_tmp >= 0]) - p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes)) - p.close() - p.join() - - -def add_classes_in_slice_info(args): - """ - We need this for 2D dataloader with oversampling. 
As of now it will detect slices that contain specific classes - at run time, meaning it needs to iterate over an entire patient just to extract one slice. That is obviously bad, - so we are doing this once beforehand and just give the dataloader the info it needs in the patients pkl file. - - """ - npz_file, pkl_file, all_classes = args - seg_map = np.load(npz_file)['data'][-1] - with open(pkl_file, 'rb') as f: - props = pickle.load(f) - #if props.get('classes_in_slice_per_axis') is not None: - print(pkl_file) - # this will be a dict of dict where the first dict encodes the axis along which a slice is extracted in its keys. - # The second dict (value of first dict) will have all classes as key and as values a list of all slice ids that - # contain this class - classes_in_slice = OrderedDict() - for axis in range(3): - other_axes = tuple([i for i in range(3) if i != axis]) - classes_in_slice[axis] = OrderedDict() - for c in all_classes: - valid_slices = np.where(np.sum(seg_map == c, axis=other_axes) > 0)[0] - classes_in_slice[axis][c] = valid_slices - - number_of_voxels_per_class = OrderedDict() - for c in all_classes: - number_of_voxels_per_class[c] = np.sum(seg_map == c) - - props['classes_in_slice_per_axis'] = classes_in_slice - props['number_of_voxels_per_class'] = number_of_voxels_per_class - - with open(pkl_file, 'wb') as f: - pickle.dump(props, f) diff --git a/spaces/hysts-samples/space-monitor/demo_list.py b/spaces/hysts-samples/space-monitor/demo_list.py deleted file mode 100644 index 21b97d18b8d42f4333a623ca1bcc9a36cdec9285..0000000000000000000000000000000000000000 --- a/spaces/hysts-samples/space-monitor/demo_list.py +++ /dev/null @@ -1,202 +0,0 @@ -import dataclasses -import datetime -import operator -import pathlib - -import pandas as pd -import tqdm.auto -import yaml -from huggingface_hub import HfApi - -from constants import ( - OWNER_CHOICES, - SLEEP_TIME_INT_TO_STR, - SLEEP_TIME_STR_TO_INT, - WHOAMI, -) - - -@dataclasses.dataclass(frozen=True) -class DemoInfo: - space_id: str - url: str - title: str - owner: str - sdk: str - sdk_version: str - likes: int - status: str - last_modified: str - sleep_time: int - replicas: int - private: bool - hardware: str - suggested_hardware: str - created: str = "" - arxiv: list[str] = dataclasses.field(default_factory=list) - github: list[str] = dataclasses.field(default_factory=list) - tags: list[str] = dataclasses.field(default_factory=list) - - def __post_init__(self): - object.__setattr__(self, "last_modified", DemoInfo.convert_timestamp(self.last_modified)) - object.__setattr__(self, "created", DemoInfo.convert_timestamp(self.created)) - - @staticmethod - def convert_timestamp(timestamp: str) -> str: - try: - return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y/%m/%d %H:%M:%S") - except ValueError: - return timestamp - - @classmethod - def from_space_id(cls, space_id: str) -> "DemoInfo": - api = HfApi() - space_info = api.space_info(repo_id=space_id) - card = space_info.cardData - runtime = space_info.runtime - resources = runtime.get("resources") - if "hardware" not in runtime: - hardware = "" - else: - hardware = runtime["hardware"]["current"] or runtime["hardware"]["requested"] - - return cls( - space_id=space_id, - url=f"https://huggingface.co/spaces/{space_id}", - title=card["title"] if "title" in card else "", - owner=space_id.split("/")[0], - sdk=card["sdk"], - sdk_version=card.get("sdk_version", ""), - likes=space_info.likes, - status=runtime["stage"], - last_modified=space_info.lastModified, - 
sleep_time=runtime.get("gcTimeout", 0) or 0, - replicas=resources["replicas"] if resources is not None else 0, - private=space_info.private, - hardware=hardware, - suggested_hardware=card.get("suggested_hardware", ""), - ) - - -def get_df_from_yaml(path: pathlib.Path | str) -> pd.DataFrame: - with pathlib.Path(path).open() as f: - data = yaml.safe_load(f) - demo_info = [] - for space_id in tqdm.auto.tqdm(list(data)): - base_info = DemoInfo.from_space_id(space_id) - info = DemoInfo(**(dataclasses.asdict(base_info) | data[space_id])) - demo_info.append(info) - return pd.DataFrame([dataclasses.asdict(info) for info in demo_info]) - - -class Prettifier: - @staticmethod - def get_arxiv_link(links: list[str]) -> str: - links = [Prettifier.create_link(link.split("/")[-1], link) for link in links] - return "\n".join(links) - - @staticmethod - def get_github_link(links: list[str]) -> str: - links = [Prettifier.create_link("github", link) for link in links] - return "\n".join(links) - - @staticmethod - def get_tag_list(tags: list[str]) -> str: - return ", ".join(tags) - - @staticmethod - def create_link(text: str, url: str) -> str: - return f'{text}' - - @staticmethod - def to_div(text: str | None, category_name: str) -> str: - if text is None: - text = "" - class_name = f"{category_name}-{text.lower()}" - return f'
    {text}
    ' - - @staticmethod - def add_div_tag_to_replicas(replicas: int) -> str: - if replicas == 0: - return "" - if replicas == 1: - return "1" - return f'
    {replicas}
    ' - - @staticmethod - def add_div_tag_to_sleep_time(sleep_time_s: str, hardware: str) -> str: - if hardware == "cpu-basic": - return f'
    {sleep_time_s}
    ' - s = sleep_time_s.replace(" ", "-") - return f'
    {sleep_time_s}
    ' - - def __call__(self, df: pd.DataFrame) -> pd.DataFrame: - new_rows = [] - for _, row in df.iterrows(): - new_row = dict(row) | { - "status": self.to_div(row.status, "status"), - "hardware": self.to_div(row.hardware, "hardware"), - "suggested_hardware": self.to_div(row.suggested_hardware, "hardware"), - "title": self.create_link(row.title, row.url), - "owner": self.create_link(row.owner, f"https://huggingface.co/{row.owner}"), - "sdk": self.to_div(row.sdk, "sdk"), - "sleep_time": self.add_div_tag_to_sleep_time(SLEEP_TIME_INT_TO_STR[row.sleep_time], row.hardware), - "replicas": self.add_div_tag_to_replicas(row.replicas), - "arxiv": self.get_arxiv_link(row.arxiv), - "github": self.get_github_link(row.github), - "tags": self.get_tag_list(row.tags), - } - new_rows.append(new_row) - return pd.DataFrame(new_rows, columns=df.columns) - - -class DemoList: - COLUMN_INFO = [ - ["status", "markdown"], - ["private", "bool"], - ["hardware", "markdown"], - ["title", "markdown"], - ["owner", "markdown"], - ["likes", "number"], - ["last_modified", "str"], - ["sdk", "markdown"], - ["sdk_version", "str"], - ["suggested_hardware", "markdown"], - ["sleep_time", "markdown"], - ["replicas", "markdown"], - ] - - def __init__(self, df: pd.DataFrame): - self.df_raw = df - self._prettifier = Prettifier() - self.df_prettified = self._prettifier(df).loc[:, self.column_names] - - @property - def column_names(self): - return list(map(operator.itemgetter(0), self.COLUMN_INFO)) - - @property - def column_datatype(self): - return list(map(operator.itemgetter(1), self.COLUMN_INFO)) - - def filter( - self, - status: list[str], - hardware: list[str], - sleep_time: list[str], - multiple_replicas: bool, - sdk: list[str], - ) -> pd.DataFrame: - df = self.df_raw.copy() - - if multiple_replicas: - df = df[self.df_raw.replicas > 1] - - df = df[ - (self.df_raw.status.isin(status)) & (self.df_raw.hardware.isin(hardware)) & (self.df_raw.sdk.isin(sdk)) - ] - - sleep_time_int = [SLEEP_TIME_STR_TO_INT[s] for s in sleep_time] - df = df[self.df_raw.sleep_time.isin(sleep_time_int)] - - return self._prettifier(df).loc[:, self.column_names] diff --git a/spaces/hysts/CelebAMask-HQ-Face-Parsing/images/README.md b/spaces/hysts/CelebAMask-HQ-Face-Parsing/images/README.md deleted file mode 100644 index cfd45bb9d2799fa93f74a1ca1ab1252d81bdaf0b..0000000000000000000000000000000000000000 --- a/spaces/hysts/CelebAMask-HQ-Face-Parsing/images/README.md +++ /dev/null @@ -1,7 +0,0 @@ -These images are freely-usable ones from [Unsplash](https://unsplash.com/). 
- -- https://unsplash.com/photos/rDEOVtE7vOs -- https://unsplash.com/photos/et_78QkMMQs -- https://unsplash.com/photos/ILip77SbmOE -- https://unsplash.com/photos/95UF6LXe-Lo - diff --git a/spaces/hyxue/HiFiFace-inference-demo/utils/visualizer.py b/spaces/hyxue/HiFiFace-inference-demo/utils/visualizer.py deleted file mode 100644 index 37810fd8ffe715e8d975f839b94f43bc683ede80..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/utils/visualizer.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -from torch.utils.tensorboard import SummaryWriter - - -class Visualizer: - """ - Tensorboard visualization monitoring class - """ - - def __init__(self, opt): - """ """ - self.opt = opt # cache the option - self.writer = SummaryWriter(log_dir=opt.log_dir) - - def display_current_results(self, iters, visuals_dict): - """ - Display current images - - Parameters: - ---------- - visuals (OrderedDict) - - dictionary of images to display - iters (int) - - the current iteration - """ - for label, image in visuals_dict.items(): - if image.shape[0] >= 2: - image = image[0:2, :, :, :] - self.writer.add_images(str(label), (image * 255.0).to(torch.uint8), global_step=iters, dataformats="NCHW") - - def plot_current_losses(self, iters, loss_dict): - """ - Display losses on tensorboard - - Parameters: - iters (int) -- current iteration - losses (OrderedDict) -- training losses stored in the format of (name, torch.Tensor) pairs - """ - x = iters - for k, v in loss_dict.items(): - self.writer.add_scalar(f"Loss/{k}", v, x) diff --git a/spaces/ifey/chatdemo/gradiodemo/Demo/mHtl/HtmlJs.py b/spaces/ifey/chatdemo/gradiodemo/Demo/mHtl/HtmlJs.py deleted file mode 100644 index 45d0f11565d9b2cc1b9b6fc8c9ba146dfe0373bc..0000000000000000000000000000000000000000 --- a/spaces/ifey/chatdemo/gradiodemo/Demo/mHtl/HtmlJs.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr - -# Custom HTML and JavaScript code -custom_html = """ - - - - Custom JavaScript - - - - - - -""" -def greet(name): - return "Hello " + name + "!" -# Create the Gradio app, embedding the custom HTML -with gr.Blocks() as demo: - custom_component = gr.HTML(custom_html) - gr.Interface(fn=greet, inputs="text", outputs="text") - -demo.launch() diff --git a/spaces/imseldrith/Imagine/run.py b/spaces/imseldrith/Imagine/run.py deleted file mode 100644 index 8f76ededa8212f34b93cfe19ef3ce0296badd232..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/Imagine/run.py +++ /dev/null @@ -1,74 +0,0 @@ -from flask import Flask, render_template, request, send_file, jsonify -from imaginepy import AsyncImagine, Style, Ratio -import os -import asyncio - -app = Flask(__name__) - -async def generate_image_async(prompt, style, ratio): - imagine = AsyncImagine() - - try: - img_data = await imagine.sdprem( - prompt=prompt, - style=Style[style], - ratio=Ratio[ratio] - ) - except Exception as e: - return f"An error occurred while generating the image: {e}" - - if img_data is None: - return "An error occurred while generating the image." - - img_data = await imagine.upscale(image=img_data) - - if img_data is None: - return "An error occurred while upscaling the image."
- - await imagine.close() - - try: - image_path = os.path.join(app.root_path, "static", "example.jpeg") - with open(image_path, mode="wb") as img_file: - img_file.write(img_data) - except Exception as e: - return f"An error occurred while writing the image to file: {e}" - - return image_path - -@app.route('/') -def index(): - return render_template('index.html') - -@app.route('/generate', methods=['POST']) -async def generate_image(): - prompt = request.form['prompt'] - style = request.form['style'] - ratio = request.form['ratio'] - - image_path = await generate_image_async(prompt, style, ratio) - - if isinstance(image_path, str): - return image_path - - return render_template('output.html') - -@app.route('/api/generate', methods=['POST']) -async def api_generate_image(): - data = request.get_json() - prompt = data['prompt'] - style = data['style'] - ratio = data['ratio'] - - image_path = await generate_image_async(prompt, style, ratio) - - if isinstance(image_path, str): - return jsonify({'error': image_path}), 500 - - return send_file(image_path, mimetype='image/jpeg', as_attachment=True) - -if __name__ == "__main__": - loop = asyncio.get_event_loop() - #loop.run_until_complete(main()) - app.run(host="0.0.0.0", port=7860) - \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Alla Tu [Juego De Pc Y DVD][Iso] DRM Free.md b/spaces/inamXcontru/PoeticTTS/Alla Tu [Juego De Pc Y DVD][Iso] DRM Free.md deleted file mode 100644 index 7a9d5fafac0308cea14b7c1aed4241263af319cb..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Alla Tu [Juego De Pc Y DVD][Iso] DRM Free.md +++ /dev/null @@ -1,86 +0,0 @@ -
    -

    Alla Tu: el juego interactivo que puedes disfrutar en tu PC o DVD sin DRM

    -

    ¿Te gustan los juegos de azar y estrategia? ¿Te atreves a enfrentarte al banquero y a las cajas misteriosas? Si es así, te encantará Alla Tu, el juego interactivo basado en el exitoso concurso televisivo que puedes jugar en tu PC o DVD sin DRM.

    -

    Alla Tu es un juego divertido y emocionante que te hará sentir como si estuvieras en el plató con Jesús Vázquez. El objetivo es simple: elegir una de las 26 cajas que contienen diferentes cantidades de dinero, desde 0,01 euros hasta 600.000 euros, y luego ir eliminando las demás cajas una por una, esperando que la tuya tenga el premio mayor.

    -

    Alla Tu [Juego De Pc Y DVD][Iso] DRM Free


    Download File >>>>> https://gohhs.com/2uz4Vs



    -

    Pero no estarás solo: el banquero te hará ofertas tentadoras para que vendas tu caja y te vayas con lo que te ofrezca. ¿Aceptas o rechazas? ¡Allá tú! Tendrás que confiar en tu intuición y en tu suerte para tomar la mejor decisión.

    -

    Alla Tu es un juego interactivo que puedes descargar gratis y jugar en tu PC o DVD sin DRM. No necesitas conexión a internet ni ningún tipo de registro. Solo tienes que instalar el juego en tu ordenador o introducir el disco en tu reproductor de DVD y empezar a jugar.

    -

    Puedes jugar solo o con tus familiares y amigos, compitiendo por ver quién consigue más dinero. Además, el juego tiene varios modos de dificultad y opciones de personalización para que lo adaptes a tu gusto. También puedes ver las estadísticas de tus partidas y los rankings de los mejores jugadores.

    -

    Alla Tu es un juego interactivo que te hará pasar momentos de diversión y tensión. ¿Serás capaz de ganar el gran premio o te conformarás con lo que te ofrezca el banquero? ¡Descarga ya Alla Tu y pon a prueba tu suerte!

    -

    ¿Qué es Alla Tu y cómo se juega?

    -

    Alla Tu es un juego interactivo que se basa en el famoso concurso de televisión del mismo nombre, que se emitió en España entre 2004 y 2010. El programa fue presentado por Jesús Vázquez y contó con la participación de miles de concursantes que se enfrentaron al banquero y a las cajas para ganar dinero.

    -

    El juego consiste en elegir una de las 26 cajas que hay en el escenario, cada una con una cantidad de dinero diferente, desde 0,01 euros hasta 600.000 euros. El concursante no sabe qué hay en su caja ni en las demás, solo el banquero lo sabe. El concursante debe ir abriendo las otras cajas una por una, esperando que contengan las cantidades más bajas posibles, para que el valor de su caja aumente.

    -

    Después de cada ronda de apertura de cajas, el banquero le hace una oferta al concursante para que venda su caja y se vaya con el dinero que le ofrece. El concursante puede aceptar o rechazar la oferta. Si la rechaza, debe seguir abriendo cajas hasta que solo queden dos: la suya y otra. Entonces puede decidir si se queda con su caja o la cambia por la otra. Al final, se revela lo que había en cada caja y se ve si el concursante ha ganado o perdido dinero.

    - -
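A quick aside for readers who wonder how to think about the banker's offers: the usual way to evaluate a deal in this kind of game is to compare the offer with the expected value of the amounts still in play. Below is a minimal Python sketch of that comparison. The box amounts and the offer are made-up examples, and the risk-neutral "accept if the offer beats the expected value" rule is just one possible strategy, not the show's actual banker logic.

```python
# Minimal sketch: should you take the banker's offer?
# Assumes a risk-neutral player; the amounts and the offer are illustrative only.

def expected_value(remaining_amounts):
    """Average of the amounts still in play (each box is equally likely)."""
    return sum(remaining_amounts) / len(remaining_amounts)

remaining = [0.01, 5, 100, 5000, 50000, 600000]  # hypothetical boxes left
offer = 42000                                    # hypothetical banker offer

ev = expected_value(remaining)
print(f"Expected value of your box: {ev:,.2f}")
print("Accept the offer" if offer >= ev else "Reject and keep playing")
```

In this example the expected value is about 109,184 euros, so a risk-neutral player would reject a 42,000-euro offer, while a cautious player might still take the sure money.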

Why play Alla Tu on your PC or DVD?

    -

Alla Tu is an interactive game you can enjoy on your PC or DVD without DRM. This means you can download it for free and play it with no internet connection and no registration of any kind. You can also install it on your computer, or insert the disc into your DVD player, and start playing instantly.

    -

    -

Playing Alla Tu on your PC or DVD has many advantages. For example, you can play alone or with family and friends, competing to see who wins the most money. You can also pick the difficulty level and the customization options you like best, and check your game statistics and the rankings of the best players.

    -

Playing Alla Tu on your PC or DVD is a fun and exciting way to pass the time. You will feel the adrenaline of facing the banker and the boxes, and you will put your intuition and your luck to the test. Will you manage to win the grand prize, or will you settle for whatever the banker offers? Download Alla Tu now and test your luck!

    -

What are the advantages of a DRM-free Alla Tu?

    -

Alla Tu is an interactive game you can download for free and play on your PC or DVD without DRM. DRM stands for Digital Rights Management and refers to a set of technologies that restrict how users can use digital content.

    -

Some games use DRM to prevent piracy or unauthorized use. However, this can also be a nuisance for legitimate users, who may run into problems installing, running, or updating the game. DRM can also hurt the game's performance or the users' privacy.

    -

That is why playing Alla Tu without DRM has many advantages. You will not have to activate the game online or enter any code or key. You will not depend on an internet connection or an external server to play. You can install the game on as many computers or DVD players as you want and play without restrictions. You will also enjoy the game at full quality and without risks to your security.

    - -

Where can you download Alla Tu for free?

    -

Alla Tu is an interactive game you can download for free and play on your PC or DVD without DRM. If you want to enjoy this fun and exciting game, just follow these steps:

    -
1. Go to the Archive.org website, where you will find the game's ISO file.
2. Download the ISO file to your computer. It is a compressed file that contains all of the game's data.
3. Unpack the ISO file with a program such as WinRAR or 7-Zip (or script the step, as in the sketch after this list). You will get a folder with the game's name.
4. Open the folder and look for the SETUP.EXE file. Double-click it to start installing the game on your computer.
5. Follow the on-screen instructions to complete the installation. Choose the path where you want to install the game and accept the terms and conditions.
6. Once the game is installed, you can launch it from the Start menu or from your desktop.
7. If you want to play the game on your DVD player, just burn the ISO file to a DVD with a program such as Nero or ImgBurn. Then insert the disc into your DVD player and select the option to play the disc.
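If you would rather script the unpacking step than click through WinRAR or 7-Zip, a small Python wrapper around the 7-Zip command line does the job. This is only a sketch: it assumes the `7z` executable is installed and on your PATH, and the ISO file name is a placeholder for whatever you actually downloaded.

```python
# Sketch: unpack a downloaded ISO with the 7-Zip command line.
# Assumes `7z` is installed and on PATH; file names are placeholders.
import subprocess
from pathlib import Path

iso = Path("AllaTu.iso")   # hypothetical name - use your downloaded file
out_dir = Path("AllaTu")   # folder to extract into

out_dir.mkdir(exist_ok=True)
# `x` extracts with full paths; `-o<dir>` sets the output directory
# (7-Zip expects no space between -o and the directory name).
subprocess.run(["7z", "x", str(iso), f"-o{out_dir}"], check=True)
print("Extracted to", out_dir.resolve())
```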

That's it! You can now enjoy Alla Tu on your PC or DVD without DRM. Have fun playing and winning money!

    -

What features does Alla Tu have?

    -

Alla Tu is an interactive game with many features that make it attractive and fun. Some of them are:

    -
• It has the original graphics and sounds from the TV show, which will make you feel as if you were on the set with Jesús Vázquez.
• It has the real voice of Jesús Vázquez, who will guide and encourage you during the game. You will also hear the voice of the banker, who makes offers and comments.
• It has several game modes: single player, multiplayer, and tournament. In single-player mode, you play alone against the banker. In multiplayer mode, up to four more players can join, taking turns choosing a box and accepting or rejecting offers. In tournament mode, you compete with other players online to see who wins the most money.
• It has several difficulty levels: easy, medium, and hard. At each level, the banker's offers are more or less generous depending on the value of your box and the boxes still in play.
• It has several customization options: you can choose the color of your box, your contestant's name, and the currency you want to use (euros or dollars).
• It has a statistics and ranking system: you can review your game history, the money you have won or lost, the number of offers you have accepted or rejected, and the value of your final box. You can also see the rankings of the best players in the world, in your country, or among your friends.

What are the benefits of playing Alla Tu?

    -

Alla Tu is an interactive game that will not only give you a good time but also bring you some benefits. Some of them are:

    -
• It will help you improve your mental arithmetic and your mathematical intuition. You will have to estimate the probability that your box holds a high or a low amount and weigh it against the banker's offers (the expected-value sketch above illustrates this).
• It will help you develop your emotional intelligence and your self-control. You will have to handle the pressure, the uncertainty, and the temptation of accepting or rejecting the banker's offers.
• It will help you foster your competitive spirit and your self-esteem. You can challenge yourself or other players to see who wins the most money.
• It will help you have fun and relax. You can enjoy an entertaining and exciting game that will make you forget the stress and problems of daily life.

What do people say about Alla Tu?

    -

Alla Tu is an interactive game that has received many positive reviews from users who have tried it. Some of them are:

    -
    -

    "Me encanta este juego, es muy fiel al programa de televisión y tiene muy buena calidad. Es muy divertido jugar con la familia o los amigos y ver quién se lleva más dinero. Lo recomiendo mucho."

    -Ana, 34 años. -
    -
    -

    "Es un juego muy entretenido y adictivo. Te hace sentir la emoción de enfrentarte al banquero y a las cajas, y te hace pensar mucho. Además, se puede jugar en el PC o en el DVD sin problemas. Lo mejor es que es gratis y sin DRM."

    -Pedro, 28 años. -
    -
    -

    "Me parece un juego genial, muy bien hecho y con muchos detalles. Tiene la voz de Jesús Vázquez y del banquero, los gráficos y los sonidos originales, y varios modos de juego y opciones. Es como estar en el programa de verdad."

    -Laura, 31 años. -
    - -

How can you get Alla Tu?

    -

Alla Tu is an interactive game you can get for free and play on your PC or DVD without DRM. You just have to follow these steps:

    -
• Go to the Archive.org website, where you will find the game's ISO file.
• Download the ISO file to your computer. It is a compressed file that contains all of the game's data.
• Unpack the ISO file with a program such as WinRAR or 7-Zip. You will get a folder with the game's name.
• Open the folder and look for the SETUP.EXE file. Double-click it to start installing the game on your computer.
• Follow the on-screen instructions to complete the installation. Choose the path where you want to install the game and accept the terms and conditions.
• Once the game is installed, you can launch it from the Start menu or from your desktop.
• If you want to play the game on your DVD player, just burn the ISO file to a DVD with a program such as Nero or ImgBurn. Then insert the disc into your DVD player and select the option to play the disc.

That's it! You can now get Alla Tu and play it on your PC or DVD without DRM. Don't wait any longer; download this interactive game now!

    -

Conclusion

    -

Alla Tu is an interactive game that lets you live the TV show experience on your PC or DVD. You can play alone or with family and friends, face the banker and the boxes, and win up to 600,000 euros. You can also enjoy the game for free and without DRM, which gives you more freedom and security. If you like games of chance and strategy, don't hesitate: download Alla Tu now, the interactive game that will make you feel as if you were on the set with Jesús Vázquez.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Asunsoft Excel Password Geeker Crackinstmank.md b/spaces/inamXcontru/PoeticTTS/Asunsoft Excel Password Geeker Crackinstmank.md deleted file mode 100644 index c876b05052bf472b189b0c916598be4ff389ee68..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Asunsoft Excel Password Geeker Crackinstmank.md +++ /dev/null @@ -1,118 +0,0 @@ -
    -

    Asunsoft Excel Password Geeker: A Review

    -

    If you have ever forgotten or lost the password to your Excel file, you know how frustrating it can be. You may have important data or work that you need to access, but you can't open the file without the password. What can you do in such a situation?

    -

    asunsoft excel password geeker crackinstmank


    Download Filehttps://gohhs.com/2uz2Nh



    -

One possible solution is to use a password recovery tool like Asunsoft Excel Password Geeker. This software can help you remove or recover the password of any Excel file, regardless of version or complexity. In this article, we review Asunsoft Excel Password Geeker and see how it works, what its features are, and what its pros and cons are.

    -

    How Asunsoft Excel Password Geeker Works

    -

    Asunsoft Excel Password Geeker is a simple and easy-to-use software that can be downloaded and installed on your Windows computer. It supports all versions of Excel from 97 to 2019, and it can remove or recover both open and edit passwords.

    -

    To use Asunsoft Excel Password Geeker, you just need to follow these steps:

    -
1. Launch the software and select the type of password you want to remove or recover (open or edit).
2. Click on "Open" and browse for the Excel file that you want to unlock.
3. Choose the recovery mode that suits your situation. There are four modes available: Brute-force Attack, Mask Attack, Dictionary Attack, and Smart Attack (a plain-Python illustration of the dictionary idea follows this list).
4. Click on "Start" and wait for the software to find the password.
5. Once the password is found, copy it and use it to open your Excel file.
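To make the Dictionary Attack mode concrete: a dictionary attack simply tries each candidate password from a word list against the encrypted file until one opens it. The sketch below shows that idea in plain Python using the open-source msoffcrypto-tool package rather than Asunsoft's engine, which is not scriptable; the file and word-list names are placeholders, and you should only run something like this against files you own.

```python
# Sketch of a dictionary attack with the open-source msoffcrypto-tool package
# (pip install msoffcrypto-tool). File and word-list names are placeholders.
import io
import msoffcrypto

def try_wordlist(xlsx_path, wordlist_path):
    with open(wordlist_path, encoding="utf-8", errors="ignore") as wl:
        candidates = [line.strip() for line in wl if line.strip()]
    for password in candidates:
        with open(xlsx_path, "rb") as f:
            office = msoffcrypto.OfficeFile(f)
            try:
                office.load_key(password=password)
                office.decrypt(io.BytesIO())  # decrypt to memory just to verify
                return password               # no exception -> password found
            except Exception:
                continue                      # wrong password, try the next one
    return None

found = try_wordlist("protected.xlsx", "wordlist.txt")
print("Password:", found if found else "not in the word list")
```

Dedicated tools are typically far faster than this loop because they verify candidate keys directly instead of decrypting the whole file for every guess.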

    Features of Asunsoft Excel Password Geeker

    -

    Asunsoft Excel Password Geeker has some features that make it stand out from other password recovery tools. Here are some of them:

    -
• It has a high speed and efficiency. It can remove or recover passwords within seconds or minutes, thanks to its multi-core acceleration technology.
• It has a friendly and intuitive interface. It is easy to use even for beginners, as it guides you through the process step by step.
• It has a wide compatibility. It supports all versions of Excel from 97 to 2019, and it can work on Windows 10/8/7/Vista/XP/2000.
• It has a flexible recovery mode. It allows you to choose from four different modes depending on the complexity and length of your password. You can also customize some settings to speed up the recovery process.
• It has a safe and reliable performance. It does not damage or modify your original Excel file, and it does not contain any virus or malware.

    Pros and Cons of Asunsoft Excel Password Geeker

    -

Asunsoft Excel Password Geeker is not perfect software, and it has advantages and disadvantages that you should consider before using it. Here are some of them:

| Pros | Cons |
| --- | --- |
| It is fast and effective. | It is not free. |
| It is easy and simple to use. | It does not support Mac OS. |
| It is compatible with all versions of Excel. | It may not work for some complex passwords. |
| It has a flexible recovery mode. | It may not be updated regularly. |
| It is safe and reliable. | |

    Conclusion

    - -

    If you are looking for a way to remove or recover your Excel password, Asunsoft Excel Password Geeker may be a good option for you. It is a fast, easy, and effective software that can help you unlock your Excel file in no time. However, it is not free, and it may not work for some complex passwords. Therefore, you should weigh the pros and cons before deciding whether to use it or not.

    -

    - -

    We hope this article has given you some useful information about Asunsoft Excel Password Geeker. If you have any questions or comments, feel free to leave them below.

    -

    How to Download and Install Asunsoft Excel Password Geeker

    -

    If you want to try Asunsoft Excel Password Geeker for yourself, you can download it from the official website or from other trusted sources. The software is not free, but you can use the trial version to test its functionality and performance. The trial version can only remove passwords that are less than three characters long.

    -

    To download and install Asunsoft Excel Password Geeker, you just need to follow these steps:

    -
1. Go to the official website of Asunsoft Excel Password Geeker and click on "Download". You can also find other download links on other websites, but make sure they are safe and reliable.
2. Save the setup file on your computer and run it as administrator.
3. Follow the instructions on the screen to complete the installation process.
4. Launch the software and enter the registration code if you have purchased it. If not, you can use the trial version with some limitations.
5. Enjoy using Asunsoft Excel Password Geeker to remove or recover your Excel passwords.

    Tips and Tricks for Using Asunsoft Excel Password Geeker

    -

    Asunsoft Excel Password Geeker is a powerful and easy-to-use software, but there are some tips and tricks that can help you use it more effectively and efficiently. Here are some of them:

    -
• Choose the right recovery mode for your situation. If you know some information about your password, such as the length, the characters, or the prefix or suffix, you can use the Mask Attack or the Dictionary Attack to speed up the recovery process. If you have no clue about your password, you can use the Smart Attack or the Brute-force Attack to try all possible combinations (the sketch after this list shows why a mask helps so much).
• Customize some settings to optimize the recovery process. You can adjust some parameters such as the password range, the password length, or the dictionary path to make the recovery more accurate and faster.
• Save your recovery state and resume it later. If you have a long and complex password, it may take a long time to recover it. You can save your recovery state and resume it later when you have more time or resources.
• Use a strong and memorable password for your Excel file. To avoid forgetting or losing your password again, you should use a strong and memorable password. You can use a combination of letters, numbers, symbols, and spaces to make your password more secure. You can also use a password manager to store and manage your passwords safely.
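As a rough illustration of why the Mask Attack tip above matters: a blind brute force grows exponentially with password length, while a mask restricts the search to only the characters you don't remember. The snippet below just counts and enumerates candidates with Python's itertools; it is a back-of-the-envelope sketch, not Asunsoft's actual search engine, and the remembered prefix is a made-up example.

```python
# Sketch: how a mask shrinks a brute-force search space (illustrative only).
import itertools
import string

alphabet = string.ascii_lowercase + string.digits  # 36 symbols

# Blind brute force over all length-7 passwords: 36**7 candidates (~78 billion).
print("Blind search space:", len(alphabet) ** 7)

# Mask attack: suppose you remember the password was "excel" plus two digits.
tails = itertools.product(string.digits, repeat=2)   # only 10 * 10 = 100 tails
candidates = ("excel" + "".join(t) for t in tails)
print("Masked search space:", 10 ** 2)
print("First few candidates:", list(itertools.islice(candidates, 3)))
```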

Conclusion

    - -

    Asunsoft Excel Password Geeker is a useful software that can help you remove or recover your Excel passwords in a fast and easy way. It has some features that make it stand out from other password recovery tools, such as its high speed, its friendly interface, its wide compatibility, its flexible recovery mode, and its safe performance. However, it also has some drawbacks that you should consider before using it, such as its price, its lack of Mac support, its possible failure for some complex passwords, and its possible lack of updates.

    - -

    We hope this article has given you some helpful information about Asunsoft Excel Password Geeker. If you have any questions or comments, feel free to leave them below.

    -

    How to Use Asunsoft Excel Password Geeker to Crack Excel Passwords

    -

    Asunsoft Excel Password Geeker is not only a password remover, but also a password cracker. It can help you crack the passwords of Excel files that are protected by encryption or complex algorithms. This way, you can not only open the file, but also modify it or copy its contents.

    -

    To use Asunsoft Excel Password Geeker to crack Excel passwords, you just need to follow these steps:

    -
1. Launch the software and select the type of password you want to crack (open or edit).
2. Click on "Open" and browse for the encrypted Excel file that you want to crack.
3. Choose the recovery mode that suits your situation. There are four modes available: Brute-force Attack, Mask Attack, Dictionary Attack, and Smart Attack.
4. Click on "Start" and wait for the software to find the password.
5. Once the password is found, copy it and use it to open and edit your Excel file.

    How to Get Asunsoft Excel Password Geeker for Free

    -

    Asunsoft Excel Password Geeker is a paid software that costs $17.95 for a single license. However, there are some ways that you can get it for free or at a discounted price. Here are some of them:

    -
• You can download the trial version from the official website or from other sources. The trial version can remove passwords that are less than three characters long, and it can recover passwords that are less than five characters long.
• You can look for coupon codes or discounts on some websites or platforms. You may find some offers that can save you some money or give you some bonuses.
• You can participate in some giveaways or contests that are held by Asunsoft or other partners. You may have a chance to win a free license or a gift card.
• You can share your feedback or experience with Asunsoft Excel Password Geeker on some social media or forums. You may get some rewards or appreciation from Asunsoft or other users.

    Conclusion

    - -

    Asunsoft Excel Password Geeker is a useful software that can help you remove or crack your Excel passwords in a fast and easy way. It has some features that make it stand out from other password recovery tools, such as its high speed, its friendly interface, its wide compatibility, its flexible recovery mode, and its safe performance. However, it also has some drawbacks that you should consider before using it, such as its price, its lack of Mac support, its possible failure for some complex passwords, and its possible lack of updates.

    - -

    We hope this article has given you some helpful information about Asunsoft Excel Password Geeker. If you have any questions or comments, feel free to leave them below.

    -

    How to Uninstall Asunsoft Excel Password Geeker

    -

    If you want to uninstall Asunsoft Excel Password Geeker from your computer, you can do it easily and safely. You just need to follow these steps:

    -
1. Close Asunsoft Excel Password Geeker if it is running.
2. Go to the Control Panel and click on "Programs and Features".
3. Find Asunsoft Excel Password Geeker in the list of installed programs and click on "Uninstall".
4. Follow the instructions on the screen to complete the uninstallation process.
5. Delete any leftover files or folders related to Asunsoft Excel Password Geeker from your computer.

    How to Contact Asunsoft Excel Password Geeker Support Team

    -

    If you have any problems or questions about Asunsoft Excel Password Geeker, you can contact the support team for help. You can use one of these methods:

    -
• You can send an email to support@asunsoft.com and describe your issue or inquiry in detail. You can also attach some screenshots or files if necessary.
• You can visit the official website of Asunsoft Excel Password Geeker and click on "Contact Us". You can fill out the online form with your name, email, subject, and message. You can also choose the product that you need help with.
• You can follow Asunsoft on some social media platforms such as Facebook, Twitter, or YouTube. You can leave a comment or a message and get some updates or tips from Asunsoft or other users.

    Conclusion

    - -

    Asunsoft Excel Password Geeker is a useful software that can help you remove or crack your Excel passwords in a fast and easy way. It has some features that make it stand out from other password recovery tools, such as its high speed, its friendly interface, its wide compatibility, its flexible recovery mode, and its safe performance. However, it also has some drawbacks that you should consider before using it, such as its price, its lack of Mac support, its possible failure for some complex passwords, and its possible lack of updates.

    - -

    We hope this article has given you some helpful information about Asunsoft Excel Password Geeker. If you have any questions or comments, feel free to leave them below.

    -

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/innnky/soft-vits-singingvc/models.py b/spaces/innnky/soft-vits-singingvc/models.py deleted file mode 100644 index 5a14a90cf31c33d4a2b961968866585ee0454dd0..0000000000000000000000000000000000000000 --- a/spaces/innnky/soft-vits-singingvc/models.py +++ /dev/null @@ -1,562 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F -import numpy as np -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = 
torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - # self.emb = nn.Embedding(n_vocab, hidden_channels) - # nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - # x = x.transpose(1,2) - # x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - # print(x.shape) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - 
in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 
1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - print(x.shape, x_lengths) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - print(logw.shape) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - - w_ceil = w_ceil * 0 + 2 - # for index in range(w_ceil.shape[2]): - # if index%4 == 0: - # w_ceil[0,0,index] = 1.0 - - for i in range(w_ceil.shape[2]): - sep = 1 / 0.14 - if i * sep >= w_ceil.shape[2] * 2: - break - w_ceil[0, 0, int(i * sep / 2)] = 1 - - # print(w_ceil) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - print(y_lengths) - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] 
-> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/innovatorved/whisper.api/app/tests/test_core/test_server.py b/spaces/innovatorved/whisper.api/app/tests/test_core/test_server.py deleted file mode 100644 index be3bc8a468ed9d11d6f29e58bc65d5a5ca1f4320..0000000000000000000000000000000000000000 --- a/spaces/innovatorved/whisper.api/app/tests/test_core/test_server.py +++ /dev/null @@ -1,11 +0,0 @@ -from fastapi.testclient import TestClient -from app.main import app -from app.core.config import settings - -client = TestClient(app) - - -def test_ping_main(): - response = client.get("/ping") - assert response.status_code == 200 - assert response.json() == {"ping": "pong"} diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Free Product Key For Visual Studio 2012.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Free Product Key For Visual Studio 2012.md deleted file mode 100644 index b4ad7cb6db698155e15468ab6b29340447af170a..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Free Product Key For Visual Studio 2012.md +++ /dev/null @@ -1,11 +0,0 @@ - -

I installed Visual Studio 2010 on a new partition. I noticed in the properties of the C: partition that the Visual Studio 2010 folder was named "Program Files (x86)". During the installation, when I chose where to install, I selected the default location.

    -

    Free Product Key For Visual Studio 2012


    Download File > https://urlin.us/2uEwXI



    -

To prevent confusion, I'll explain why we have different licensing for .NET. As a refresher, .NET is Microsoft's collective term for the .NET Framework, the .NET libraries and tools, and the .NET class library.

    -

Why is Visual Studio 2013 Preview saying I do not have an active license for Visual Studio 2013 Preview? I already have a serial for the full version of Visual Studio 2012. I am trying to sign in to the Preview (create an account), but it is not allowing me. It says I do not have an active Visual Studio 2013 Preview license.

    -

I have been told I must contact the community MVP for Visual Studio to obtain a VS 2013 Preview license. I have not been able to find anyone on the community MVP list from Microsoft. If anyone can point me in the right direction, I'd be very grateful.

    -

I tried using the ZDNet MSDN Windows/Visual Studio 2013 trial license during the VS 2013 Preview, but the license was not activated after restarting the OS or reinstalling VS 2013. I also tried the process without any MSDN Windows/Visual Studio 2013 license. I need to know whether the Windows and/or Visual Studio 2013 license for ZDNet and its tests is valid, or whether I should use a VS 2013 Preview license as they suggest.

    -

    -

Although there are many reasons to upgrade, I think the two big ones are that they add support for Windows 8, and that they are currently the only IDE with support for ASP.NET Web Forms 3.5 (although you can get the DLLs from before that time). If you want to work with HTML, CSS, JavaScript, and the other web technologies, you would be missing something without it. I often use their debugger, and they have done a very good job of making those features work for web developers (and, to a lesser extent, for Silverlight and WPF developers). Microsoft has also started releasing the source code of their libraries and tools, making it easier for developers to verify the correctness of what they do, and making it easy to do small bug fixes without having to ask them to update their solution.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/How To Cheat At Cards Daniel Madison Pdf.md b/spaces/inplisQlawa/anything-midjourney-v4-1/How To Cheat At Cards Daniel Madison Pdf.md deleted file mode 100644 index d6308f6c0adfad6a60f47bd7aa9238e55f13c7aa..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/How To Cheat At Cards Daniel Madison Pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

    How To Cheat At Cards Daniel Madison Pdf


    DOWNLOAD >>>>> https://urlin.us/2uEw3H



- -[READ] Daniel Madison Anthology - PDF Format. Amazon com Anthology ... reveals the secrets of card cheating. Full depth, no gimmicks. The History of Playing ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/jackli888/stable-diffusion-webui/modules/gfpgan_model.py b/spaces/jackli888/stable-diffusion-webui/modules/gfpgan_model.py deleted file mode 100644 index bc0c5f738e086225505af9738862fde4eecfa4a9..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/gfpgan_model.py +++ /dev/null @@ -1,116 +0,0 @@ -import os -import sys -import traceback - -import facexlib -import gfpgan - -import modules.face_restoration -from modules import paths, shared, devices, modelloader - -model_dir = "GFPGAN" -user_path = None -model_path = os.path.join(paths.models_path, model_dir) -model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" -have_gfpgan = False -loaded_gfpgan_model = None - - -def gfpgann(): - global loaded_gfpgan_model - global model_path - if loaded_gfpgan_model is not None: - loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) - return loaded_gfpgan_model - - if gfpgan_constructor is None: - return None - - models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN") - if len(models) == 1 and "http" in models[0]: - model_file = models[0] - elif len(models) != 0: - latest_file = max(models, key=os.path.getctime) - model_file = latest_file - else: - print("Unable to load gfpgan model!") - return None - if hasattr(facexlib.detection.retinaface, 'device'): - facexlib.detection.retinaface.device = devices.device_gfpgan - model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) - loaded_gfpgan_model = model - - return model - - -def send_model_to(model, device): - model.gfpgan.to(device) - model.face_helper.face_det.to(device) - model.face_helper.face_parse.to(device) - - -def gfpgan_fix_faces(np_image): - model = gfpgann() - if model is None: - return np_image - - send_model_to(model, devices.device_gfpgan) - - np_image_bgr = np_image[:, :, ::-1] - cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) - np_image = gfpgan_output_bgr[:, :, ::-1] - - model.face_helper.clean_all() - - if shared.opts.face_restoration_unload: - send_model_to(model, devices.cpu) - - return np_image - - -gfpgan_constructor = None - - -def setup_model(dirname): - global model_path - if not os.path.exists(model_path): - os.makedirs(model_path) - - try: - from gfpgan import GFPGANer - from facexlib import detection, parsing - global user_path - global have_gfpgan - global gfpgan_constructor - - load_file_from_url_orig = gfpgan.utils.load_file_from_url - facex_load_file_from_url_orig = facexlib.detection.load_file_from_url - facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url - - def my_load_file_from_url(**kwargs): - return load_file_from_url_orig(**dict(kwargs, model_dir=model_path)) - - def facex_load_file_from_url(**kwargs): - return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None)) - - def facex_load_file_from_url2(**kwargs): - return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None)) - - gfpgan.utils.load_file_from_url = my_load_file_from_url - facexlib.detection.load_file_from_url = facex_load_file_from_url - facexlib.parsing.load_file_from_url = facex_load_file_from_url2 - user_path = dirname - have_gfpgan = True - gfpgan_constructor = GFPGANer - - class FaceRestorerGFPGAN(modules.face_restoration.FaceRestoration): - def name(self): - return 
"GFPGAN" - - def restore(self, np_image): - return gfpgan_fix_faces(np_image) - - shared.face_restorers.append(FaceRestorerGFPGAN()) - except Exception: - print("Error setting up GFPGAN:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) diff --git a/spaces/jaleesahmed/model-development/README.md b/spaces/jaleesahmed/model-development/README.md deleted file mode 100644 index c06ddec879f6f8ca6f65fbb7198d40f3f0dd2cf1..0000000000000000000000000000000000000000 --- a/spaces/jaleesahmed/model-development/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Model Development -emoji: 🐠 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -license: lgpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/conv2d_gradfix.py b/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/conv2d_gradfix.py deleted file mode 100644 index 388778fa971d7bc5c64b5fd6c0e5492863ee1c5f..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/conv2d_gradfix.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.conv2d` that supports -arbitrarily high order gradients with zero performance penalty.""" - -import contextlib -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. -weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. 
- -@contextlib.contextmanager -def no_weight_gradients(disable=True): - global weight_gradients_disabled - old = weight_gradients_disabled - if disable: - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - -#---------------------------------------------------------------------------- - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) - -def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(input): - assert isinstance(input, torch.Tensor) - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - if input.device.type != 'cuda': - return False - return True - -def _tuple_of_ints(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - assert len(xs) == ndim - assert all(isinstance(x, int) for x in xs) - return xs - -#---------------------------------------------------------------------------- - -_conv2d_gradfix_cache = dict() -_null_tensor = torch.empty([0]) - -def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): - # Parse arguments. - ndim = 2 - weight_shape = tuple(weight_shape) - stride = _tuple_of_ints(stride, ndim) - padding = _tuple_of_ints(padding, ndim) - output_padding = _tuple_of_ints(output_padding, ndim) - dilation = _tuple_of_ints(dilation, ndim) - - # Lookup from cache. - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in _conv2d_gradfix_cache: - return _conv2d_gradfix_cache[key] - - # Validate arguments. - assert groups >= 1 - assert len(weight_shape) == ndim + 2 - assert all(stride[i] >= 1 for i in range(ndim)) - assert all(padding[i] >= 0 for i in range(ndim)) - assert all(dilation[i] >= 0 for i in range(ndim)) - if not transpose: - assert all(output_padding[i] == 0 for i in range(ndim)) - else: # transpose - assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) - - # Helpers. - common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - # Forward & backward. 
- class Conv2d(torch.autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - assert weight.shape == weight_shape - ctx.save_for_backward( - input if weight.requires_grad else _null_tensor, - weight if input.requires_grad else _null_tensor, - ) - ctx.input_shape = input.shape - - # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere). - if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0): - a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1]) - b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1) - c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2) - c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1) - c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3) - return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) - - # General case => cuDNN. - if transpose: - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - input_shape = ctx.input_shape - grad_input = None - grad_weight = None - grad_bias = None - - if ctx.needs_input_grad[0]: - p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape) - op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) - grad_input = op.apply(grad_output, weight, None) - assert grad_input.shape == input_shape - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - assert grad_weight.shape == weight_shape - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum([0, 2, 3]) - - return grad_input, grad_weight, grad_bias - - # Gradient with respect to the weights. - class Conv2dGradWeight(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - ctx.save_for_backward( - grad_output if input.requires_grad else _null_tensor, - input if grad_output.requires_grad else _null_tensor, - ) - ctx.grad_output_shape = grad_output.shape - ctx.input_shape = input.shape - - # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere). - if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0): - a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) - b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2) - c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape) - return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format)) - - # General case => cuDNN. 
- name = 'aten::cudnn_convolution_transpose_backward_weight' if transpose else 'aten::cudnn_convolution_backward_weight' - flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] - return torch._C._jit_get_operation(name)(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) - - @staticmethod - def backward(ctx, grad2_grad_weight): - grad_output, input = ctx.saved_tensors - grad_output_shape = ctx.grad_output_shape - input_shape = ctx.input_shape - grad2_grad_output = None - grad2_input = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) - assert grad2_grad_output.shape == grad_output_shape - - if ctx.needs_input_grad[1]: - p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape) - op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs) - grad2_input = op.apply(grad_output, grad2_grad_weight, None) - assert grad2_input.shape == input_shape - - return grad2_grad_output, grad2_input - - _conv2d_gradfix_cache[key] = Conv2d - return Conv2d - -#---------------------------------------------------------------------------- diff --git a/spaces/jbilcke-hf/webapp-factory-llama-node/public/css/daisyui@2.6.0.css b/spaces/jbilcke-hf/webapp-factory-llama-node/public/css/daisyui@2.6.0.css deleted file mode 100644 index a6367f5d23383e694d54128f8c92cfd31f1c48e6..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/webapp-factory-llama-node/public/css/daisyui@2.6.0.css +++ /dev/null @@ -1 +0,0 @@ -:root{--p:259 94% 51%;--pf:259 94% 41%;--sf:314 100% 38%;--af:174 60% 41%;--nf:219 14% 22%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:0 0% 100%;--s:314 100% 47%;--sc:0 0% 100%;--a:174 60% 51%;--ac:0 0% 100%;--n:219 14% 28%;--nc:0 0% 100%;--b1:0 0% 100%;--b2:210 20% 98%;--b3:216 12% 84%;--bc:215 28% 17%}@media (prefers-color-scheme:dark){:root{--p:262 80% 50%;--pf:262 80% 40%;--sf:316 70% 40%;--af:175 70% 33%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:0 0% 100%;--s:316 70% 50%;--sc:0 0% 100%;--a:175 70% 41%;--ac:0 0% 100%;--n:218 18% 12%;--nf:223 17% 8%;--nc:220 13% 69%;--b1:220 18% 20%;--b2:220 17% 17%;--b3:219 18% 15%;--bc:220 13% 69%}}[data-theme=light]{--p:259 94% 51%;--pf:259 94% 41%;--sf:314 100% 38%;--af:174 60% 41%;--nf:219 14% 22%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:0 0% 100%;--s:314 100% 47%;--sc:0 0% 100%;--a:174 60% 51%;--ac:0 0% 100%;--n:219 14% 28%;--nc:0 0% 100%;--b1:0 0% 100%;--b2:210 20% 98%;--b3:216 12% 84%;--bc:215 28% 17%}[data-theme=dark]{--p:262 80% 50%;--pf:262 80% 
40%;--sf:316 70% 40%;--af:175 70% 33%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:0 0% 100%;--s:316 70% 50%;--sc:0 0% 100%;--a:175 70% 41%;--ac:0 0% 100%;--n:218 18% 12%;--nf:223 17% 8%;--nc:220 13% 69%;--b1:220 18% 20%;--b2:220 17% 17%;--b3:219 18% 15%;--bc:220 13% 69%}[data-theme=cupcake]{--p:183 47% 59%;--pf:183 47% 47%;--sf:338 71% 62%;--af:39 84% 46%;--nf:280 46% 11%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--pc:183 100% 12%;--sc:338 100% 16%;--ac:39 100% 12%;--nc:280 83% 83%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--s:338 71% 78%;--a:39 84% 58%;--n:280 46% 14%;--b1:24 33% 97%;--b2:27 22% 92%;--b3:22 14% 89%;--bc:280 46% 14%;--rounded-btn:1.9rem;--tab-border:2px;--tab-radius:.5rem}[data-theme=bumblebee]{--p:41 74% 53%;--pf:41 74% 42%;--sf:50 94% 46%;--af:240 33% 11%;--nf:240 33% 11%;--b2:0 0% 90%;--b3:0 0% 81%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--bc:0 0% 20%;--ac:240 60% 83%;--nc:240 60% 83%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:240 33% 14%;--s:50 94% 58%;--sc:240 33% 14%;--a:240 33% 14%;--n:240 33% 14%;--b1:0 0% 100%}[data-theme=emerald]{--p:141 50% 60%;--pf:141 50% 48%;--sf:219 96% 48%;--af:10 81% 45%;--nf:219 20% 20%;--b2:0 0% 90%;--b3:0 0% 81%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--btn-text-case:uppercase;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:210 20% 98%;--s:219 96% 60%;--sc:210 20% 98%;--a:10 81% 56%;--ac:210 20% 98%;--n:219 20% 25%;--nc:210 20% 98%;--b1:0 0% 100%;--bc:219 20% 25%;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}[data-theme=corporate]{--p:229 96% 64%;--pf:229 96% 51%;--sf:215 26% 47%;--af:154 49% 48%;--nf:233 27% 10%;--b2:0 0% 90%;--b3:0 0% 81%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--pc:229 100% 93%;--sc:215 100% 12%;--ac:154 100% 12%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--btn-text-case:uppercase;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:215 26% 59%;--a:154 49% 60%;--n:233 27% 13%;--nc:210 38% 95%;--b1:0 0% 100%;--bc:233 27% 13%;--rounded-box:.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}[data-theme=synthwave]{--p:321 70% 69%;--pf:321 70% 55%;--sf:197 87% 52%;--af:48 89% 46%;--nf:253 61% 15%;--b2:254 59% 23%;--b3:254 59% 21%;--pc:321 100% 14%;--sc:197 100% 13%;--ac:48 100% 11%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:197 87% 65%;--a:48 89% 57%;--n:253 61% 19%;--nc:260 60% 98%;--b1:254 59% 
26%;--bc:260 60% 98%;--in:199 87% 64%;--inc:257 63% 17%;--su:168 74% 68%;--suc:257 63% 17%;--wa:48 89% 57%;--wac:257 63% 17%;--er:352 74% 57%;--erc:260 60% 98%}[data-theme=retro]{--p:3 74% 76%;--pf:3 74% 61%;--sf:145 27% 58%;--af:49 67% 61%;--nf:42 17% 34%;--inc:221 100% 91%;--suc:142 100% 87%;--wac:32 100% 9%;--erc:0 100% 90%;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:345 5% 15%;--s:145 27% 72%;--sc:345 5% 15%;--a:49 67% 76%;--ac:345 5% 15%;--n:42 17% 42%;--nc:45 47% 80%;--b1:45 47% 80%;--b2:45 37% 72%;--b3:42 36% 65%;--bc:345 5% 15%;--in:221 83% 53%;--su:142 76% 36%;--wa:32 95% 44%;--er:0 72% 51%;--rounded-box:.4rem;--rounded-btn:.4rem;--rounded-badge:.4rem}[data-theme=cyberpunk]{--pf:345 100% 58%;--sf:195 80% 56%;--af:276 74% 57%;--nf:57 100% 10%;--b2:56 100% 45%;--b3:56 100% 41%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--bc:56 100% 10%;--pc:345 100% 15%;--sc:195 100% 14%;--ac:276 100% 14%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--p:345 100% 73%;--s:195 80% 70%;--a:276 74% 71%;--n:57 100% 13%;--nc:56 100% 50%;--b1:56 100% 50%;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--tab-radius:0;font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace}[data-theme=valentine]{--p:353 74% 67%;--pf:353 74% 54%;--sf:254 86% 61%;--af:181 56% 56%;--nf:336 43% 38%;--b2:318 46% 80%;--b3:318 46% 72%;--pc:353 100% 13%;--sc:254 100% 15%;--ac:181 100% 14%;--inc:221 100% 91%;--suc:142 100% 87%;--wac:32 100% 9%;--erc:0 100% 90%;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:254 86% 77%;--a:181 56% 70%;--n:336 43% 48%;--nc:318 46% 89%;--b1:318 46% 89%;--bc:344 38% 28%;--in:221 83% 53%;--su:142 76% 36%;--wa:32 95% 44%;--er:0 72% 51%;--rounded-btn:1.9rem}[data-theme=halloween]{--p:32 89% 52%;--pf:32 89% 42%;--sf:271 46% 34%;--af:91 100% 26%;--nf:180 4% 9%;--b2:0 0% 12%;--b3:0 0% 10%;--bc:0 0% 83%;--sc:271 100% 88%;--ac:91 100% 87%;--nc:180 5% 82%;--inc:221 100% 91%;--suc:142 100% 87%;--wac:32 100% 9%;--erc:0 100% 90%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--pc:180 7% 8%;--s:271 46% 42%;--a:91 100% 33%;--n:180 4% 11%;--b1:0 0% 13%;--in:221 83% 53%;--su:142 76% 36%;--wa:32 95% 44%;--er:0 72% 51%}[data-theme=garden]{--p:139 16% 43%;--pf:139 16% 34%;--sf:97 37% 75%;--af:0 68% 75%;--nf:0 4% 28%;--b2:0 4% 82%;--b3:0 4% 74%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--pc:139 100% 89%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:97 37% 93%;--sc:96 32% 15%;--a:0 68% 94%;--ac:0 22% 16%;--n:0 4% 35%;--nc:0 4% 91%;--b1:0 4% 91%;--bc:0 3% 6%}[data-theme=forest]{--p:141 72% 42%;--pf:141 72% 34%;--sf:141 75% 38%;--af:35 69% 42%;--nf:0 10% 5%;--b2:0 12% 7%;--b3:0 12% 7%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--bc:0 
12% 82%;--pc:141 100% 88%;--sc:141 100% 10%;--ac:35 100% 10%;--nc:0 7% 81%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:141 75% 48%;--a:35 69% 52%;--n:0 10% 6%;--b1:0 12% 8%;--rounded-btn:1.9rem}[data-theme=aqua]{--p:182 93% 49%;--pf:182 93% 40%;--sf:274 31% 45%;--af:47 100% 64%;--nf:205 54% 40%;--b2:219 53% 39%;--b3:219 53% 35%;--bc:219 100% 89%;--pc:182 100% 10%;--sc:274 100% 11%;--ac:47 100% 16%;--nc:205 100% 90%;--inc:221 100% 91%;--suc:142 100% 87%;--wac:32 100% 9%;--erc:0 100% 90%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:274 31% 57%;--a:47 100% 80%;--n:205 54% 50%;--b1:219 53% 43%;--in:221 83% 53%;--su:142 76% 36%;--wa:32 95% 44%;--er:0 72% 51%}[data-theme=lofi]{--p:0 0% 5%;--pf:0 0% 4%;--sf:0 2% 8%;--af:0 0% 12%;--nf:0 0% 0%;--btn-text-case:uppercase;--border-btn:1px;--tab-border:1px;--pc:0 0% 100%;--s:0 2% 10%;--sc:0 0% 100%;--a:0 0% 15%;--ac:0 0% 100%;--n:0 0% 0%;--nc:0 0% 100%;--b1:0 0% 100%;--b2:0 0% 95%;--b3:0 2% 90%;--bc:0 0% 0%;--in:212 100% 48%;--inc:0 0% 100%;--su:137 72% 46%;--suc:0 0% 100%;--wa:5 100% 66%;--wac:0 0% 100%;--er:325 78% 49%;--erc:0 0% 100%;--rounded-box:.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1;--tab-radius:0}[data-theme=pastel]{--p:284 22% 80%;--pf:284 22% 64%;--sf:352 70% 70%;--af:158 55% 65%;--nf:199 44% 49%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--bc:0 0% 20%;--pc:284 59% 16%;--sc:352 100% 18%;--ac:158 100% 16%;--nc:199 100% 12%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:352 70% 88%;--a:158 55% 81%;--n:199 44% 61%;--b1:0 0% 100%;--b2:210 20% 98%;--b3:216 12% 84%;--rounded-btn:1.9rem}[data-theme=fantasy]{--p:296 83% 25%;--pf:296 83% 20%;--sf:200 100% 30%;--af:31 94% 41%;--nf:215 28% 13%;--b2:0 0% 90%;--b3:0 0% 81%;--in:198 93% 60%;--su:158 64% 52%;--wa:43 96% 56%;--er:0 91% 71%;--pc:296 100% 85%;--sc:200 100% 87%;--ac:31 100% 10%;--nc:215 62% 83%;--inc:198 100% 12%;--suc:158 100% 10%;--wac:43 100% 11%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:200 100% 37%;--a:31 94% 51%;--n:215 28% 17%;--b1:0 0% 100%;--bc:215 28% 17%}[data-theme=wireframe]{--pf:0 0% 58%;--sf:0 0% 58%;--af:0 0% 58%;--nf:0 0% 74%;--bc:0 0% 20%;--pc:0 0% 14%;--sc:0 0% 14%;--ac:0 0% 14%;--nc:0 0% 18%;--inc:240 100% 90%;--suc:120 100% 85%;--wac:60 100% 10%;--erc:0 100% 90%;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--p:0 0% 72%;--s:0 0% 72%;--a:0 0% 72%;--n:0 0% 92%;--b1:0 0% 100%;--b2:0 0% 93%;--b3:0 0% 87%;--in:240 100% 50%;--su:120 100% 25%;--wa:60 30% 50%;--er:0 100% 50%;--rounded-box:.2rem;--rounded-btn:.2rem;--rounded-badge:.2rem;--tab-radius:.2rem;font-family:Chalkboard,comic sans ms,sans-serif}[data-theme=black]{--p:0 2% 
20%;--pf:0 2% 16%;--sf:0 2% 16%;--af:0 2% 16%;--bc:0 0% 80%;--pc:0 5% 84%;--sc:0 5% 84%;--ac:0 5% 84%;--nc:0 3% 83%;--inc:240 100% 90%;--suc:120 100% 85%;--wac:60 100% 10%;--erc:0 100% 90%;--border-btn:1px;--tab-border:1px;--s:0 2% 20%;--a:0 2% 20%;--b1:0 0% 0%;--b2:0 0% 5%;--b3:0 2% 10%;--n:0 1% 15%;--nf:0 2% 20%;--in:240 100% 50%;--su:120 100% 25%;--wa:60 100% 50%;--er:0 100% 50%;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--animation-btn:0;--animation-input:0;--btn-text-case:lowercase;--btn-focus-scale:1;--tab-radius:0}[data-theme=luxury]{--p:0 0% 100%;--pf:0 0% 80%;--sf:218 54% 14%;--af:319 22% 21%;--nf:270 4% 7%;--pc:0 0% 20%;--sc:218 100% 84%;--ac:319 85% 85%;--inc:202 100% 14%;--suc:89 100% 10%;--wac:54 100% 13%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:218 54% 18%;--a:319 22% 26%;--n:270 4% 9%;--nc:37 67% 58%;--b1:240 10% 4%;--b2:270 4% 9%;--b3:270 2% 18%;--bc:37 67% 58%;--in:202 100% 70%;--su:89 62% 52%;--wa:54 69% 64%;--er:0 100% 72%}[data-theme=dracula]{--p:326 100% 74%;--pf:326 100% 59%;--sf:265 89% 62%;--af:31 100% 57%;--nf:230 15% 24%;--b2:231 15% 17%;--b3:231 15% 15%;--pc:326 100% 15%;--sc:265 100% 16%;--ac:31 100% 14%;--nc:230 71% 86%;--inc:191 100% 15%;--suc:135 100% 13%;--wac:65 100% 15%;--erc:0 100% 13%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:265 89% 78%;--a:31 100% 71%;--n:230 15% 30%;--b1:231 15% 18%;--bc:60 30% 96%;--in:191 97% 77%;--su:135 94% 65%;--wa:65 92% 76%;--er:0 100% 67%}[data-theme=cmyk]{--p:203 83% 60%;--pf:203 83% 48%;--sf:335 78% 48%;--af:56 100% 48%;--nf:0 0% 8%;--b2:0 0% 90%;--b3:0 0% 81%;--bc:0 0% 20%;--pc:203 100% 12%;--sc:335 100% 92%;--ac:56 100% 12%;--nc:0 0% 82%;--inc:192 100% 10%;--suc:291 100% 88%;--wac:25 100% 11%;--erc:4 100% 91%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:335 78% 60%;--a:56 100% 60%;--n:0 0% 10%;--b1:0 0% 100%;--in:192 48% 52%;--su:291 48% 38%;--wa:25 85% 57%;--er:4 81% 56%}[data-theme=autumn]{--p:344 96% 28%;--pf:344 96% 22%;--sf:0 63% 47%;--af:27 56% 50%;--nf:22 17% 35%;--b2:0 0% 85%;--b3:0 0% 77%;--bc:0 0% 19%;--pc:344 100% 86%;--sc:0 100% 92%;--ac:27 100% 13%;--nc:22 100% 89%;--inc:187 100% 10%;--suc:165 100% 89%;--wac:30 100% 10%;--erc:354 100% 90%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:0 63% 58%;--a:27 56% 63%;--n:22 17% 44%;--b1:0 0% 95%;--in:187 48% 50%;--su:165 34% 43%;--wa:30 84% 50%;--er:354 79% 49%}[data-theme=business]{--p:210 64% 31%;--pf:210 64% 24%;--sf:200 13% 44%;--af:13 80% 48%;--nf:213 14% 13%;--b2:0 0% 11%;--b3:0 0% 10%;--bc:0 0% 83%;--pc:210 100% 86%;--sc:200 100% 11%;--ac:13 100% 12%;--nc:213 28% 83%;--inc:199 100% 88%;--suc:144 100% 11%;--wac:39 100% 12%;--erc:6 100% 89%;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:200 13% 55%;--a:13 80% 60%;--n:213 14% 16%;--b1:0 0% 13%;--in:199 100% 42%;--su:144 31% 56%;--wa:39 64% 
60%;--er:6 56% 43%;--rounded-box:.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem}[data-theme=acid]{--p:303 100% 50%;--pf:303 100% 40%;--sf:27 100% 40%;--af:72 98% 40%;--nf:238 43% 14%;--b2:0 0% 88%;--b3:0 0% 79%;--bc:0 0% 20%;--pc:303 100% 90%;--sc:27 100% 10%;--ac:72 100% 10%;--nc:238 99% 83%;--inc:210 100% 12%;--suc:149 100% 12%;--wac:53 100% 11%;--erc:1 100% 89%;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:27 100% 50%;--a:72 98% 50%;--n:238 43% 17%;--b1:0 0% 98%;--in:210 92% 58%;--su:149 50% 58%;--wa:53 93% 57%;--er:1 100% 45%;--rounded-box:1.25rem;--rounded-btn:1rem;--rounded-badge:1rem}[data-theme=lemonade]{--p:89 96% 31%;--pf:89 96% 24%;--sf:60 81% 44%;--af:63 80% 71%;--nf:238 43% 14%;--b2:0 0% 90%;--b3:0 0% 81%;--bc:0 0% 20%;--pc:89 100% 86%;--sc:60 100% 11%;--ac:63 100% 18%;--nc:238 99% 83%;--inc:192 79% 17%;--suc:74 100% 16%;--wac:50 100% 15%;--erc:1 100% 17%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:60 81% 55%;--a:63 80% 88%;--n:238 43% 17%;--b1:0 0% 100%;--in:192 39% 85%;--su:74 76% 79%;--wa:50 87% 75%;--er:1 70% 83%}[data-theme=night]{--p:198 93% 60%;--pf:198 93% 48%;--sf:234 89% 59%;--af:329 86% 56%;--b2:222 47% 10%;--b3:222 47% 9%;--bc:222 66% 82%;--pc:198 100% 12%;--sc:234 100% 15%;--ac:329 100% 14%;--nc:217 76% 83%;--inc:198 100% 90%;--suc:172 100% 10%;--wac:41 100% 13%;--erc:351 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:234 89% 74%;--a:329 86% 70%;--n:217 33% 17%;--nf:217 30% 22%;--b1:222 47% 11%;--in:198 90% 48%;--su:172 66% 50%;--wa:41 88% 64%;--er:351 95% 71%}[data-theme=coffee]{--p:30 67% 58%;--pf:30 67% 46%;--sf:182 25% 16%;--af:194 74% 20%;--nf:300 20% 5%;--b2:306 19% 10%;--b3:306 19% 9%;--pc:30 100% 12%;--sc:182 67% 84%;--ac:194 100% 85%;--nc:300 14% 81%;--inc:171 100% 13%;--suc:93 100% 12%;--wac:43 100% 14%;--erc:10 100% 15%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:182 25% 20%;--a:194 74% 25%;--n:300 20% 6%;--b1:306 19% 11%;--bc:37 8% 42%;--in:171 37% 67%;--su:93 25% 62%;--wa:43 100% 69%;--er:10 95% 75%}[data-theme=winter]{--p:212 100% 51%;--pf:212 100% 41%;--sf:247 47% 35%;--af:310 49% 42%;--nf:217 92% 8%;--pc:212 100% 90%;--sc:247 100% 89%;--ac:310 100% 90%;--nc:217 100% 82%;--inc:192 100% 16%;--suc:182 100% 13%;--wac:32 100% 17%;--erc:0 100% 14%;--rounded-box:1rem;--rounded-btn:.5rem;--rounded-badge:1.9rem;--animation-btn:.25s;--animation-input:.2s;--btn-text-case:uppercase;--btn-focus-scale:.95;--border-btn:1px;--tab-border:1px;--tab-radius:.5rem;--s:247 47% 43%;--a:310 49% 52%;--n:217 92% 10%;--b1:0 0% 100%;--b2:217 100% 97%;--b3:219 44% 92%;--bc:214 30% 32%;--in:192 93% 78%;--su:182 47% 66%;--wa:32 62% 84%;--er:0 63% 72%}*,:before,:after{--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: 
;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#3b82f680;--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;box-sizing:border-box;border:0 solid }:before,:after{--tw-content:""}html{-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;line-height:1.5}body{line-height:inherit;margin:0}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:#0000;background-image:none}:-moz-focusring{outline:auto }:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{margin:0;padding:0;list-style:none}textarea{resize:vertical}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}[hidden]{display:none}:root{background-color:hsla(var(--b1)/var(--tw-bg-opacity,1));color:hsla(var(--bc)/var(--tw-text-opacity,1))}html{-webkit-tap-highlight-color:transparent}*,:before,:after{--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#3b82f680;--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000}.alert{width:100%;--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));border-radius:var(--rounded-box,1rem);flex-direction:column;justify-content:space-between;align-items:center;gap:1rem;padding:1rem;display:flex}.alert>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.5rem*calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.5rem*var(--tw-space-y-reverse))}@media 
(min-width:768px){.alert{flex-direction:row}.alert>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(0px*calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(0px*var(--tw-space-y-reverse))}}.alert>:where(*){align-items:center;gap:.5rem;display:flex}.artboard{width:100%}.avatar{display:inline-flex;position:relative}.avatar>div{aspect-ratio:1/1;display:block;overflow:hidden}.avatar img{height:100%;width:100%;object-fit:cover}.avatar.placeholder>div{justify-content:center;align-items:center;display:flex}.badge{height:1.25rem;width:fit-content;--tw-border-opacity:1;border-width:1px;border-color:hsl(var(--n)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity));border-radius:var(--rounded-badge,1.9rem);justify-content:center;align-items:center;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:inline-flex}.breadcrumbs{max-width:100%;padding-top:.5rem;padding-bottom:.5rem;overflow-x:auto}.breadcrumbs>ul{white-space:nowrap;min-height:min-content;align-items:center;display:flex}.breadcrumbs>ul>li{align-items:center;display:flex}.breadcrumbs>ul>li>a{cursor:pointer;align-items:center;display:flex}.breadcrumbs>ul>li>a:hover{-webkit-text-decoration-line:underline;text-decoration-line:underline}.btn{cursor:pointer;-webkit-user-select:none;user-select:none;border-color:#0000;border-color:hsl(var(--n)/var(--tw-border-opacity));text-align:center;border-radius:var(--rounded-btn,.5rem);height:3rem;min-height:3rem;text-transform:uppercase;text-transform:var(--btn-text-case,uppercase);border-width:var(--border-btn,1px);animation:button-pop var(--animation-btn,.25s)ease-out;--tw-border-opacity:1;--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity));flex-wrap:wrap;flex-shrink:0;justify-content:center;align-items:center;padding-left:1rem;padding-right:1rem;font-size:.875rem;font-weight:600;line-height:1em;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:inline-flex}.btn-disabled,.btn[disabled]{pointer-events:none;--tw-border-opacity:0;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-bg-opacity:.2;color:hsl(var(--bc)/var(--tw-text-opacity));--tw-text-opacity:.2}.btn-square{height:3rem;width:3rem;padding:0}.btn-circle{height:3rem;width:3rem;border-radius:9999px;padding:0}.btn.loading,.btn.loading:hover{pointer-events:none}.btn.loading:before{height:1rem;width:1rem;content:"";border-radius:9999px;border-width:2px;border-color:#0000 currentColor currentColor #0000;margin-right:.5rem;animation:spin 2s linear infinite}@media (prefers-reduced-motion:reduce){.btn.loading:before{animation:spin 10s linear infinite}}@keyframes 
spin{0%{transform:rotate(0deg)}to{transform:rotate(360deg)}}.btn-group{flex-wrap:wrap;display:flex}.btn-group>input[type=radio].btn{-webkit-appearance:none;appearance:none}.btn-group>input[type=radio].btn:before{content:attr(data-title)}.card{border-radius:var(--rounded-box,1rem);flex-direction:column;display:flex;position:relative;overflow:hidden}.card:focus{outline-offset:2px;outline:2px solid #0000}.card-body{padding:var(--padding-card,2rem);flex-direction:column;flex:auto;gap:.5rem;display:flex}.card-body :where(p){flex-grow:1}.card-actions{flex-wrap:wrap;align-items:flex-start;gap:.5rem;display:flex}.card figure{justify-content:center;align-items:center;display:flex}.card.image-full{display:grid}.card.image-full:before{content:"";z-index:10;--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity));opacity:.75;border-radius:var(--rounded-box,1rem);position:relative}.card.image-full:before,.card.image-full>*{grid-row-start:1;grid-column-start:1}.card.image-full>figure img{height:100%;object-fit:cover}.card.image-full>.card-body{z-index:20;--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity));position:relative}.carousel{scroll-snap-type:x mandatory;scroll-behavior:smooth;-ms-overflow-style:none;scrollbar-width:none;display:flex;overflow-x:scroll}.carousel-vertical{scroll-snap-type:y mandatory;flex-direction:column;overflow-y:scroll}.carousel-item{box-sizing:content-box;scroll-snap-align:start;flex:none;display:flex}.carousel-center .carousel-item{scroll-snap-align:center}.carousel-end .carousel-item{scroll-snap-align:end}.checkbox{--chkbg:var(--bc);--chkfg:var(--b1);height:1.5rem;width:1.5rem;cursor:pointer;-webkit-appearance:none;appearance:none;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:.2;border-radius:var(--rounded-btn,.5rem)}.collapse{display:grid;position:relative;overflow:hidden}.collapse-title,.collapse>input[type=checkbox],.collapse-content{grid-row-start:1;grid-column-start:1}.collapse>input[type=checkbox]{-webkit-appearance:none;appearance:none;opacity:0}.collapse-content{max-height:0;cursor:unset;grid-row-start:2;padding-left:1rem;padding-right:1rem;transition:padding .2s ease-in-out,background-color .2s ease-in-out;overflow:hidden}.collapse-open .collapse-content,.collapse:focus:not(.collapse-close) .collapse-content,.collapse:not(.collapse-close) input[type=checkbox]:checked~.collapse-content{max-height:9000px}:root .countdown{line-height:1em}.countdown{display:inline-flex}.countdown>*{height:1em;display:inline-block;overflow-y:hidden}.countdown>:before{content:"00\a 01\a 02\a 03\a 04\a 05\a 06\a 07\a 08\a 09\a 10\a 11\a 12\a 13\a 14\a 15\a 16\a 17\a 18\a 19\a 20\a 21\a 22\a 23\a 24\a 25\a 26\a 27\a 28\a 29\a 30\a 31\a 32\a 33\a 34\a 35\a 36\a 37\a 38\a 39\a 40\a 41\a 42\a 43\a 44\a 45\a 46\a 47\a 48\a 49\a 50\a 51\a 52\a 53\a 54\a 55\a 56\a 57\a 58\a 59\a 60\a 61\a 62\a 63\a 64\a 65\a 66\a 67\a 68\a 69\a 70\a 71\a 72\a 73\a 74\a 75\a 76\a 77\a 78\a 79\a 80\a 81\a 82\a 83\a 84\a 85\a 86\a 87\a 88\a 89\a 90\a 91\a 92\a 93\a 94\a 95\a 96\a 97\a 98\a 99\a ";white-space:pre;top:calc(var(--value)*-1em);text-align:center;transition:all 1s cubic-bezier(1,0,0,1);position:relative}.divider{height:1rem;flex-direction:row;align-self:stretch;align-items:center;margin-top:1rem;margin-bottom:1rem;display:flex}.divider:before,.divider:after{content:"";height:.125rem;width:100%;flex-grow:1}.drawer{display:grid;overflow:hidden}.drawer.drawer-end{direction:rtl}.drawer.drawer-end>*{direction:ltr}.drawer.drawer-end 
.drawer-toggle~.drawer-side>.drawer-overlay+*{--tw-translate-x:100%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));justify-self:end}.drawer.drawer-end .drawer-toggle:checked~.drawer-side>.drawer-overlay+*{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.drawer-toggle{height:0;width:0;-webkit-appearance:none;appearance:none;opacity:0;position:absolute}.drawer-toggle~.drawer-content{z-index:0;max-height:100vh;grid-row-start:1;grid-column-start:1;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);overflow-y:auto}.drawer-toggle~.drawer-side{max-height:100vh;grid-row-start:1;grid-column-start:1;display:grid}.drawer-toggle~.drawer-side>.drawer-overlay{visibility:hidden;opacity:0;cursor:pointer;--tw-bg-opacity:1;background-color:hsl(var(--nf,var(--n))/var(--tw-bg-opacity));grid-row-start:1;grid-column-start:1;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.drawer-toggle~.drawer-side>.drawer-overlay+*{z-index:10;--tw-translate-x:-100%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));grid-row-start:1;grid-column-start:1;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.drawer-toggle:checked~.drawer-side{overflow-y:auto}.drawer-toggle:checked~.drawer-side>.drawer-overlay{visibility:visible;opacity:.999999;--tw-bg-opacity:.4}.drawer-toggle:checked~.drawer-side>.drawer-overlay+*{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}[dir=rtl] .drawer-toggle~.drawer-side>.drawer-overlay+*{--tw-translate-x:100%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}[dir=rtl] .drawer-toggle:checked~.drawer-side>.drawer-overlay+*{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}@media (min-width:1024px){.drawer-mobile{grid-auto-columns:max-content auto}.drawer-mobile>.drawer-toggle~.drawer-content{height:auto}@media (min-width:1024px){.drawer-mobile>.drawer-toggle~.drawer-content{grid-column-start:2}}@media (min-width:1024px){.drawer-mobile>.drawer-toggle~.drawer-side>.drawer-overlay{visibility:visible}}@media (min-width:1024px){.drawer-mobile>.drawer-toggle~.drawer-side>.drawer-overlay+*{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}}@media (min-width:1024px){.drawer-mobile.drawer-end>.drawer-toggle~.drawer-content{grid-column-start:1}}@media (min-width:1024px){.drawer-mobile.drawer-end>.drawer-toggle~.drawer-side{grid-column-start:2}}@media 
(min-width:1024px){.drawer-mobile.drawer-end>.drawer-toggle~.drawer-side>.drawer-overlay{visibility:visible}}@media (min-width:1024px){.drawer-mobile.drawer-end>.drawer-toggle~.drawer-side>.drawer-overlay+*{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}}.drawer-mobile>.drawer-toggle~.drawer-side{overflow-y:auto}.drawer-mobile.drawer-end{direction:ltr;grid-auto-columns:auto max-content}.drawer-mobile.drawer-end>.drawer-toggle~.drawer-content{height:auto}.drawer-mobile.drawer-end>.drawer-toggle~.drawer-side{overflow-y:auto}.drawer-mobile>.drawer-toggle:checked~.drawer-content{--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}}.dropdown{display:inline-block;position:relative}.dropdown>:focus{outline-offset:2px;outline:2px solid #0000}.dropdown .dropdown-content{visibility:hidden;z-index:50;opacity:0;transform-origin:top;--tw-scale-x:.95;--tw-scale-y:.95;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);position:absolute}.dropdown-end .dropdown-content{right:0}.dropdown-left .dropdown-content{transform-origin:100%;top:0;bottom:auto;right:100%}.dropdown-right .dropdown-content{transform-origin:0;top:0;bottom:auto;left:100%}.dropdown-top .dropdown-content{transform-origin:bottom;top:auto;bottom:100%}.dropdown-end.dropdown-right .dropdown-content,.dropdown-end.dropdown-left .dropdown-content{top:auto;bottom:0}.dropdown.dropdown-open .dropdown-content,.dropdown.dropdown-hover:hover .dropdown-content,.dropdown:not(.dropdown-hover):focus .dropdown-content,.dropdown:not(.dropdown-hover):focus-within .dropdown-content{visibility:visible;opacity:1}.footer{width:100%;grid-auto-flow:row;place-items:start;gap:2.5rem 1rem;font-size:.875rem;line-height:1.25rem;display:grid}.footer>*{place-items:start;gap:.5rem;display:grid}.footer-center{text-align:center;place-items:center}.footer-center>*{place-items:center}@media (min-width:48rem){.footer{grid-auto-flow:column}.footer-center{grid-auto-flow:dense}}.form-control{flex-direction:column;display:flex}.label{-webkit-user-select:none;user-select:none;justify-content:space-between;align-items:center;padding:.5rem .25rem;display:flex}.hero{width:100%;background-position:50%;background-size:cover;place-items:center;display:grid}.hero>*{grid-row-start:1;grid-column-start:1}.hero-overlay{height:100%;width:100%;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-bg-opacity:.5;grid-row-start:1;grid-column-start:1}.hero-content{z-index:0;max-width:80rem;justify-content:center;align-items:center;gap:1rem;padding:1rem;display:flex}.indicator{width:fit-content;display:inline-flex;position:relative}.indicator 
.indicator-item{z-index:1;--tw-translate-x:50%;--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));position:absolute;inset:0 0 auto auto}.indicator .indicator-item.indicator-start{--tw-translate-x:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));left:0;right:auto}.indicator .indicator-item.indicator-center{--tw-translate-x:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));left:50%;right:50%}.indicator .indicator-item.indicator-end{--tw-translate-x:50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));left:auto;right:0}.indicator .indicator-item.indicator-bottom{--tw-translate-y:50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));top:auto;bottom:0}.indicator .indicator-item.indicator-middle{--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));top:50%;bottom:50%}.indicator .indicator-item.indicator-top{--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));top:0;bottom:auto}.input{height:3rem;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:0;--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));border-radius:var(--rounded-btn,.5rem);flex-shrink:1;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.input-group{width:100%;align-items:stretch;display:flex}.input-group>*,.input-group>.input{border-radius:0}.input-group-md{font-size:.875rem;line-height:2}.input-group-lg{font-size:1.125rem;line-height:2}.input-group-sm{font-size:.875rem;line-height:2rem}.input-group-xs{font-size:.75rem;line-height:1.625}.input-group :where(span){--tw-bg-opacity:1;background-color:hsl(var(--b3,var(--b2))/var(--tw-bg-opacity));align-items:center;padding-left:1rem;padding-right:1rem;display:flex}.input-group :first-child{border-top-left-radius:var(--rounded-btn,.5rem);border-top-right-radius:0;border-bottom-left-radius:var(--rounded-btn,.5rem);border-bottom-right-radius:0}.input-group :last-child{border-top-left-radius:0;border-top-right-radius:var(--rounded-btn,.5rem);border-bottom-left-radius:0;border-bottom-right-radius:var(--rounded-btn,.5rem)}.input-group-vertical{flex-direction:column}.input-group-vertical 
:first-child{border-top-left-radius:var(--rounded-btn,.5rem);border-top-right-radius:var(--rounded-btn,.5rem);border-bottom-left-radius:0;border-bottom-right-radius:0}.input-group-vertical :last-child{border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:var(--rounded-btn,.5rem);border-bottom-right-radius:var(--rounded-btn,.5rem)}.kbd{border-width:1px;border-color:hsl(var(--nf,var(--n))/var(--tw-border-opacity));--tw-border-opacity:.2;--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));border-radius:var(--rounded-btn,.5rem);min-height:2.2em;min-width:2.2em;border-bottom-width:2px;justify-content:center;align-items:center;padding-left:.5rem;padding-right:.5rem;display:inline-flex}.link{cursor:pointer;-webkit-text-decoration-line:underline;text-decoration-line:underline}.link-hover{-webkit-text-decoration-line:none;text-decoration-line:none}.link-hover:hover{-webkit-text-decoration-line:underline;text-decoration-line:underline}.mask{-webkit-mask-size:contain;mask-size:contain;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-position:center;mask-position:50%}.mask-half-1{-webkit-mask-size:200%;mask-size:200%;-webkit-mask-position:left;mask-position:0}.mask-half-2{-webkit-mask-size:200%;mask-size:200%;-webkit-mask-position:right;mask-position:100%}.menu{flex-direction:column;display:flex}.menu.horizontal{flex-direction:row;display:inline-flex}.menu.horizontal :where(li){flex-direction:row}.menu :where(li){flex-flow:column wrap;align-items:stretch;display:flex;position:relative}.menu :where(li:not(.menu-title))>:where(:not(ul)){display:flex}.menu :where(li:not(.disabled):not(.menu-title))>:where(:not(ul)){cursor:pointer;-webkit-user-select:none;user-select:none;outline-offset:2px;outline:2px solid #0000;align-items:center}.menu>:where(li>:not(ul):focus){outline-offset:2px;outline:2px solid #0000}.menu>:where(li.disabled>:not(ul):focus){cursor:auto}.menu>:where(li) :where(ul){flex-direction:column;align-items:stretch;display:flex}.menu>:where(li)>:where(ul){top:initial;border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit;display:none;position:absolute;left:100%}.menu>:where(li:hover)>:where(ul){display:flex}.menu>:where(li:focus)>:where(ul){display:flex}.mockup-code{min-width:20rem;--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity));border-radius:var(--rounded-box,1rem);padding-top:1.25rem;padding-bottom:1.25rem;position:relative;overflow-x:auto;overflow-y:hidden}.mockup-code pre[data-prefix]:before{content:attr(data-prefix);text-align:right;width:2rem;opacity:.5;display:inline-block}.mockup-window{border-radius:var(--rounded-box,1rem);padding-top:1.25rem;position:relative;overflow-x:auto;overflow-y:hidden}.mockup-window pre[data-prefix]:before{content:attr(data-prefix);text-align:right;display:inline-block}.modal{pointer-events:none;visibility:hidden;opacity:0;z-index:999;background-color:hsl(var(--nf,var(--n))/var(--tw-bg-opacity));--tw-bg-opacity:.4;overscroll-behavior:contain;justify-content:center;align-items:flex-end;transition-property:transform,opacity;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:flex;position:fixed;inset:0;overflow-y:hidden}@media 
(min-width:640px){.modal{align-items:center}.modal-box{max-width:32rem;--tw-translate-y:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));--tw-scale-x:.9;--tw-scale-y:.9;border-bottom-left-radius:var(--rounded-box,1rem);border-bottom-right-radius:var(--rounded-box,1rem)}}.modal-box{max-height:calc(100vh - 5em);width:100%;--tw-translate-y:2.5rem;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));border-top-left-radius:var(--rounded-box,1rem);border-top-right-radius:var(--rounded-box,1rem);overscroll-behavior:contain;padding:1.5rem;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);overflow-y:auto;box-shadow:0 25px 50px -12px #00000040}.modal-open,.modal:target,.modal-toggle:checked+.modal{pointer-events:auto;visibility:visible;opacity:1}.modal-action{justify-content:flex-end;margin-top:1.5rem;display:flex}.modal-toggle{height:0;width:0;-webkit-appearance:none;appearance:none;opacity:0;position:fixed}.navbar{padding:var(--navbar-padding,.5rem);min-height:4rem;width:100%;align-items:center;display:flex}.navbar>*{align-items:center;display:flex}.navbar-start{width:50%;justify-content:flex-start}.navbar-center{flex-shrink:0}.navbar-end{width:50%;justify-content:flex-end}.progress{width:100%;-webkit-appearance:none;appearance:none;height:.5rem;border-radius:var(--rounded-box,1rem);position:relative;overflow:hidden}.radial-progress{height:var(--size);width:var(--size);vertical-align:middle;box-sizing:content-box;--value:0;--size:5rem;--thickness:calc(var(--size)/10);background-color:#0000;border-radius:9999px;place-content:center;display:inline-grid;position:relative}.radial-progress::-moz-progress-bar{-webkit-appearance:none;appearance:none;background-color:#0000}.radial-progress::-webkit-progress-value{-webkit-appearance:none;appearance:none;background-color:#0000}.radial-progress::-webkit-progress-bar{-webkit-appearance:none;appearance:none;background-color:#0000}.radial-progress:before,.radial-progress:after{content:"";border-radius:9999px;position:absolute}.radial-progress:before{background:radial-gradient(farthest-side,currentColor 98%,#0000)top/var(--thickness)var(--thickness)no-repeat,conic-gradient(currentColor calc(var(--value)*1%),#0000 0);-webkit-mask:radial-gradient(farthest-side,#0000 calc(99% - var(--thickness)),#000 calc(100% - var(--thickness)));mask:radial-gradient(farthest-side,#0000 calc(99% - var(--thickness)),#000 calc(100% - var(--thickness)));inset:0}.radial-progress:after{inset:calc(50% - var(--thickness)/2);transform:rotate(calc(var(--value)*3.6deg - 90deg))translate(calc(var(--size)/2 - 50%));background-color:currentColor}.radio{--chkbg:var(--bc);height:1.5rem;width:1.5rem;cursor:pointer;-webkit-appearance:none;appearance:none;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:.2;transition:background,box-shadow 
var(--animation-input,.2s)ease-in-out;border-radius:9999px}.range{height:1.5rem;width:100%;cursor:pointer;-webkit-appearance:none;--range-shdw:var(--bc);border-radius:var(--rounded-box,1rem);background-color:#0000;overflow:hidden}.range:focus{outline:0}.rating{display:inline-flex;position:relative}.rating :where(input){cursor:pointer;animation:rating-pop var(--animation-input,.25s)ease-out;height:1.5rem;width:1.5rem;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:1}.select{cursor:pointer;-webkit-user-select:none;user-select:none;-webkit-appearance:none;appearance:none;height:3rem;min-height:3rem;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:0;--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));border-radius:var(--rounded-btn,.5rem);background-image:linear-gradient(45deg,#0000 50%,currentColor 50%),linear-gradient(135deg,currentColor 50%,#0000 50%);background-position:calc(100% - 20px) calc(1px + 50%),calc(100% - 16px) calc(1px + 50%);background-repeat:no-repeat;background-size:4px 4px,4px 4px;flex-shrink:0;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;font-weight:600;line-height:2;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:inline-flex}.select-disabled,.select[disabled]{pointer-events:none;cursor:not-allowed;--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));--tw-text-opacity:.2}.stack{place-items:flex-end center;display:inline-grid}.stack>*{z-index:1;width:100%;opacity:.6;grid-row-start:1;grid-column-start:1;transform:translateY(1rem)scale(.9)}.stack>:nth-child(2){z-index:2;opacity:.8;transform:translateY(.5rem)scale(.95)}.stack>:nth-child(1){z-index:3;opacity:1;transform:matrix(1,0,0,1,0,0)}.stats{--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));border-radius:var(--rounded-box,1rem);display:inline-grid}:where(.stats){grid-auto-flow:column;overflow-x:auto}.stat{width:100%;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:.1;grid-template-columns:repeat(1,1fr);column-gap:1rem;padding:1rem 1.5rem;display:inline-grid}.stat-figure{grid-row:1/span 3;grid-column-start:2;place-self:center end}.stat-title{white-space:nowrap;opacity:.6;grid-column-start:1}.stat-value{white-space:nowrap;grid-column-start:1;font-size:2.25rem;font-weight:800;line-height:2.5rem}.stat-desc{white-space:nowrap;opacity:.6;grid-column-start:1;font-size:.75rem;line-height:1rem}.stat-actions{white-space:nowrap;grid-column-start:1;margin-top:1rem}.steps{counter-reset:step;grid-auto-columns:1fr;grid-auto-flow:column;display:inline-grid;overflow-x:auto;overflow-y:hidden}.steps .step{text-align:center;min-width:4rem;grid-template-rows:40px 1fr;grid-template-columns:auto;place-items:center;display:grid}.swap{-webkit-user-select:none;user-select:none;cursor:pointer;place-content:center;display:inline-grid;position:relative}.swap>*{grid-row-start:1;grid-column-start:1;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.swap input{-webkit-appearance:none;appearance:none}.swap .swap-on,.swap 
.swap-indeterminate,.swap input:indeterminate~.swap-on,.swap input:checked~.swap-off,.swap.swap-active .swap-off,.swap input:indeterminate~.swap-off{opacity:0}.swap input:checked~.swap-on,.swap-active .swap-on,.swap input:indeterminate~.swap-indeterminate{opacity:1}.tabs{flex-wrap:wrap;align-items:flex-end;display:flex}.tab{cursor:pointer;-webkit-user-select:none;user-select:none;text-align:center;height:2rem;--tab-padding:1rem;--tw-text-opacity:.5;--tab-color:hsla(var(--bc)/var(--tw-text-opacity,1));--tab-bg:hsla(var(--b1)/var(--tw-bg-opacity,1));--tab-border-color:hsla(var(--b3)/var(--tw-bg-opacity,1));color:var(--tab-color);padding-left:var(--tab-padding,1rem);padding-right:var(--tab-padding,1rem);flex-wrap:wrap;justify-content:center;align-items:center;font-size:.875rem;line-height:2;display:inline-flex;position:relative}.table{text-align:left;position:relative}.table th:first-child{z-index:11;position:sticky;left:0}.textarea{min-height:3rem;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:0;--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));border-radius:var(--rounded-btn,.5rem);flex-shrink:1;padding:.5rem 1rem;font-size:.875rem;line-height:2;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.toggle{--chkbg:hsla(var(--bc)/.2);--handleoffset:1.5rem;height:1.5rem;width:3rem;cursor:pointer;-webkit-appearance:none;appearance:none;border-width:1px;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:.2;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.2;border-radius:var(--rounded-badge,1.9rem);transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);transition:background,box-shadow var(--animation-input,.2s)ease-in-out;box-shadow:calc(var(--handleoffset)*-1)0 0 2px hsl(var(--b1))inset,0 0 0 2px hsl(var(--b1))inset;flex-shrink:0}.tooltip{--tooltip-offset:calc(100% + 1px + var(--tooltip-tail,0px));text-align:center;--tooltip-tail:3px;--tooltip-color:hsl(var(--n));--tooltip-text-color:hsl(var(--nc));--tooltip-tail-offset:calc(100% + 1px - var(--tooltip-tail));display:inline-block;position:relative}.tooltip:before{pointer-events:none;content:attr(data-tip);top:auto;left:50%;right:auto;bottom:var(--tooltip-offset);max-width:20rem;background-color:var(--tooltip-color);color:var(--tooltip-text-color);width:max-content;border-radius:.25rem;padding:.25rem .5rem;font-size:.875rem;line-height:1.25rem;position:absolute;transform:translate(-50%)}.tooltip-bottom:before{top:var(--tooltip-offset);bottom:auto;left:50%;right:auto;transform:translate(-50%)}.tooltip-left:before{top:50%;left:auto;right:var(--tooltip-offset);bottom:auto;transform:translateY(-50%)}.tooltip-right:before{top:50%;left:var(--tooltip-offset);bottom:auto;right:auto;transform:translateY(-50%)}.avatar-group{display:flex;overflow:hidden}.avatar-group .avatar{--tw-border-opacity:1;border-width:4px;border-color:hsl(var(--b1)/var(--tw-border-opacity));border-radius:9999px;overflow:hidden}.btn-outline .badge{--tw-border-opacity:1;border-color:hsl(var(--nf,var(--n))/var(--tw-border-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity))}.btn-outline.btn-primary 
.badge{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.btn-outline.btn-secondary .badge{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.btn-outline.btn-accent .badge{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.btn-outline .badge.outline{--tw-border-opacity:1;border-color:hsl(var(--nf,var(--n))/var(--tw-border-opacity));background-color:#0000}.btn-outline.btn-primary .badge-outline{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity));background-color:#0000}.btn-outline.btn-secondary .badge-outline{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-text-opacity:1;color:hsl(var(--s)/var(--tw-text-opacity));background-color:#0000}.btn-outline.btn-accent .badge-outline{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-text-opacity:1;color:hsl(var(--a)/var(--tw-text-opacity));background-color:#0000}.btn-outline:hover .badge{--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity))}.btn-outline:hover .badge.outline{--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity))}.btn-outline.btn-primary:hover .badge{--tw-border-opacity:1;border-color:hsl(var(--pc)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--pc)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity))}.btn-outline.btn-primary:hover .badge.outline{--tw-border-opacity:1;border-color:hsl(var(--pc)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--pf,var(--p))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.btn-outline.btn-secondary:hover .badge{--tw-border-opacity:1;border-color:hsl(var(--sc)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--sc)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--s)/var(--tw-text-opacity))}.btn-outline.btn-secondary:hover .badge.outline{--tw-border-opacity:1;border-color:hsl(var(--sc)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--sf,var(--s))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.btn-outline.btn-accent:hover .badge{--tw-border-opacity:1;border-color:hsl(var(--ac)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--ac)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--a)/var(--tw-text-opacity))}.btn-outline.btn-accent:hover .badge.outline{--tw-border-opacity:1;border-color:hsl(var(--ac)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--af,var(--a))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.breadcrumbs>ul>li>a:focus{outline-offset:2px;outline:2px solid #0000}.breadcrumbs>ul>li>a:focus-visible{outline-offset:2px;outline:2px solid 
}.breadcrumbs>ul>li+:before{content:"";height:.375rem;width:.375rem;--tw-rotate:45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));opacity:.4;background-color:#0000;border-top:1px solid ;border-right:1px solid ;margin-left:.5rem;margin-right:.75rem;display:block}.btn:active:hover,.btn:active:focus{transform:scale(var(--btn-focus-scale,.95));animation:none}.btn:hover,.btn-active{--tw-border-opacity:1;border-color:hsl(var(--nf,var(--n))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--nf,var(--n))/var(--tw-bg-opacity))}.btn:focus-visible{outline:2px solid hsl(var(--nf));outline-offset:2px}.btn-primary{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.btn-primary:hover,.btn-primary.btn-active{--tw-border-opacity:1;border-color:hsl(var(--pf,var(--p))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--pf,var(--p))/var(--tw-bg-opacity))}.btn-primary:focus-visible{outline:2px solid hsl(var(--p))}.btn-secondary{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.btn-secondary:hover,.btn-secondary.btn-active{--tw-border-opacity:1;border-color:hsl(var(--sf,var(--s))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--sf,var(--s))/var(--tw-bg-opacity))}.btn-secondary:focus-visible{outline:2px solid hsl(var(--s))}.btn-accent{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.btn-accent:hover,.btn-accent.btn-active{--tw-border-opacity:1;border-color:hsl(var(--af,var(--a))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--af,var(--a))/var(--tw-bg-opacity))}.btn-accent:focus-visible{outline:2px solid hsl(var(--a))}.btn-info{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--inc,var(--nc))/var(--tw-text-opacity))}.btn-info:hover,.btn-info.btn-active{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity))}.btn-info:focus-visible{outline:2px solid hsl(var(--in))}.btn-success{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--suc,var(--nc))/var(--tw-text-opacity))}.btn-success:hover,.btn-success.btn-active{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity))}.btn-success:focus-visible{outline:2px solid 
hsl(var(--su))}.btn-warning{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--wac,var(--nc))/var(--tw-text-opacity))}.btn-warning:hover,.btn-warning.btn-active{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity))}.btn-warning:focus-visible{outline:2px solid hsl(var(--wa))}.btn-error{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--erc,var(--nc))/var(--tw-text-opacity))}.btn-error:hover,.btn-error.btn-active{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity))}.btn-error:focus-visible{outline:2px solid hsl(var(--er))}.btn.glass:hover,.btn.glass.btn-active{--glass-opacity:25%;--glass-border-opacity:15%}.btn.glass:focus-visible{outline:2px solid currentColor}.btn-ghost{color:currentColor;background-color:#0000;border-width:1px;border-color:#0000}.btn-ghost:hover,.btn-ghost.btn-active{--tw-border-opacity:0;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.2}.btn-ghost:focus-visible{outline:2px solid currentColor}.btn-link{--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity));background-color:#0000;border-color:#0000}.btn-link:hover,.btn-link.btn-active{background-color:#0000;border-color:#0000;-webkit-text-decoration-line:underline;text-decoration-line:underline}.btn-link:focus-visible{outline:2px solid currentColor}.btn-outline{--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));background-color:#0000;border-color:currentColor}.btn-outline:hover{--tw-border-opacity:1;border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--b1)/var(--tw-text-opacity))}.btn-outline.btn-primary{--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity))}.btn-outline.btn-primary:hover{--tw-border-opacity:1;border-color:hsl(var(--pf,var(--p))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--pf,var(--p))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.btn-outline.btn-secondary{--tw-text-opacity:1;color:hsl(var(--s)/var(--tw-text-opacity))}.btn-outline.btn-secondary:hover{--tw-border-opacity:1;border-color:hsl(var(--sf,var(--s))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--sf,var(--s))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.btn-outline.btn-accent{--tw-text-opacity:1;color:hsl(var(--a)/var(--tw-text-opacity))}.btn-outline.btn-accent:hover{--tw-border-opacity:1;border-color:hsl(var(--af,var(--a))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--af,var(--a))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.btn-outline.btn-success{--tw-text-opacity:1;color:hsl(var(--su)/var(--tw-text-opacity))}.btn-outline.btn-success:hover{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--suc,var(--nc))/var(--tw-text-opacity))}.btn-outline.btn-info{--tw-text-opacity:1;color:hsl(var(--in)/var(--tw-text-opac
ity))}.btn-outline.btn-info:hover{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--inc,var(--nc))/var(--tw-text-opacity))}.btn-outline.btn-warning{--tw-text-opacity:1;color:hsl(var(--wa)/var(--tw-text-opacity))}.btn-outline.btn-warning:hover{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--wac,var(--nc))/var(--tw-text-opacity))}.btn-outline.btn-error{--tw-text-opacity:1;color:hsl(var(--er)/var(--tw-text-opacity))}.btn-outline.btn-error:hover{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--erc,var(--nc))/var(--tw-text-opacity))}.btn.loading.btn-square:before,.btn.loading.btn-circle:before{margin-right:0}.btn.loading.btn-xl:before,.btn.loading.btn-lg:before{height:1.25rem;width:1.25rem}.btn.loading.btn-sm:before,.btn.loading.btn-xs:before{height:.75rem;width:.75rem}.btn-group>input[type=radio]:checked.btn,.btn-group>.btn-active{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.btn-group>input[type=radio]:checked.btn:focus-visible,.btn-group>.btn-active:focus-visible{outline:2px solid hsl(var(--p))}.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0;margin-left:-1px}.btn-group>.btn:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}@keyframes button-pop{0%{transform:scale(var(--btn-focus-scale,.95))}40%{transform:scale(1.02)}to{transform:scale(1)}}.card:focus-visible{outline-offset:2px;outline:2px solid }.card.bordered,.card-bordered{--tw-border-opacity:1;border-width:1px;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity))}.card.compact .card-body{padding:1rem;font-size:.875rem;line-height:1.25rem}.card-title{align-items:center;gap:.5rem;font-size:1.25rem;font-weight:600;line-height:1.75rem;display:flex}.carousel::-webkit-scrollbar{display:none}.checkbox:focus-visible{outline:2px solid hsl(var(--bc));outline-offset:2px}.checkbox:checked,.checkbox[checked=true]{--tw-bg-opacity:1;background-color:hsl(var(--bc)/var(--tw-bg-opacity));animation:checkmark var(--animation-input,.2s)ease-in-out;background-repeat:no-repeat;background-image:linear-gradient(-45deg,transparent 65%,hsl(var(--chkbg))65.99%),linear-gradient(45deg,transparent 75%,hsl(var(--chkbg))75.99%),linear-gradient(-45deg,hsl(var(--chkbg))40%,transparent 40.99%),linear-gradient(45deg,hsl(var(--chkbg))30%,hsl(var(--chkfg))30.99%,hsl(var(--chkfg))40%,transparent 40.99%),linear-gradient(-45deg,hsl(var(--chkfg))50%,hsl(var(--chkbg))50.99%)}.checkbox:indeterminate{--tw-bg-opacity:1;background-color:hsl(var(--bc)/var(--tw-bg-opacity));animation:checkmark var(--animation-input,.2s)ease-in-out;background-repeat:no-repeat;background-image:linear-gradient(90deg,transparent 80%,hsl(var(--chkbg))80%),linear-gradient(-90deg,transparent 
80%,hsl(var(--chkbg))80%),linear-gradient(0deg,hsl(var(--chkbg))43%,hsl(var(--chkfg))43%,hsl(var(--chkfg))57%,hsl(var(--chkbg))57%)}.checkbox-primary{--chkbg:var(--p);--chkfg:var(--pc);--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.checkbox-primary:hover{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.checkbox-primary:focus-visible{outline:2px solid hsl(var(--p))}.checkbox-primary:checked,.checkbox-primary[checked=true]{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.checkbox-secondary{--chkbg:var(--s);--chkfg:var(--sc);--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.checkbox-secondary:hover{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.checkbox-secondary:focus-visible{outline:2px solid hsl(var(--s))}.checkbox-secondary:checked,.checkbox-secondary[checked=true]{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.checkbox-accent{--chkbg:var(--a);--chkfg:var(--ac);--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.checkbox-accent:hover{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.checkbox-accent:focus-visible{outline:2px solid hsl(var(--a))}.checkbox-accent:checked,.checkbox-accent[checked=true]{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.checkbox:disabled{cursor:not-allowed;--tw-bg-opacity:1;background-color:hsl(var(--bc)/var(--tw-bg-opacity));opacity:.2;border-color:#0000}@keyframes checkmark{0%{background-position-y:5px}50%{background-position-y:-2px}to{background-position-y:0}}.checkbox-mark{display:none}body[dir=rtl] .checkbox{--chkbg:var(--bc);--chkfg:var(--b1)}body[dir=rtl] .checkbox:checked,body[dir=rtl] .checkbox[checked=true]{background-image:linear-gradient(45deg,transparent 65%,hsl(var(--chkbg))65.99%),linear-gradient(-45deg,transparent 75%,hsl(var(--chkbg))75.99%),linear-gradient(45deg,hsl(var(--chkbg))40%,transparent 40.99%),linear-gradient(-45deg,hsl(var(--chkbg))30%,hsl(var(--chkfg))30.99%,hsl(var(--chkfg))40%,transparent 40.99%),linear-gradient(45deg,hsl(var(--chkfg))50%,hsl(var(--chkbg))50.99%)}.collapse:focus-visible{outline:2px solid hsl(var(--nf));outline-offset:2px}.collapse-arrow .collapse-title:after{height:.5rem;width:.5rem;content:"";transform-origin:75% 75%;pointer-events:none;transition-property:all;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:block;position:absolute;top:1.4rem;right:1.4rem;transform:rotate(45deg);box-shadow:2px 2px}.collapse-plus .collapse-title:after{height:.5rem;width:.5rem;content:"+";pointer-events:none;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);display:block;position:absolute;top:.9rem;right:1.4rem}.collapse:not(.collapse-open):not(.collapse-close) input[type=checkbox],.collapse:not(.collapse-open):not(.collapse-close) .collapse-title{cursor:pointer}.collapse:focus:not(.collapse-open):not(.collapse-close) 
.collapse-title{cursor:unset}.collapse-title,.collapse>input[type=checkbox]{width:100%;min-height:3.75rem;padding:1rem 3rem 1rem 1rem;transition:background-color .2s ease-in-out}.collapse-open :where(.collapse-content),.collapse:focus:not(.collapse-close) :where(.collapse-content),.collapse:not(.collapse-close) :where(input[type=checkbox]:checked~.collapse-content){padding-bottom:1rem;transition:padding .2s ease-in-out,background-color .2s ease-in-out}.collapse-open.collapse-arrow .collapse-title:after,.collapse-arrow:focus:not(.collapse-close) .collapse-title:after,.collapse-arrow:not(.collapse-close) input[type=checkbox]:checked~.collapse-title:after{transform:rotate(225deg)}.collapse-open.collapse-plus .collapse-title:after,.collapse-plus:focus:not(.collapse-close) .collapse-title:after,.collapse-plus:not(.collapse-close) input[type=checkbox]:checked~.collapse-title:after{content:"−"}.divider:before,.divider:after{background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.1}.divider:not(:empty){gap:1rem}.drawer.drawer-end .drawer-toggle:checked~.drawer-content{--tw-translate-x:-.5rem;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.drawer-toggle:checked~.drawer-content{--tw-translate-x:.5rem;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.drawer-toggle:focus-visible~.drawer-content .drawer-button{outline:2px solid hsl(var(--nf));outline-offset:2px}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-primary{outline:2px solid hsl(var(--p))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-secondary{outline:2px solid hsl(var(--s))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-accent{outline:2px solid hsl(var(--a))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-info{outline:2px solid hsl(var(--in))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-success{outline:2px solid hsl(var(--su))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-warning{outline:2px solid hsl(var(--wa))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-error{outline:2px solid hsl(var(--er))}.drawer-toggle:focus-visible~.drawer-content .drawer-button.glass,.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-ghost,.drawer-toggle:focus-visible~.drawer-content .drawer-button.btn-link{outline:2px solid }.dropdown.dropdown-open .dropdown-content,.dropdown.dropdown-hover:hover .dropdown-content,.dropdown:focus .dropdown-content,.dropdown:focus-within .dropdown-content{--tw-scale-x:1;--tw-scale-y:1;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.footer-title{text-transform:uppercase;opacity:.5;margin-bottom:.5rem;font-weight:700}.label-text{--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));font-size:.875rem;line-height:1.25rem}.label-text-alt{--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));font-size:.75rem;line-height:1rem}.label a:hover{--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity))}.input-bordered{--tw-border-opacity:.2}.input:focus{outline:2px solid 
hsla(var(--bc)/.2);outline-offset:2px}.input-ghost{--tw-bg-opacity:.05}.input-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));box-shadow:none}.input-primary{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.input-primary:focus{outline:2px solid hsl(var(--p))}.input-secondary{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.input-secondary:focus{outline:2px solid hsl(var(--s))}.input-accent{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.input-accent:focus{outline:2px solid hsl(var(--a))}.input-info{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity))}.input-info:focus{outline:2px solid hsl(var(--in))}.input-success{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity))}.input-success:focus{outline:2px solid hsl(var(--su))}.input-warning{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity))}.input-warning:focus{outline:2px solid hsl(var(--wa))}.input-error{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity))}.input-error:focus{outline:2px solid hsl(var(--er))}.input-disabled,.input[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));--tw-text-opacity:.2}.input-disabled::placeholder,.input[disabled]::placeholder{color:hsl(var(--bc)/var(--tw-placeholder-opacity));--tw-placeholder-opacity:.2}.input::-webkit-calendar-picker-indicator{display:none}.link-primary{--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity))}.link-primary:hover{--tw-text-opacity:1;color:hsl(var(--pf,var(--p))/var(--tw-text-opacity))}.link-secondary{--tw-text-opacity:1;color:hsl(var(--s)/var(--tw-text-opacity))}.link-secondary:hover{--tw-text-opacity:1;color:hsl(var(--sf,var(--s))/var(--tw-text-opacity))}.link-accent{--tw-text-opacity:1;color:hsl(var(--a)/var(--tw-text-opacity))}.link-accent:hover{--tw-text-opacity:1;color:hsl(var(--af,var(--a))/var(--tw-text-opacity))}.link-neutral{--tw-text-opacity:1;color:hsl(var(--n)/var(--tw-text-opacity))}.link-neutral:hover{--tw-text-opacity:1;color:hsl(var(--nf,var(--n))/var(--tw-text-opacity))}.link:focus{outline-offset:2px;outline:2px solid #0000}.link:focus-visible{outline-offset:2px;outline:2px solid 
}.mask-squircle{-webkit-mask-image:url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjIwMCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KCiAgPHBhdGggZD0iTSAxMDAgMCBDIDIwIDAgMCAyMCAwIDEwMCBDIDAgMTgwIDIwIDIwMCAxMDAgMjAwIEMgMTgwIDIwMCAyMDAgMTgwIDIwMCAxMDAgQyAyMDAgMjAgMTgwIDAgMTAwIDAgWiIvPgoKPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjIwMCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj4KCiAgPHBhdGggZD0iTSAxMDAgMCBDIDIwIDAgMCAyMCAwIDEwMCBDIDAgMTgwIDIwIDIwMCAxMDAgMjAwIEMgMTgwIDIwMCAyMDAgMTgwIDIwMCAxMDAgQyAyMDAgMjAgMTgwIDAgMTAwIDAgWiIvPgoKPC9zdmc+)}.mask-decagon{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+ZGVjYWdvbjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJkZWNhZ29uIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9Ijk2IDAgMTU0Ljc3ODUyNSAxOS4wOTgzMDA2IDE5MS4xMDU2NTIgNjkuMDk4MzAwNiAxOTEuMTA1NjUyIDEzMC45MDE2OTkgMTU0Ljc3ODUyNSAxODAuOTAxNjk5IDk2IDIwMCAzNy4yMjE0NzQ4IDE4MC45MDE2OTkgMC44OTQzNDgzNyAxMzAuOTAxNjk5IDAuODk0MzQ4MzcgNjkuMDk4MzAwNiAzNy4yMjE0NzQ4IDE5LjA5ODMwMDYiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+ZGVjYWdvbjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJkZWNhZ29uIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9Ijk2IDAgMTU0Ljc3ODUyNSAxOS4wOTgzMDA2IDE5MS4xMDU2NTIgNjkuMDk4MzAwNiAxOTEuMTA1NjUyIDEzMC45MDE2OTkgMTU0Ljc3ODUyNSAxODAuOTAxNjk5IDk2IDIwMCAzNy4yMjE0NzQ4IDE4MC45MDE2OTkgMC44OTQzNDgzNyAxMzAuOTAxNjk5IDAuODk0MzQ4MzcgNjkuMDk4MzAwNiAzNy4yMjE0NzQ4IDE5LjA5ODMwMDYiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-diamond{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+ZGlhbW9uZDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJkaWFtb25kIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9IjEwMCAwIDIwMCAxMDAgMTAwIDIwMCAwIDEwMCI+PC9wb2x5Z29uPgogICAgPC9nPgo8L3N2Zz4=);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF
0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+ZGlhbW9uZDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJkaWFtb25kIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9IjEwMCAwIDIwMCAxMDAgMTAwIDIwMCAwIDEwMCI+PC9wb2x5Z29uPgogICAgPC9nPgo8L3N2Zz4=)}.mask-heart{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTg1cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTg1IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGVhcnQ8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cGF0aCBkPSJNMTAwLDE4NC42MDU1MzQgQzk2LjkxMjE3MTYsMTg0LjYwMTYzNSA5My44OTY5NzMzLDE4My42Njg1OTggOTEuMzQ2NjE4MiwxODEuOTI3NzkzIEM1My41NjQ5ODA0LDE1Ni4yODAxMjMgMzcuMjA1Mjc2NCwxMzguNjk0NTIzIDI4LjE4MTcxOTQsMTI3LjY5OTkxNyBDOC45NTE5NzYyNiwxMDQuMjYzNjY3IC0wLjI1NDI2MzI3Myw4MC4yMDI0NTEzIDAuMDA1MzM4MjU5MzEsNTQuMTQ2MTQ5MyBDMC4zMDgyMDY3MTQsMjQuMjg3MTY1NiAyNC4yNjM2NTkzLDAgNTMuNDA2MzM1LDAgQzc0LjU5NzUxMiwwIDg5LjI3NDYxMzQsMTEuOTM2ODYzMSA5Ny44MjIyMzQzLDIxLjg3ODY0MDMgQzk4LjM3MDA4MTIsMjIuNTA5NDMgOTkuMTY0NTE5NiwyMi44NzE2ODg5IDEwMCwyMi44NzE2ODg5IEMxMDAuODM1NDg2LDIyLjg3MTY4ODkgMTAxLjYyOTkyNCwyMi41MDk0MyAxMDIuMTc3NzcxLDIxLjg3ODY0MDMgQzExMC43MjUzOTIsMTEuOTI3MjQ4MiAxMjUuNDAyNDkzLDAgMTQ2LjU5MzY3LDAgQzE3NS43MzYzNDYsMCAxOTkuNjkxNzk5LDI0LjI4NzE2NTYgMTk5Ljk5NDY2Nyw1NC4xNTA5NTY3IEMyMDAuMjU0MjY5LDgwLjIxMjA2NjEgMTkxLjAzODQxNCwxMDQuMjczMjgyIDE3MS44MTgyODYsMTI3LjcwNDcyNCBDMTYyLjc5NDcyOSwxMzguNjk5MzMgMTQ2LjQzNTAyNSwxNTYuMjg0OTMgMTA4LjY1MzM4NywxODEuOTMyNiBDMTA2LjEwMjQ4NCwxODMuNjcxNzA0IDEwMy4wODczMjksMTg0LjYwMzA1MiAxMDAsMTg0LjYwNTUzNCBaIiBpZD0iaGVhcnQiIGZpbGw9IiMwMDAwMDAiIGZpbGwtcnVsZT0ibm9uemVybyI+PC9wYXRoPgogICAgPC9nPgo8L3N2Zz4=);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTg1cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTg1IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGVhcnQ8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cGF0aCBkPSJNMTAwLDE4NC42MDU1MzQgQzk2LjkxMjE3MTYsMTg0LjYwMTYzNSA5My44OTY5NzMzLDE4My42Njg1OTggOTEuMzQ2NjE4MiwxODEuOTI3NzkzIEM1My41NjQ5ODA0LDE1Ni4yODAxMjMgMzcuMjA1Mjc2NCwxMzguNjk0NTIzIDI4LjE4MTcxOTQsMTI3LjY5OTkxNyBDOC45NTE5NzYyNiwxMDQuMjYzNjY3IC0wLjI1NDI2MzI3Myw4MC4yMDI0NTEzIDAuMDA1MzM4MjU5MzEsNTQuMTQ2MTQ5MyBDMC4zMDgyMDY3MTQsMjQuMjg3MTY1NiAyNC4yNjM2NTkzLDAgNTMuNDA2MzM1LDAgQzc0LjU5NzUxMiwwIDg5LjI3NDYxMzQsMTEuOTM2ODYzMSA5Ny44MjIyMzQzLDIxLjg3ODY0MDMgQzk4LjM3MDA4MTIsMjIuNTA5NDMgOTkuMTY0NTE5NiwyMi44NzE2ODg5IDEwMCwyMi44NzE2ODg5IEMxMDAuODM1NDg2LDIyLjg3MTY4ODkgMTAxLjYyOTkyNCwyMi41MDk0MyAxMDIuMTc3NzcxLDIxLjg3ODY0MDMgQzExMC43MjUzOTIsMTEuOTI3MjQ4MiAxMjUuNDAyNDkzLDAgMTQ2LjU5MzY3LDAgQzE3NS43MzYzNDYsMCAxOTkuNjkxNzk5LDI0LjI4NzE2NTYgMTk5Ljk5NDY2Nyw1NC4xNTA5NTY3IEMyMDAuMjU0MjY5LDgwLjIxMjA2NjEgMTkxLjAzODQxNCwxMDQuMjczMjgyIDE3MS44MTgyODYsMTI3LjcwNDcyNCBDMTYy
Ljc5NDcyOSwxMzguNjk5MzMgMTQ2LjQzNTAyNSwxNTYuMjg0OTMgMTA4LjY1MzM4NywxODEuOTMyNiBDMTA2LjEwMjQ4NCwxODMuNjcxNzA0IDEwMy4wODczMjksMTg0LjYwMzA1MiAxMDAsMTg0LjYwNTUzNCBaIiBpZD0iaGVhcnQiIGZpbGw9IiMwMDAwMDAiIGZpbGwtcnVsZT0ibm9uemVybyI+PC9wYXRoPgogICAgPC9nPgo8L3N2Zz4=)}.mask-hexagon{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTgycHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxODIgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGV4YWdvbjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwYXRoIGQ9Ik01NS43ODU3ODg5LDE5MS40MDAyMDkgQzQ2LjU5MDM4NjYsMTkxLjQwMDIwOSAzNS43MjMwOTMsMTg0LjcxMjY0NCAzMC43MDc0MTksMTc3LjE4OTEzMyBMLTUuMjM4MjQ0NTEsMTE1LjMyOTE1NCBDLTEwLjI1MzkxODUsMTA2Ljk2OTY5NyAtMTAuMjUzOTE4NSw5NC40MzA1MTIgLTUuMjM4MjQ0NTEsODYuMDcxMDU1NCBMMzAuNzA3NDE5LDI0LjIxMTA3NjMgQzM1LjcyMzA5MywxNS44NTE2MTk2IDQ2LjU5MDM4NjYsMTAgNTUuNzg1Nzg4OSwxMCBMMTI2Ljg0MTE3LDEwIEMxMzYuMDM2NTczLDEwIDE0Ni45MDM4NjYsMTYuNjg3NTY1MyAxNTEuOTE5NTQsMjQuMjExMDc2MyBMMTg3Ljg2NTIwNCw4Ni4wNzEwNTU0IEMxOTIuMDQ0OTMyLDk0LjQzMDUxMiAxOTIuMDQ0OTMyLDEwNi45Njk2OTcgMTg3Ljg2NTIwNCwxMTUuMzI5MTU0IEwxNTEuOTE5NTQsMTc3LjE4OTEzMyBDMTQ3LjczOTgxMiwxODUuNTQ4NTg5IDEzNi4wMzY1NzMsMTkxLjQwMDIwOSAxMjYuODQxMTcsMTkxLjQwMDIwOSBMNTUuNzg1Nzg4OSwxOTEuNDAwMjA5IFoiIGlkPSJoZXhhZ29uIiBmaWxsPSIjMDAwMDAwIiBmaWxsLXJ1bGU9Im5vbnplcm8iIHRyYW5zZm9ybT0idHJhbnNsYXRlKDkxLjAwMDAwMCwgMTAwLjcwMDEwNCkgcm90YXRlKC0yNzAuMDAwMDAwKSB0cmFuc2xhdGUoLTkxLjAwMDAwMCwgLTEwMC43MDAxMDQpICI+PC9wYXRoPgogICAgPC9nPgo8L3N2Zz4=);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTgycHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxODIgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGV4YWdvbjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwYXRoIGQ9Ik01NS43ODU3ODg5LDE5MS40MDAyMDkgQzQ2LjU5MDM4NjYsMTkxLjQwMDIwOSAzNS43MjMwOTMsMTg0LjcxMjY0NCAzMC43MDc0MTksMTc3LjE4OTEzMyBMLTUuMjM4MjQ0NTEsMTE1LjMyOTE1NCBDLTEwLjI1MzkxODUsMTA2Ljk2OTY5NyAtMTAuMjUzOTE4NSw5NC40MzA1MTIgLTUuMjM4MjQ0NTEsODYuMDcxMDU1NCBMMzAuNzA3NDE5LDI0LjIxMTA3NjMgQzM1LjcyMzA5MywxNS44NTE2MTk2IDQ2LjU5MDM4NjYsMTAgNTUuNzg1Nzg4OSwxMCBMMTI2Ljg0MTE3LDEwIEMxMzYuMDM2NTczLDEwIDE0Ni45MDM4NjYsMTYuNjg3NTY1MyAxNTEuOTE5NTQsMjQuMjExMDc2MyBMMTg3Ljg2NTIwNCw4Ni4wNzEwNTU0IEMxOTIuMDQ0OTMyLDk0LjQzMDUxMiAxOTIuMDQ0OTMyLDEwNi45Njk2OTcgMTg3Ljg2NTIwNCwxMTUuMzI5MTU0IEwxNTEuOTE5NTQsMTc3LjE4OTEzMyBDMTQ3LjczOTgxMiwxODUuNTQ4NTg5IDEzNi4wMzY1NzMsMTkxLjQwMDIwOSAxMjYuODQxMTcsMTkxLjQwMDIwOSBMNTUuNzg1Nzg4OSwxOTEuNDAwMjA5IFoiIGlkPSJoZXhhZ29uIiBmaWxsPSIjMDAwMDAwIiBmaWxsLXJ1bGU9Im5vbnplcm8iIHRyYW5zZm9ybT0idHJhbnNsYXRlKDkxLjAwMDAwMCwgMTAwLjcwMDEwNCkgcm90YXRlKC0yNzAuMDAwMDAwKSB0cmFuc2xhdGUoLTkxLjAwMDAwMCwgLTEwMC43MDAxMDQpICI+PC9wYXRoPgogICAgPC9nPgo8L3N2Zz4=)}.mask-hexagon-2{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTgycHgiIHZpZXdCb3g9IjAgMCAyMDAgMTgyIiB2ZXJzaW9uPSIx
LjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGV4YWdvbi0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBhdGggZD0iTTY0Ljc4NTc4ODksMTgxLjQwMDIwOSBDNTUuNTkwMzg2NiwxODEuNDAwMjA5IDQ0LjcyMzA5MywxNzQuNzEyNjQ0IDM5LjcwNzQxOSwxNjcuMTg5MTMzIEwzLjc2MTc1NTQ5LDEwNS4zMjkxNTQgQy0xLjI1MzkxODUsOTYuOTY5Njk3IC0xLjI1MzkxODUsODQuNDMwNTEyIDMuNzYxNzU1NDksNzYuMDcxMDU1NCBMMzkuNzA3NDE5LDE0LjIxMTA3NjMgQzQ0LjcyMzA5Myw1Ljg1MTYxOTY0IDU1LjU5MDM4NjYsMCA2NC43ODU3ODg5LDAgTDEzNS44NDExNywwIEMxNDUuMDM2NTczLDAgMTU1LjkwMzg2Niw2LjY4NzU2NTMxIDE2MC45MTk1NCwxNC4yMTEwNzYzIEwxOTYuODY1MjA0LDc2LjA3MTA1NTQgQzIwMS4wNDQ5MzIsODQuNDMwNTEyIDIwMS4wNDQ5MzIsOTYuOTY5Njk3IDE5Ni44NjUyMDQsMTA1LjMyOTE1NCBMMTYwLjkxOTU0LDE2Ny4xODkxMzMgQzE1Ni43Mzk4MTIsMTc1LjU0ODU4OSAxNDUuMDM2NTczLDE4MS40MDAyMDkgMTM1Ljg0MTE3LDE4MS40MDAyMDkgTDY0Ljc4NTc4ODksMTgxLjQwMDIwOSBaIiBpZD0iaGV4YWdvbi0yIiBmaWxsPSIjMDAwMDAwIiBmaWxsLXJ1bGU9Im5vbnplcm8iPjwvcGF0aD4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTgycHgiIHZpZXdCb3g9IjAgMCAyMDAgMTgyIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+aGV4YWdvbi0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBhdGggZD0iTTY0Ljc4NTc4ODksMTgxLjQwMDIwOSBDNTUuNTkwMzg2NiwxODEuNDAwMjA5IDQ0LjcyMzA5MywxNzQuNzEyNjQ0IDM5LjcwNzQxOSwxNjcuMTg5MTMzIEwzLjc2MTc1NTQ5LDEwNS4zMjkxNTQgQy0xLjI1MzkxODUsOTYuOTY5Njk3IC0xLjI1MzkxODUsODQuNDMwNTEyIDMuNzYxNzU1NDksNzYuMDcxMDU1NCBMMzkuNzA3NDE5LDE0LjIxMTA3NjMgQzQ0LjcyMzA5Myw1Ljg1MTYxOTY0IDU1LjU5MDM4NjYsMCA2NC43ODU3ODg5LDAgTDEzNS44NDExNywwIEMxNDUuMDM2NTczLDAgMTU1LjkwMzg2Niw2LjY4NzU2NTMxIDE2MC45MTk1NCwxNC4yMTEwNzYzIEwxOTYuODY1MjA0LDc2LjA3MTA1NTQgQzIwMS4wNDQ5MzIsODQuNDMwNTEyIDIwMS4wNDQ5MzIsOTYuOTY5Njk3IDE5Ni44NjUyMDQsMTA1LjMyOTE1NCBMMTYwLjkxOTU0LDE2Ny4xODkxMzMgQzE1Ni43Mzk4MTIsMTc1LjU0ODU4OSAxNDUuMDM2NTczLDE4MS40MDAyMDkgMTM1Ljg0MTE3LDE4MS40MDAyMDkgTDY0Ljc4NTc4ODksMTgxLjQwMDIwOSBaIiBpZD0iaGV4YWdvbi0yIiBmaWxsPSIjMDAwMDAwIiBmaWxsLXJ1bGU9Im5vbnplcm8iPjwvcGF0aD4KICAgIDwvZz4KPC9zdmc+)}.mask-circle{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+T3ZhbDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxjaXJjbGUgaWQ9Ik92YWwiIGZpbGw9IiMwMDAwMDAiIGN4PSIxMDAiIGN5PSIxMDAiIHI9IjEwMCI+PC9jaXJjbGU+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIge
G1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+T3ZhbDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxjaXJjbGUgaWQ9Ik92YWwiIGZpbGw9IiMwMDAwMDAiIGN4PSIxMDAiIGN5PSIxMDAiIHI9IjEwMCI+PC9jaXJjbGU+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-parallelogram{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTU0cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTU0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbTwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJwYXJhbGxlbG9ncmFtIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9IjQ2LjE1Mzg0NjIgMCAyMDAgMCAxNTMuODQ2MTU0IDE1My44NDYxNTQgMCAxNTMuODQ2MTU0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTU0cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTU0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbTwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJwYXJhbGxlbG9ncmFtIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9IjQ2LjE1Mzg0NjIgMCAyMDAgMCAxNTMuODQ2MTU0IDE1My44NDYxNTQgMCAxNTMuODQ2MTU0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-parallelogram-2{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTU0cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTU0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbS0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tMiIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMTAwLjAwMDAwMCwgNzYuOTIzMDc3KSBzY2FsZSgtMSwgMSkgdHJhbnNsYXRlKC0xMDAuMDAwMDAwLCAtNzYuOTIzMDc3KSAiIHBvaW50cz0iNDYuMTUzODQ2MiAwIDIwMCAwIDE1My44NDYxNTQgMTUzLjg0NjE1NCAwIDE1My44NDYxNTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMTU0cHgiIHZpZXdCb3g9IjAgMCAyMDAgMTU0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbS0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm
9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tMiIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoMTAwLjAwMDAwMCwgNzYuOTIzMDc3KSBzY2FsZSgtMSwgMSkgdHJhbnNsYXRlKC0xMDAuMDAwMDAwLCAtNzYuOTIzMDc3KSAiIHBvaW50cz0iNDYuMTUzODQ2MiAwIDIwMCAwIDE1My44NDYxNTQgMTUzLjg0NjE1NCAwIDE1My44NDYxNTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-parallelogram-3{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTU0cHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxNTQgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbS0zPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tMyIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNzcuMDAwMDAwLCAxMDAuOTIzMDc3KSBzY2FsZSgtMSwgMSkgcm90YXRlKDkwLjAwMDAwMCkgdHJhbnNsYXRlKC03Ny4wMDAwMDAsIC0xMDAuOTIzMDc3KSAiIHBvaW50cz0iMjMuMTUzODQ2MiAyNCAxNzcgMjQgMTMwLjg0NjE1NCAxNzcuODQ2MTU0IC0yMyAxNzcuODQ2MTU0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTU0cHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxNTQgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbS0zPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tMyIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNzcuMDAwMDAwLCAxMDAuOTIzMDc3KSBzY2FsZSgtMSwgMSkgcm90YXRlKDkwLjAwMDAwMCkgdHJhbnNsYXRlKC03Ny4wMDAwMDAsIC0xMDAuOTIzMDc3KSAiIHBvaW50cz0iMjMuMTUzODQ2MiAyNCAxNzcgMjQgMTMwLjg0NjE1NCAxNzcuODQ2MTU0IC0yMyAxNzcuODQ2MTU0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-parallelogram-4{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTU0cHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxNTQgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGFyYWxsZWxvZ3JhbS00PC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tNCIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNzcuMDAwMDAwLCAxMDAuOTIzMDc3KSByb3RhdGUoOTAuMDAwMDAwKSB0cmFuc2xhdGUoLTc3LjAwMDAwMCwgLTEwMC45MjMwNzcpICIgcG9pbnRzPSIyMy4xNTM4NDYyIDI0IDE3NyAyNCAxMzAuODQ2MTU0IDE3Ny44NDYxNTQgLTIzIDE3Ny44NDYxNTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTU0cHgiIGhlaWdodD0iMjAxcHgiIHZpZXdCb3g9IjAgMCAxNTQgMjAxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8d
Gl0bGU+cGFyYWxsZWxvZ3JhbS00PC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InBhcmFsbGVsb2dyYW0tNCIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoNzcuMDAwMDAwLCAxMDAuOTIzMDc3KSByb3RhdGUoOTAuMDAwMDAwKSB0cmFuc2xhdGUoLTc3LjAwMDAwMCwgLTEwMC45MjMwNzcpICIgcG9pbnRzPSIyMy4xNTM4NDYyIDI0IDE3NyAyNCAxMzAuODQ2MTU0IDE3Ny44NDYxNTQgLTIzIDE3Ny44NDYxNTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-pentagon{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgxcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGVudGFnb248L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0icGVudGFnb24iIGZpbGw9IiMwMDAwMDAiIHBvaW50cz0iOTYgMCAxOTEuMTA1NjUyIDY5LjA5ODMwMDYgMTU0Ljc3ODUyNSAxODAuOTAxNjk5IDM3LjIyMTQ3NDggMTgwLjkwMTY5OSAwLjg5NDM0ODM3IDY5LjA5ODMwMDYiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgxcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgxIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+cGVudGFnb248L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0icGVudGFnb24iIGZpbGw9IiMwMDAwMDAiIHBvaW50cz0iOTYgMCAxOTEuMTA1NjUyIDY5LjA5ODMwMDYgMTU0Ljc3ODUyNSAxODAuOTAxNjk5IDM3LjIyMTQ3NDggMTgwLjkwMTY5OSAwLjg5NDM0ODM3IDY5LjA5ODMwMDYiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-square{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3F1YXJlPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHJlY3QgaWQ9InNxdWFyZSIgZmlsbD0iIzAwMDAwMCIgeD0iMCIgeT0iMCIgd2lkdGg9IjIwMCIgaGVpZ2h0PSIyMDAiPjwvcmVjdD4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMjAwcHgiIGhlaWdodD0iMjAwcHgiIHZpZXdCb3g9IjAgMCAyMDAgMjAwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3F1YXJlPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHJlY3QgaWQ9InNxdWFyZSIgZmlsbD0iIzAwMDAwMCIgeD0iMCIgeT0iMCIgd2lk
dGg9IjIwMCIgaGVpZ2h0PSIyMDAiPjwvcmVjdD4KICAgIDwvZz4KPC9zdmc+)}.mask-star{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3RhcjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJzdGFyIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9Ijk2IDEzNy4yNjMzOTMgMzcuMjIxNDc0OCAxNzkuMjg2NTA2IDU5LjM4NDMyNDEgMTEwLjg5ODA3MyAwLjg5NDM0ODM3IDY4LjQ4MTM1MTUgNzMuMzcwMjY3OCA2OC4yMzgwODgyIDk2IDAgMTE4LjYyOTczMiA2OC4yMzgwODgyIDE5MS4xMDU2NTIgNjguNDgxMzUxNSAxMzIuNjE1Njc2IDExMC44OTgwNzMgMTU0Ljc3ODUyNSAxNzkuMjg2NTA2Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3RhcjwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJzdGFyIiBmaWxsPSIjMDAwMDAwIiBwb2ludHM9Ijk2IDEzNy4yNjMzOTMgMzcuMjIxNDc0OCAxNzkuMjg2NTA2IDU5LjM4NDMyNDEgMTEwLjg5ODA3MyAwLjg5NDM0ODM3IDY4LjQ4MTM1MTUgNzMuMzcwMjY3OCA2OC4yMzgwODgyIDk2IDAgMTE4LjYyOTczMiA2OC4yMzgwODgyIDE5MS4xMDU2NTIgNjguNDgxMzUxNSAxMzIuNjE1Njc2IDExMC44OTgwNzMgMTU0Ljc3ODUyNSAxNzkuMjg2NTA2Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-star-2{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3Rhci0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InN0YXItMiIgZmlsbD0iIzAwMDAwMCIgcG9pbnRzPSI5NiAxNTMuMDQzNjYxIDM3LjIyMTQ3NDggMTc5LjI4NjUwNiA0NC4yNDExOTA0IDExNS43NzQ0NDQgMC44OTQzNDgzNyA2OC40ODEzNTE1IDY0LjAxMTI5NjUgNTUuNDcxNTgyOCA5NiAwIDEyNy45ODg3MDQgNTUuNDcxNTgyOCAxOTEuMTA1NjUyIDY4LjQ4MTM1MTUgMTQ3Ljc1ODgxIDExNS43NzQ0NDQgMTU0Ljc3ODUyNSAxNzkuMjg2NTA2Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTkycHgiIGhlaWdodD0iMTgwcHgiIHZpZXdCb3g9IjAgMCAxOTIgMTgwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+c3Rhci0yPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InN0YXItMiIgZmlsbD0iIzAwMDAwMCI
gcG9pbnRzPSI5NiAxNTMuMDQzNjYxIDM3LjIyMTQ3NDggMTc5LjI4NjUwNiA0NC4yNDExOTA0IDExNS43NzQ0NDQgMC44OTQzNDgzNyA2OC40ODEzNTE1IDY0LjAxMTI5NjUgNTUuNDcxNTgyOCA5NiAwIDEyNy45ODg3MDQgNTUuNDcxNTgyOCAxOTEuMTA1NjUyIDY4LjQ4MTM1MTUgMTQ3Ljc1ODgxIDExNS43NzQ0NDQgMTU0Ljc3ODUyNSAxNzkuMjg2NTA2Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-triangle{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTc0cHgiIGhlaWdodD0iMTQ5cHgiIHZpZXdCb3g9IjAgMCAxNzQgMTQ5IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGU8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0idHJpYW5nbGUiIGZpbGw9IiMwMDAwMDAiIHBvaW50cz0iODcgMTQ4LjQ3NjE3NyAwLjM5NzQ1OTYyMiAxNDguNjYwNzE0IDQzLjg1OTk4MzcgNzQuNDIyNjI1OSA4NyAyLjg0MjE3MDk0ZS0xNCAxMzAuMTQwMDE2IDc0LjQyMjYyNTkgMTczLjYwMjU0IDE0OC42NjA3MTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTc0cHgiIGhlaWdodD0iMTQ5cHgiIHZpZXdCb3g9IjAgMCAxNzQgMTQ5IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGU8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0idHJpYW5nbGUiIGZpbGw9IiMwMDAwMDAiIHBvaW50cz0iODcgMTQ4LjQ3NjE3NyAwLjM5NzQ1OTYyMiAxNDguNjYwNzE0IDQzLjg1OTk4MzcgNzQuNDIyNjI1OSA4NyAyLjg0MjE3MDk0ZS0xNCAxMzAuMTQwMDE2IDc0LjQyMjYyNTkgMTczLjYwMjU0IDE0OC42NjA3MTQiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-triangle-2{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTc0cHgiIGhlaWdodD0iMTUwcHgiIHZpZXdCb3g9IjAgMCAxNzQgMTUwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtYm90dG9tPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIiBzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InRyaWFuZ2xlLWJvdHRvbSIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoODcuMDAwMDAwLCA1MC4xMDcxNDMpIHJvdGF0ZSgtMTgwLjAwMDAwMCkgdHJhbnNsYXRlKC04Ny4wMDAwMDAsIC01MC4xMDcxNDMpICIgcG9pbnRzPSI4NyA5OS40NzYxNzY4IDAuMzk3NDU5NjIyIDk5LjY2MDcxNDMgNDMuODU5OTgzNyAyNS40MjI2MjU5IDg3IC00OSAxMzAuMTQwMDE2IDI1LjQyMjYyNTkgMTczLjYwMjU0IDk5LjY2MDcxNDMiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTc0cHgiIGhlaWdodD0iMTUwcHgiIHZpZXdCb3g9IjAgMCAxNzQgMTUwIiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtYm90dG9tPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IlBhZ2UtMSIgc3Ryb2tlPSJub25lIi
BzdHJva2Utd2lkdGg9IjEiIGZpbGw9Im5vbmUiIGZpbGwtcnVsZT0iZXZlbm9kZCI+CiAgICAgICAgPHBvbHlnb24gaWQ9InRyaWFuZ2xlLWJvdHRvbSIgZmlsbD0iIzAwMDAwMCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoODcuMDAwMDAwLCA1MC4xMDcxNDMpIHJvdGF0ZSgtMTgwLjAwMDAwMCkgdHJhbnNsYXRlKC04Ny4wMDAwMDAsIC01MC4xMDcxNDMpICIgcG9pbnRzPSI4NyA5OS40NzYxNzY4IDAuMzk3NDU5NjIyIDk5LjY2MDcxNDMgNDMuODU5OTgzNyAyNS40MjI2MjU5IDg3IC00OSAxMzAuMTQwMDE2IDI1LjQyMjYyNTkgMTczLjYwMjU0IDk5LjY2MDcxNDMiPjwvcG9seWdvbj4KICAgIDwvZz4KPC9zdmc+)}.mask-triangle-3{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTUwcHgiIGhlaWdodD0iMTc0cHgiIHZpZXdCb3g9IjAgMCAxNTAgMTc0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtbGVmdDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJ0cmlhbmdsZS1sZWZ0IiBmaWxsPSIjMDAwMDAwIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMDAuMDAwMDAwLCA4Ny4xMDcxNDMpIHJvdGF0ZSgtOTAuMDAwMDAwKSB0cmFuc2xhdGUoLTEwMC4wMDAwMDAsIC04Ny4xMDcxNDMpICIgcG9pbnRzPSIxMDAgMTM2LjQ3NjE3NyAxMy4zOTc0NTk2IDEzNi42NjA3MTQgNTYuODU5OTgzNyA2Mi40MjI2MjU5IDEwMCAtMTIgMTQzLjE0MDAxNiA2Mi40MjI2MjU5IDE4Ni42MDI1NCAxMzYuNjYwNzE0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTUwcHgiIGhlaWdodD0iMTc0cHgiIHZpZXdCb3g9IjAgMCAxNTAgMTc0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtbGVmdDwvdGl0bGU+CiAgICA8ZGVzYz5DcmVhdGVkIHdpdGggU2tldGNoLjwvZGVzYz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxwb2x5Z29uIGlkPSJ0cmlhbmdsZS1sZWZ0IiBmaWxsPSIjMDAwMDAwIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMDAuMDAwMDAwLCA4Ny4xMDcxNDMpIHJvdGF0ZSgtOTAuMDAwMDAwKSB0cmFuc2xhdGUoLTEwMC4wMDAwMDAsIC04Ny4xMDcxNDMpICIgcG9pbnRzPSIxMDAgMTM2LjQ3NjE3NyAxMy4zOTc0NTk2IDEzNi42NjA3MTQgNTYuODU5OTgzNyA2Mi40MjI2MjU5IDEwMCAtMTIgMTQzLjE0MDAxNiA2Mi40MjI2MjU5IDE4Ni42MDI1NCAxMzYuNjYwNzE0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.mask-triangle-4{-webkit-mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTUwcHgiIGhlaWdodD0iMTc0cHgiIHZpZXdCb3g9IjAgMCAxNTAgMTc0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtcmlnaHQ8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0idHJpYW5nbGUtcmlnaHQiIGZpbGw9IiMwMDAwMDAiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDUwLjAwMDAwMCwgODcuMTA3MTQzKSByb3RhdGUoLTI3MC4wMDAwMDApIHRyYW5zbGF0ZSgtNTAuMDAwMDAwLCAtODcuMTA3MTQzKSAiIHBvaW50cz0iNTAgMTM2LjQ3NjE3NyAtMzYuNjAyNTQwNCAxMzYuNjYwNzE0IDYuODU5OTgzNzQgNjIuNDIyNjI1OSA1MCAtMTIgOTMuMTQwMDE2MyA2Mi40MjI2MjU5IDEzNi42MDI1NCAxMzYuNjYwNzE0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==);mask-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4
KPHN2ZyB3aWR0aD0iMTUwcHgiIGhlaWdodD0iMTc0cHgiIHZpZXdCb3g9IjAgMCAxNTAgMTc0IiB2ZXJzaW9uPSIxLjEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiPgogICAgPCEtLSBHZW5lcmF0b3I6IFNrZXRjaCA2MC4xICg4ODEzMykgLSBodHRwczovL3NrZXRjaC5jb20gLS0+CiAgICA8dGl0bGU+dHJpYW5nbGUtcmlnaHQ8L3RpdGxlPgogICAgPGRlc2M+Q3JlYXRlZCB3aXRoIFNrZXRjaC48L2Rlc2M+CiAgICA8ZyBpZD0iUGFnZS0xIiBzdHJva2U9Im5vbmUiIHN0cm9rZS13aWR0aD0iMSIgZmlsbD0ibm9uZSIgZmlsbC1ydWxlPSJldmVub2RkIj4KICAgICAgICA8cG9seWdvbiBpZD0idHJpYW5nbGUtcmlnaHQiIGZpbGw9IiMwMDAwMDAiIHRyYW5zZm9ybT0idHJhbnNsYXRlKDUwLjAwMDAwMCwgODcuMTA3MTQzKSByb3RhdGUoLTI3MC4wMDAwMDApIHRyYW5zbGF0ZSgtNTAuMDAwMDAwLCAtODcuMTA3MTQzKSAiIHBvaW50cz0iNTAgMTM2LjQ3NjE3NyAtMzYuNjAyNTQwNCAxMzYuNjYwNzE0IDYuODU5OTgzNzQgNjIuNDIyNjI1OSA1MCAtMTIgOTMuMTQwMDE2MyA2Mi40MjI2MjU5IDEzNi42MDI1NCAxMzYuNjYwNzE0Ij48L3BvbHlnb24+CiAgICA8L2c+Cjwvc3ZnPg==)}.menu.horizontal li.bordered>a,.menu.horizontal li.bordered>button,.menu.horizontal li.bordered>span{--tw-border-opacity:1;border-bottom-width:4px;border-left-width:0;border-color:hsl(var(--p)/var(--tw-border-opacity))}.menu[class*=p-] li *,.menu[class*=p-] li button{border-radius:var(--rounded-btn,.5rem)}.menu :where(li.bordered>*){--tw-border-opacity:1;border-left-width:4px;border-color:hsl(var(--p)/var(--tw-border-opacity))}.menu :where(li)>:where(:not(ul)){color:currentColor;gap:.75rem;padding:.75rem 1rem}.menu :where(li:not(.menu-title):not(:empty))>:where(:not(ul):focus),.menu :where(li:not(.menu-title):not(:empty))>:where(:not(ul):hover){background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.1}.menu :where(li:not(.menu-title):not(:empty))>:where(:not(ul).active),.menu :where(li:not(.menu-title):not(:empty))>:where(:not(ul):active){--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.menu :where(li:empty){height:1px;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.1;margin:.5rem 1rem}.menu li.disabled>*{-webkit-user-select:none;user-select:none;color:hsl(var(--bc)/var(--tw-text-opacity));--tw-text-opacity:.2}.menu li.disabled>:hover{background-color:#0000}.menu li.hover-bordered a{border-color:#0000;border-left-width:4px}.menu li.hover-bordered a:hover{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.menu.compact li>a,.menu.compact li>span{padding-top:.5rem;padding-bottom:.5rem;font-size:.875rem;line-height:1.25rem}.menu .menu-title>*{color:hsl(var(--bc)/var(--tw-text-opacity));--tw-text-opacity:.4;padding-top:.25rem;padding-bottom:.25rem;font-size:.75rem;font-weight:700;line-height:1rem}.menu :where(li:not(.disabled))>:where(:not(ul)){outline-offset:2px;outline:2px solid 
#0000;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.menu>:where(li:first-child){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu>:where(li:first-child)>:where(:not(ul)){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu>:where(li:last-child){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.menu>:where(li:last-child)>:where(:not(ul)){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.menu>:where(li)>:where(ul) :where(li){width:100%;white-space:nowrap}.menu>:where(li)>:where(ul) :where(li) :where(ul){padding-left:1rem}.menu>:where(li)>:where(ul) :where(li) :where(:not(ul)){width:100%;white-space:nowrap}.menu>:where(li)>:where(ul)>:where(li:first-child){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu>:where(li)>:where(ul)>:where(li:first-child)>:where(:not(ul)){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu>:where(li)>:where(ul)>:where(li:last-child){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.menu>:where(li)>:where(ul)>:where(li:last-child)>:where(:not(ul)){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.mockup-code:before{content:"";height:.75rem;width:.75rem;opacity:.3;border-radius:9999px;margin-bottom:1rem;display:block;box-shadow:1.4em 0,2.8em 0,4.2em 0}.mockup-code pre{padding-right:1.25rem}.mockup-code pre:before{content:"";margin-right:2ch}.mockup-window:before{content:"";height:.75rem;width:.75rem;opacity:.3;border-radius:9999px;margin-bottom:1rem;display:block;box-shadow:1.4em 0,2.8em 0,4.2em 0}.mockup-phone{background-color:#000;border-radius:50px;border:4px solid #444;margin:0 auto;padding:10px;display:inline-block;overflow:hidden}.mockup-phone .camera{height:25px;width:150px;z-index:11;background:#000;border-bottom-left-radius:17px;border-bottom-right-radius:17px;margin:0 auto;position:relative;top:0;left:0}.mockup-phone .camera:before{content:"";width:50px;height:4px;background-color:#0c0b0e;border-radius:5px;position:absolute;top:35%;left:50%;transform:translate(-50%,-50%)}.mockup-phone .camera:after{content:"";width:8px;height:8px;background-color:#0f0b25;border-radius:5px;position:absolute;top:20%;left:70%}.mockup-phone .display{border-radius:40px;margin-top:-25px;overflow:hidden}.modal-open .modal-box,.modal-toggle:checked+.modal .modal-box,.modal:target .modal-box{--tw-translate-y:0px;--tw-scale-x:1;--tw-scale-y:1;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.modal-action>:not([hidden])~:not([hidden]){--tw-space-x-reverse:0;margin-right:calc(.5rem*var(--tw-space-x-reverse));margin-left:calc(.5rem*calc(1 - 
var(--tw-space-x-reverse)))}.progress::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity))}.progress-primary::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity))}.progress-secondary::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity))}.progress-accent::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity))}.progress-info::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity))}.progress-success::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity))}.progress-warning::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity))}.progress-error::-moz-progress-bar{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity))}.progress::-webkit-progress-bar{background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-bg-opacity:.2;border-radius:var(--rounded-box,1rem)}.progress::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--nf,var(--n))/var(--tw-bg-opacity));border-radius:var(--rounded-box,1rem)}.progress-primary::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity))}.progress-secondary::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity))}.progress-accent::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity))}.progress-info::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity))}.progress-success::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity))}.progress-warning::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity))}.progress-error::-webkit-progress-value{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity))}.radio:focus-visible{outline:2px solid hsl(var(--bc));outline-offset:2px}.radio:checked{--tw-bg-opacity:1;background-color:hsl(var(--bc)/var(--tw-bg-opacity));animation:radiomark var(--animation-input,.2s)ease-in-out;box-shadow:0 0 0 4px hsl(var(--b1))inset,0 0 0 4px hsl(var(--b1))inset}.radio-primary{--chkbg:var(--p);--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.radio-primary:hover{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.radio-primary:focus-visible{outline:2px solid hsl(var(--p))}.radio-primary:checked{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.radio-secondary{--chkbg:var(--s);--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.radio-secondary:hover{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.radio-secondary:focus-visible{outline:2px solid hsl(var(--s))}.radio-secondary:checked{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.radio-accent{--chkbg:var(--a);--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.radio-accent:hover{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.radio-accent:focus-visible{outline:2px solid 
hsl(var(--a))}.radio-accent:checked{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.radio:disabled{cursor:not-allowed;opacity:.2}@keyframes radiomark{0%{box-shadow:0 0 0 12px hsl(var(--b1))inset,0 0 0 12px hsl(var(--b1))inset}50%{box-shadow:0 0 0 3px hsl(var(--b1))inset,0 0 0 3px hsl(var(--b1))inset}to{box-shadow:0 0 0 4px hsl(var(--b1))inset,0 0 0 4px hsl(var(--b1))inset}}.radio-mark{display:none}.range:focus-visible::-webkit-slider-thumb{--focus-shadow:0 0 0 6px hsl(var(--b1))inset,0 0 0 2rem hsl(var(--range-shdw))inset}.range:focus-visible::-moz-range-thumb{--focus-shadow:0 0 0 6px hsl(var(--b1))inset,0 0 0 2rem hsl(var(--range-shdw))inset}.range::-webkit-slider-runnable-track{height:.5rem;width:100%;border-radius:var(--rounded-box,1rem);background-color:hsla(var(--bc)/.1)}.range::-moz-range-track{height:.5rem;width:100%;border-radius:var(--rounded-box,1rem);background-color:hsla(var(--bc)/.1)}.range::-webkit-slider-thumb{background-color:hsl(var(--b1));height:1.5rem;width:1.5rem;border-radius:var(--rounded-box,1rem);-webkit-appearance:none;color:hsl(var(--range-shdw));--filler-size:100rem;--filler-offset:.6rem;box-shadow:0 0 0 3px hsl(var(--range-shdw))inset,var(--focus-shadow,0 0),calc(var(--filler-size)*-1 - var(--filler-offset))0 0 var(--filler-size);border-style:none;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);position:relative;top:50%;transform:translateY(-50%)}.range::-moz-range-thumb{background-color:hsl(var(--b1));height:1.5rem;width:1.5rem;border-radius:var(--rounded-box,1rem);color:hsl(var(--range-shdw));--filler-size:100rem;--filler-offset:.5rem;box-shadow:0 0 0 3px hsl(var(--range-shdw))inset,var(--focus-shadow,0 0),calc(var(--filler-size)*-1 - var(--filler-offset))0 0 var(--filler-size);border-style:none;transition-property:all;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);position:relative;top:50%}.range-primary{--range-shdw:var(--p)}.range-secondary{--range-shdw:var(--s)}.range-accent{--range-shdw:var(--a)}.rating input{-webkit-appearance:none;appearance:none}.rating .rating-hidden{width:.5rem;background-color:#0000}.rating input:checked~input{--tw-bg-opacity:.2}.rating input:focus-visible{transition-property:transform;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);transform:translateY(-.125em)}.rating input:active:focus{animation:none;transform:translateY(-.125em)}.rating-half :where(input:not(.rating-hidden)){width:.75rem}@keyframes rating-pop{0%{transform:translateY(-.125em)}40%{transform:translateY(-.125em)}to{transform:translateY(0)}}.select-bordered{--tw-border-opacity:.2}.select:focus{outline:2px solid hsla(var(--bc)/.2);outline-offset:2px}.select-ghost{--tw-bg-opacity:.05}.select-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity))}.select-primary{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.select-primary:focus{outline:2px solid hsl(var(--p))}.select-secondary{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.select-secondary:focus{outline:2px solid hsl(var(--s))}.select-accent{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.select-accent:focus{outline:2px solid 
hsl(var(--a))}.select-info{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity))}.select-info:focus{outline:2px solid hsl(var(--in))}.select-success{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity))}.select-success:focus{outline:2px solid hsl(var(--su))}.select-warning{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity))}.select-warning:focus{outline:2px solid hsl(var(--wa))}.select-error{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity))}.select-error:focus{outline:2px solid hsl(var(--er))}.select-disabled::placeholder,.select[disabled]::placeholder{color:hsl(var(--bc)/var(--tw-placeholder-opacity));--tw-placeholder-opacity:.2}.select-multiple,.select[multiple],.select[size].select:not([size="1"]){background-image:none;padding-right:1rem}:where(.stats)>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:0;border-right-width:calc(1px*var(--tw-divide-x-reverse));border-left-width:calc(1px*calc(1 - var(--tw-divide-x-reverse)));--tw-divide-y-reverse:0;border-top-width:calc(0px*calc(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(0px*var(--tw-divide-y-reverse))}.steps .step:before{height:.5rem;width:100%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));--tw-bg-opacity:1;background-color:hsl(var(--b3,var(--b2))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));content:"";grid-row-start:1;grid-column-start:1;margin-left:-100%;top:0}.steps .step:after{content:counter(step);counter-increment:step;z-index:1;height:2rem;width:2rem;--tw-bg-opacity:1;background-color:hsl(var(--b3,var(--b2))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));border-radius:9999px;grid-row-start:1;grid-column-start:1;place-self:center;place-items:center;display:grid;position:relative}.steps .step:first-child:before{content:none}.steps .step[data-content]:after{content:attr(data-content)}.steps .step-neutral+.step-neutral:before,.steps .step-neutral:after{--tw-bg-opacity:1;background-color:hsl(var(--n)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--nc)/var(--tw-text-opacity))}.steps .step-primary+.step-primary:before,.steps .step-primary:after{--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.steps .step-secondary+.step-secondary:before,.steps .step-secondary:after{--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.steps .step-accent+.step-accent:before,.steps .step-accent:after{--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.steps .step-info+.step-info:before{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity))}.steps .step-info:after{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--inc,var(--nc))/var(--tw-text-opacity))}.steps .step-success+.step-success:before{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity))}.steps .step-success:after{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--suc,var(--nc))/var(--tw-text-opacity))}.steps 
.step-warning+.step-warning:before{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity))}.steps .step-warning:after{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--wac,var(--nc))/var(--tw-text-opacity))}.steps .step-error+.step-error:before{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity))}.steps .step-error:after{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--erc,var(--nc))/var(--tw-text-opacity))}.swap-rotate .swap-on,.swap-rotate .swap-indeterminate,.swap-rotate input:indeterminate~.swap-on{--tw-rotate:45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.swap-rotate input:checked~.swap-off,.swap-rotate.swap-active .swap-off,.swap-rotate input:indeterminate~.swap-off{--tw-rotate:-45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.swap-rotate input:checked~.swap-on,.swap-rotate.swap-active .swap-on,.swap-rotate input:indeterminate~.swap-indeterminate{--tw-rotate:0deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y))}.swap-flip{transform-style:preserve-3d;perspective:16em}.swap-flip .swap-on,.swap-flip .swap-indeterminate,.swap-flip input:indeterminate~.swap-on{-webkit-backface-visibility:hidden;backface-visibility:hidden;opacity:1;transform:rotateY(180deg)}.swap-flip input:checked~.swap-off,.swap-flip.swap-active .swap-off,.swap-flip input:indeterminate~.swap-off{-webkit-backface-visibility:hidden;backface-visibility:hidden;opacity:1;transform:rotateY(180deg)}.swap-flip input:checked~.swap-on,.swap-flip.swap-active .swap-on,.swap-flip input:indeterminate~.swap-indeterminate{transform:rotateY(0deg)}.tab:hover{--tw-text-opacity:1}.tab.tab-active{border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:1;--tw-text-opacity:1}.tab:focus{outline-offset:2px;outline:2px solid #0000}.tab:focus-visible{outline-offset:-3px;border-bottom-right-radius:var(--tab-radius,.5rem);border-bottom-left-radius:var(--tab-radius,.5rem);outline:2px solid }.tab-bordered{border-color:hsl(var(--bc)/var(--tw-border-opacity));--tw-border-opacity:.2;border-style:solid;border-bottom-width:calc(var(--tab-border,1px) + 1px)}.tab-lifted{border:var(--tab-border,1px)solid transparent;border-width:0 0 var(--tab-border,1px)0;border-top-left-radius:var(--tab-radius,.5rem);border-top-right-radius:var(--tab-radius,.5rem);border-bottom-color:var(--tab-border-color);padding-left:var(--tab-padding,1rem);padding-right:var(--tab-padding,1rem);padding-top:var(--tab-border,1px)}.tab-lifted.tab-active{background-color:var(--tab-bg);border-width:var(--tab-border,1px)var(--tab-border,1px)0 var(--tab-border,1px);border-left-color:var(--tab-border-color);border-right-color:var(--tab-border-color);border-top-color:var(--tab-border-color);padding-left:calc(var(--tab-padding,1rem) - var(--tab-border,1px));padding-right:calc(var(--tab-padding,1rem) - 
var(--tab-border,1px));padding-bottom:var(--tab-border,1px);padding-top:0}.tab-lifted.tab-active:before,.tab-lifted.tab-active:after{z-index:1;content:"";width:var(--tab-radius,.5rem);height:var(--tab-radius,.5rem);--tab-grad:calc(68% - var(--tab-border,1px));--tab-corner-bg:radial-gradient(circle at var(--circle-pos),transparent var(--tab-grad),var(--tab-border-color)calc(var(--tab-grad) + .3px),var(--tab-border-color)calc(var(--tab-grad) + var(--tab-border,1px)),var(--tab-bg)calc(var(--tab-grad) + var(--tab-border,1px) + .3px));display:block;position:absolute;bottom:0}.tab-lifted.tab-active:before{left:calc(var(--tab-radius,.5rem)*-1);--circle-pos:top left;background-image:var(--tab-corner-bg)}[dir=rtl] .tab-lifted.tab-active:before{--circle-pos:top right}.tab-lifted.tab-active:after{right:calc(var(--tab-radius,.5rem)*-1);--circle-pos:top right;background-image:var(--tab-corner-bg)}[dir=rtl] .tab-lifted.tab-active:after{--circle-pos:top left}.tab-lifted.tab-active:first-child:before,.tab-lifted.tab-active:last-child:after,.tab-lifted.tab-active+.tab-lifted.tab-active:before{background:0 0}.tabs-boxed{--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));border-radius:var(--rounded-btn,.5rem);padding:.25rem}.tabs-boxed .tab-active{--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity));border-radius:var(--rounded-btn,.5rem)}.tabs-boxed .tab-active:hover{--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.table :where(th,td){white-space:nowrap;vertical-align:middle;padding:1rem}.table tr.active th,.table tr.active td,.table tr.active:nth-child(2n) th,.table tr.active:nth-child(2n) td,.table tr.hover:hover th,.table tr.hover:hover td,.table tr.hover:nth-child(2n):hover th,.table tr.hover:nth-child(2n):hover td{--tw-bg-opacity:1;background-color:hsl(var(--b3,var(--b2))/var(--tw-bg-opacity))}.table:where(:not(.table-zebra)) :where(thead,tbody,tfoot) :where(tr:not(:last-child) :where(th,td)){--tw-border-opacity:1;border-bottom-width:1px;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity))}.table :where(thead,tfoot) :where(th,td){--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));text-transform:uppercase;font-size:.75rem;font-weight:700;line-height:1rem}.table :where(thead,tfoot) :where(th,td):first-child{border-top-left-radius:.5rem;border-bottom-left-radius:.5rem}.table :where(thead,tfoot) :where(th,td):last-child{border-top-right-radius:.5rem;border-bottom-right-radius:.5rem}.table :where(tbody th,tbody td){--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity))}.table-zebra tbody tr th:first-child,.table-zebra tbody tr td:first-child{border-top-left-radius:.5rem;border-bottom-left-radius:.5rem}.table-zebra tbody tr th:last-child,.table-zebra tbody tr td:last-child{border-top-right-radius:.5rem;border-bottom-right-radius:.5rem}.table-zebra tbody tr:nth-child(2n) th,.table-zebra tbody tr:nth-child(2n) td{--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity))}.textarea-bordered{--tw-border-opacity:.2}.textarea:focus{outline:2px solid hsla(var(--bc)/.2);outline-offset:2px}.textarea-ghost{--tw-bg-opacity:.05}.textarea-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));box-shadow:none}.textarea-primary{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity))}.textarea-primary:focus{outline:2px solid 
hsl(var(--p))}.textarea-secondary{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity))}.textarea-secondary:focus{outline:2px solid hsl(var(--s))}.textarea-accent{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity))}.textarea-accent:focus{outline:2px solid hsl(var(--a))}.textarea-info{--tw-border-opacity:1;border-color:hsl(var(--in)/var(--tw-border-opacity))}.textarea-info:focus{outline:2px solid hsl(var(--in))}.textarea-success{--tw-border-opacity:1;border-color:hsl(var(--su)/var(--tw-border-opacity))}.textarea-success:focus{outline:2px solid hsl(var(--su))}.textarea-warning{--tw-border-opacity:1;border-color:hsl(var(--wa)/var(--tw-border-opacity))}.textarea-warning:focus{outline:2px solid hsl(var(--wa))}.textarea-error{--tw-border-opacity:1;border-color:hsl(var(--er)/var(--tw-border-opacity))}.textarea-error:focus{outline:2px solid hsl(var(--er))}.textarea-disabled,.textarea[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));--tw-text-opacity:.2}.textarea-disabled::placeholder,.textarea[disabled]::placeholder{color:hsl(var(--bc)/var(--tw-placeholder-opacity));--tw-placeholder-opacity:.2}.toggle:focus-visible{outline:2px solid hsl(var(--bc));outline-offset:2px}.toggle:checked,.toggle[checked=true]{--chkbg:hsl(var(--bc));--tw-border-opacity:1;--tw-bg-opacity:1;box-shadow:var(--handleoffset)0 0 2px hsl(var(--b1))inset,0 0 0 2px hsl(var(--b1))inset}[dir=rtl] .toggle:checked,[dir=rtl] .toggle[checked=true]{box-shadow:calc(var(--handleoffset)*1)0 0 2px hsl(var(--b1))inset,0 0 0 2px hsl(var(--b1))inset}.toggle:indeterminate{--chkbg:hsl(var(--bc));--tw-border-opacity:1;--tw-bg-opacity:1;box-shadow:calc(var(--handleoffset)/2)0 0 2px hsl(var(--b1))inset,calc(var(--handleoffset)/-2)0 0 2px hsl(var(--b1))inset,0 0 0 2px hsl(var(--b1))inset}[dir=rtl] .toggle:indeterminate{box-shadow:calc(var(--handleoffset)/2)0 0 2px hsl(var(--b1))inset,calc(var(--handleoffset)/-2)0 0 2px hsl(var(--b1))inset,0 0 0 2px hsl(var(--b1))inset}.toggle-primary:focus-visible{outline:2px solid hsl(var(--p))}.toggle-primary:checked,.toggle-primary[checked=true]{--chkbg:hsl(var(--p));border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-border-opacity:.1;--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.toggle-secondary:focus-visible{outline:2px solid hsl(var(--s))}.toggle-secondary:checked,.toggle-secondary[checked=true]{--chkbg:hsl(var(--s));border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-border-opacity:.1;--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.toggle-accent:focus-visible{outline:2px solid 
hsl(var(--a))}.toggle-accent:checked,.toggle-accent[checked=true]{--chkbg:hsl(var(--a));border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-border-opacity:.1;--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.toggle:disabled{cursor:not-allowed;background-color:hsl(var(--bc)/var(--tw-bg-opacity));--tw-bg-opacity:.2;border-color:#0000}.toggle-mark{display:none}.tooltip:before,.tooltip:after{opacity:0;transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter;transition-duration:.2s;transition-delay:.1s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.tooltip:after{content:"";border-style:solid;border-width:var(--tooltip-tail,0);width:0;height:0;border-color:var(--tooltip-color)transparent transparent transparent;top:auto;left:50%;right:auto;bottom:var(--tooltip-tail-offset);display:block;position:absolute;transform:translate(-50%)}.tooltip.tooltip-open:before,.tooltip.tooltip-open:after,.tooltip:hover:before,.tooltip:hover:after{opacity:1;transition-delay:75ms}.tooltip-bottom:after{border-color:transparent transparent var(--tooltip-color)transparent;top:var(--tooltip-tail-offset);bottom:auto;left:50%;right:auto;transform:translate(-50%)}.tooltip-left:after{border-color:transparent transparent transparent var(--tooltip-color);top:50%;left:auto;right:calc(var(--tooltip-tail-offset) + 1px);bottom:auto;transform:translateY(-50%)}.tooltip-right:after{border-color:transparent var(--tooltip-color)transparent transparent;top:50%;left:calc(var(--tooltip-tail-offset) + 1px);bottom:auto;right:auto;transform:translateY(-50%)}.tooltip-primary{--tooltip-color:hsl(var(--p));--tooltip-text-color:hsl(var(--pc))}.tooltip-secondary{--tooltip-color:hsl(var(--s));--tooltip-text-color:hsl(var(--sc))}.tooltip-accent{--tooltip-color:hsl(var(--a));--tooltip-text-color:hsl(var(--ac))}.tooltip-info{--tooltip-color:hsla(var(--in));--tooltip-text-color:hsl(var(--inc))}.tooltip-success{--tooltip-color:hsla(var(--su));--tooltip-text-color:hsl(var(--suc))}.tooltip-warning{--tooltip-color:hsla(var(--wa));--tooltip-text-color:hsl(var(--wac))}.tooltip-error{--tooltip-color:hsla(var(--er));--tooltip-text-color:hsl(var(--erc))}:root .prose{--tw-prose-body:hsla(var(--bc)/.8);--tw-prose-headings:hsl(var(--bc));--tw-prose-lead:hsl(var(--bc));--tw-prose-links:hsl(var(--bc));--tw-prose-bold:hsl(var(--bc));--tw-prose-counters:hsl(var(--bc));--tw-prose-bullets:hsla(var(--bc)/.5);--tw-prose-hr:hsla(var(--bc)/.2);--tw-prose-quotes:hsl(var(--bc));--tw-prose-quote-borders:hsla(var(--bc)/.2);--tw-prose-captions:hsla(var(--bc)/.5);--tw-prose-code:hsl(var(--bc));--tw-prose-pre-code:hsl(var(--nc));--tw-prose-pre-bg:hsl(var(--n));--tw-prose-th-borders:hsla(var(--bc)/.5);--tw-prose-td-borders:hsla(var(--bc)/.2)}.prose :where(code):not(:where([class~=not-prose] *)){border-radius:var(--rounded-badge);padding:2px 8px}.prose code:after,.prose code:before{content:none}.prose pre code{border-radius:none;padding:0}.prose :where(tbody tr,thead):not(:where([class~=not-prose] *)){border-bottom-color:hsl(var(--bc)/20%)}.animate-none{animation:none}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{animation:spin 1s linear infinite}@keyframes ping{75%,to{opacity:0;transform:scale(2)}}.animate-ping{animation:ping 1s cubic-bezier(0,0,.2,1) infinite}@keyframes 
pulse{50%{opacity:.5}}.animate-pulse{animation:pulse 2s cubic-bezier(.4,0,.6,1) infinite}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}.animate-bounce{animation:bounce 1s infinite}.divide-transparent>:not([hidden])~:not([hidden]){border-color:#0000}.divide-current>:not([hidden])~:not([hidden]){border-color:currentColor}.divide-primary>:not([hidden])~:not([hidden]){border-color:hsl(var(--p))}.divide-primary-focus>:not([hidden])~:not([hidden]){border-color:hsl(var(--pf,var(--p)))}.divide-primary-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--pc))}.divide-secondary>:not([hidden])~:not([hidden]){border-color:hsl(var(--s))}.divide-secondary-focus>:not([hidden])~:not([hidden]){border-color:hsl(var(--sf,var(--s)))}.divide-secondary-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--sc))}.divide-accent>:not([hidden])~:not([hidden]){border-color:hsl(var(--a))}.divide-accent-focus>:not([hidden])~:not([hidden]){border-color:hsl(var(--af,var(--a)))}.divide-accent-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--ac))}.divide-neutral>:not([hidden])~:not([hidden]){border-color:hsl(var(--n))}.divide-neutral-focus>:not([hidden])~:not([hidden]){border-color:hsl(var(--nf,var(--n)))}.divide-neutral-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--nc))}.divide-base-100>:not([hidden])~:not([hidden]){border-color:hsl(var(--b1))}.divide-base-200>:not([hidden])~:not([hidden]){border-color:hsl(var(--b2,var(--b1)))}.divide-base-300>:not([hidden])~:not([hidden]){border-color:hsl(var(--b3,var(--b2)))}.divide-base-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--bc))}.divide-info>:not([hidden])~:not([hidden]){border-color:hsl(var(--in))}.divide-info-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--inc,var(--nc)))}.divide-success>:not([hidden])~:not([hidden]){border-color:hsl(var(--su))}.divide-success-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--suc,var(--nc)))}.divide-warning>:not([hidden])~:not([hidden]){border-color:hsl(var(--wa))}.divide-warning-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--wac,var(--nc)))}.divide-error>:not([hidden])~:not([hidden]){border-color:hsl(var(--er))}.divide-error-content>:not([hidden])~:not([hidden]){border-color:hsl(var(--erc,var(--nc)))}.border-transparent{border-color:#0000}.border-current{border-color:currentColor}.border-primary{border-color:hsl(var(--p))}.border-primary-focus{border-color:hsl(var(--pf,var(--p)))}.border-primary-content{border-color:hsl(var(--pc))}.border-secondary{border-color:hsl(var(--s))}.border-secondary-focus{border-color:hsl(var(--sf,var(--s)))}.border-secondary-content{border-color:hsl(var(--sc))}.border-accent{border-color:hsl(var(--a))}.border-accent-focus{border-color:hsl(var(--af,var(--a)))}.border-accent-content{border-color:hsl(var(--ac))}.border-neutral{border-color:hsl(var(--n))}.border-neutral-focus{border-color:hsl(var(--nf,var(--n)))}.border-neutral-content{border-color:hsl(var(--nc))}.border-base-100{border-color:hsl(var(--b1))}.border-base-200{border-color:hsl(var(--b2,var(--b1)))}.border-base-300{border-color:hsl(var(--b3,var(--b2)))}.border-base-content{border-color:hsl(var(--bc))}.border-info{border-color:hsl(var(--in))}.border-info-content{border-color:hsl(var(--inc,var(--nc)))}.border-success{border-color:hsl(var(--su))}.border-success-content{border-color:hsl(var(--suc,var(--nc)))}.border-warning{border-color:hsl(var(--wa))}.border-warning-content{border-color:hsl(var(--wac,var(--nc)))}.border-error{border-color:hsl(var(--er))}.border-error-content{border-color:hsl(var(--erc,var(--nc)))}.border-x-transparent{border-left-color:#0000;border-right-color:#0000}.border-x-current{border-left-color:currentColor;border-right-color:currentColor}.border-x-primary{border-left-color:hsl(var(--p));border-right-color:hsl(var(--p))}.border-x-primary-focus{border-left-color:hsl(var(--pf,var(--p)));border-right-color:hsl(var(--pf,var(--p)))}.border-x-primary-content{border-left-color:hsl(var(--pc));border-right-color:hsl(var(--pc))}.border-x-secondary{border-left-color:hsl(var(--s));border-right-color:hsl(var(--s))}.border-x-secondary-focus{border-left-color:hsl(var(--sf,var(--s)));border-right-color:hsl(var(--sf,var(--s)))}.border-x-secondary-content{border-left-color:hsl(var(--sc));border-right-color:hsl(var(--sc))}.border-x-accent{border-left-color:hsl(var(--a));border-right-color:hsl(var(--a))}.border-x-accent-focus{border-left-color:hsl(var(--af,var(--a)));border-right-color:hsl(var(--af,var(--a)))}.border-x-accent-content{border-left-color:hsl(var(--ac));border-right-color:hsl(var(--ac))}.border-x-neutral{border-left-color:hsl(var(--n));border-right-color:hsl(var(--n))}.border-x-neutral-focus{border-left-color:hsl(var(--nf,var(--n)));border-right-color:hsl(var(--nf,var(--n)))}.border-x-neutral-content{border-left-color:hsl(var(--nc));border-right-color:hsl(var(--nc))}.border-x-base-100{border-left-color:hsl(var(--b1));border-right-color:hsl(var(--b1))}.border-x-base-200{border-left-color:hsl(var(--b2,var(--b1)));border-right-color:hsl(var(--b2,var(--b1)))}.border-x-base-300{border-left-color:hsl(var(--b3,var(--b2)));border-right-color:hsl(var(--b3,var(--b2)))}.border-x-base-content{border-left-color:hsl(var(--bc));border-right-color:hsl(var(--bc))}.border-x-info{border-left-color:hsl(var(--in));border-right-color:hsl(var(--in))}.border-x-info-content{border-left-color:hsl(var(--inc,var(--nc)));border-right-color:hsl(var(--inc,var(--nc)))}.border-x-success{border-left-color:hsl(var(--su));border-right-color:hsl(var(--su))}.border-x-success-content{border-left-color:hsl(var(--suc,var(--nc)));border-right-color:hsl(var(--suc,var(--nc)))}.border-x-warning{border-left-color:hsl(var(--wa));border-right-color:hsl(var(--wa))}.border-x-warning-content{border-left-color:hsl(var(--wac,var(--nc)));border-right-color:hsl(var(--wac,var(--nc)))}.border-x-error{border-left-color:hsl(var(--er));border-right-color:hsl(var(--er))}.border-x-error-content{border-left-color:hsl(var(--erc,var(--nc)));border-right-color:hsl(var(--erc,var(--nc)))}.border-y-transparent{border-top-color:#0000;border-bottom-color:#0000}.border-y-current{border-top-color:currentColor;border-bottom-color:currentColor}.border-y-primary{border-top-color:hsl(var(--p));border-bottom-color:hsl(var(--p))}.border-y-primary-focus{border-top-color:hsl(var(--pf,var(--p)));border-bottom-color:hsl(var(--pf,var(--p)))}.border-y-primary-content{border-top-color:hsl(var(--pc));border-bottom-color:hsl(var(--pc))}.border-y-secondary{border-top-color:hsl(var(--s));border-bottom-color:hsl(var(--s))}.border-y-secondary-focus{border-top-color:hsl(var(--sf,var(--s)));border-bottom-color:hsl(var(--sf,var(--s)))}.border-y-secondary-content{border-top-color:hsl(var(--sc));border-bottom-color:hsl(var(--sc))}.border-y-accent{border-top-color:hsl(var(--a));border-bottom-color:hsl(var(--a))}.border-y-accent-focus{border-top-color:hsl(var(--af,var(--a)));border-bottom-color:hsl(var(--af,var(--a)))}.border-y-accent-content{border-top-color:hsl(var(--ac));border-bottom-color:hsl(var(--ac))}.border-y-neutral{border-top-color:hsl(var(--n));border-bottom-color:hsl(var(--n))}.border-y-neutral-focus{border-top-color:hsl(var(--nf,var(--n)));border-bottom-color:hsl(var(--nf,var(--n)))}.border-y-neutral-content{border-top-color:hsl(var(--nc));border-bottom-color:hsl(var(--nc))}.border-y-base-100{border-top-color:hsl(var(--b1));border-bottom-color:hsl(var(--b1))}.border-y-base-200{border-top-color:hsl(var(--b2,var(--b1)));border-bottom-color:hsl(var(--b2,var(--b1)))}.border-y-base-300{border-top-color:hsl(var(--b3,var(--b2)));border-bottom-color:hsl(var(--b3,var(--b2)))}.border-y-base-content{border-top-color:hsl(var(--bc));border-bottom-color:hsl(var(--bc))}.border-y-info{border-top-color:hsl(var(--in));border-bottom-color:hsl(var(--in))}.border-y-info-content{border-top-color:hsl(var(--inc,var(--nc)));border-bottom-color:hsl(var(--inc,var(--nc)))}.border-y-success{border-top-color:hsl(var(--su));border-bottom-color:hsl(var(--su))}.border-y-success-content{border-top-color:hsl(var(--suc,var(--nc)));border-bottom-color:hsl(var(--suc,var(--nc)))}.border-y-warning{border-top-color:hsl(var(--wa));border-bottom-color:hsl(var(--wa))}.border-y-warning-content{border-top-color:hsl(var(--wac,var(--nc)));border-bottom-color:hsl(var(--wac,var(--nc)))}.border-y-error{border-top-color:hsl(var(--er));border-bottom-color:hsl(var(--er))}.border-y-error-content{border-top-color:hsl(var(--erc,var(--nc)));border-bottom-color:hsl(var(--erc,var(--nc)))}.border-t-transparent{border-top-color:#0000}.border-t-current{border-top-color:currentColor}.border-t-primary{border-top-color:hsl(var(--p))}.border-t-primary-focus{border-top-color:hsl(var(--pf,var(--p)))}.border-t-primary-content{border-top-color:hsl(var(--pc))}.border-t-secondary{border-top-color:hsl(var(--s))}.border-t-secondary-focus{border-top-color:hsl(var(--sf,var(--s)))}.border-t-secondary-content{border-top-color:hsl(var(--sc))}.border-t-accent{border-top-color:hsl(var(--a))}.border-t-accent-focus{border-top-color:hsl(var(--af,var(--a)))}.border-t-accent-content{border-top-color:hsl(var(--ac))}.border-t-neutral{border-top-color:hsl(var(--n))}.border-t-neutral-focus{border-top-color:hsl(var(--nf,var(--n)))}.border-t-neutral-content{border-top-color:hsl(var(--nc))}.border-t-base-100{border-top-color:hsl(var(--b1))}.border-t-base-200{border-top-color:hsl(var(--b2,var(--b1)))}.border-t-base-300{border-top-color:hsl(var(--b3,var(--b2)))}.border-t-base-content{border-top-color:hsl(var(--bc))}.border-t-info{border-top-color:hsl(var(--in))}.border-t-info-content{border-top-color:hsl(var(--inc,var(--nc)))}.border-t-success{border-top-color:hsl(var(--su))}.border-t-success-content{border-top-color:hsl(var(--suc,var(--nc)))}.border-t-warning{border-top-color:hsl(var(--wa))}.border-t-warning-content{border-top-color:hsl(var(--wac,var(--nc)))}.border-t-error{border-top-color:hsl(var(--er))}.border-t-error-content{border-top-color:hsl(var(--erc,var(--nc)))}.border-r-transparent{border-right-color:#0000}.border-r-current{border-right-color:currentColor}.border-r-primary{border-right-color:hsl(var(--p))}.border-r-primary-focus{border-right-color:hsl(var(--pf,var(--p)))}.border-r-primary-content{border-right-color:hsl(var(--pc))}.border-r-secondary{border-right-color:hsl(var(--s))}.border-r-secondary-focus{border-right-color:hsl(var(--sf,var(--s)))}.border-r-secondary-content{border-right-color:hsl(var(--sc))}.border-r-accent{border-right-color:hsl(var(--a))}.border-r-accent-focus{border-right-color:hsl(var(--af,var(--a)))}.border-r-accent-content{border-right-color:hsl(var(--ac))}.border-r-neutral{border-right-color:hsl(var(--n))}.border-r-neutral-focus{border-right-color:hsl(var(--nf,var(--n)))}.border-r-neutral-content{border-right-color:hsl(var(--nc))}.border-r-base-100{border-right-color:hsl(var(--b1))}.border-r-base-200{border-right-color:hsl(var(--b2,var(--b1)))}.border-r-base-300{border-right-color:hsl(var(--b3,var(--b2)))}.border-r-base-content{border-right-color:hsl(var(--bc))}.border-r-info{border-right-color:hsl(var(--in))}.border-r-info-content{border-right-color:hsl(var(--inc,var(--nc)))}.border-r-success{border-right-color:hsl(var(--su))}.border-r-success-content{border-right-color:hsl(var(--suc,var(--nc)))}.border-r-warning{border-right-color:hsl(var(--wa))}.border-r-warning-content{border-right-color:hsl(var(--wac,var(--nc)))}.border-r-error{border-right-color:hsl(var(--er))}.border-r-error-content{border-right-color:hsl(var(--erc,var(--nc)))}.border-b-transparent{border-bottom-color:#0000}.border-b-current{border-bottom-color:currentColor}.border-b-primary{border-bottom-color:hsl(var(--p))}.border-b-primary-focus{border-bottom-color:hsl(var(--pf,var(--p)))}.border-b-primary-content{border-bottom-color:hsl(var(--pc))}.border-b-secondary{border-bottom-color:hsl(var(--s))}.border-b-secondary-focus{border-bottom-color:hsl(var(--sf,var(--s)))}.border-b-secondary-content{border-bottom-color:hsl(var(--sc))}.border-b-accent{border-bottom-color:hsl(var(--a))}.border-b-accent-focus{border-bottom-color:hsl(var(--af,var(--a)))}.border-b-accent-content{border-bottom-color:hsl(var(--ac))}.border-b-neutral{border-bottom-color:hsl(var(--n))}.border-b-neutral-focus{border-bottom-color:hsl(var(--nf,var(--n)))}.border-b-neutral-content{border-bottom-color:hsl(var(--nc))}.border-b-base-100{border-bottom-color:hsl(var(--b1))}.border-b-base-200{border-bottom-color:hsl(var(--b2,var(--b1)))}.border-b-base-300{border-bottom-color:hsl(var(--b3,var(--b2)))}.border-b-base-content{border-bottom-color:hsl(var(--bc))}.border-b-info{border-bottom-color:hsl(var(--in))}.border-b-info-content{border-bottom-color:hsl(var(--inc,var(--nc)))}.border-b-success{border-bottom-color:hsl(var(--su))}.border-b-success-content{border-bottom-color:hsl(var(--suc,var(--nc)))}.border-b-warning{border-bottom-color:hsl(var(--wa))}.border-b-warning-content{border-bottom-color:hsl(var(--wac,var(--nc)))}.border-b-error{border-bottom-color:hsl(var(--er))}.border-b-error-content{border-bottom-color:hsl(var(--erc,var(--nc)))}.border-l-transparent{border-left-color:#0000}.border-l-current{border-left-color:currentColor}.border-l-primary{border-left-color:hsl(var(--p))}.border-l-primary-focus{border-left-color:hsl(var(--pf,var(--p)))}.border-l-primary-content{border-left-color:hsl(var(--pc))}.border-l-secondary{border-left-color:hsl(var(--s))}.border-l-secondary-focus{border-left-color:hsl(var(--sf,var(--s)))}.border-l-secondary-content{border-left-color:hsl(var(--sc))}.border-l-accent{border-left-color:hsl(var(--a))}.border-l-accent-focus{border-left-color:hsl(var(--af,var(--a)))}.border-l-accent-content{border-left-color:hsl(var(--ac))}.border-l-neutral{border-left-color:hsl(var(--n))}.border-l-neutral-focus{border-left-color:hsl(var(--nf,var(--n)))}.border-l-neutral-content{border-left-color:hsl(var(--nc))}.border-l-base-100{border-left-color:hsl(var(--b1))}.border-l-base-200{border-left-color:hsl(var(--b2,var(--b1)))}.border-l-base-300{border-left-color:hsl(var(--b3,var(--b2)))}.border-l-base-content{border-left-color:hsl(var(--bc))}.border-l-info{border-left-color:hsl(var(--in))}.border-l-info-content{border-left-color:hsl(var(--inc,var(--nc)))}.border-l-success{border-left-color:hsl(var(--su))}.border-l-success-content{border-left-color:hsl(var(--suc,var(--nc)))}.border-l-warning{border-left-color:hsl(var(--wa))}.border-l-warning-content{border-left-color:hsl(var(--wac,var(--nc)))}.border-l-error{border-left-color:hsl(var(--er))}.border-l-error-content{border-left-color:hsl(var(--erc,var(--nc)))}.bg-transparent{background-color:#0000}.bg-current{background-color:currentColor}.bg-primary{background-color:hsl(var(--p))}.bg-primary-focus{background-color:hsl(var(--pf,var(--p)))}.bg-primary-content{background-color:hsl(var(--pc))}.bg-secondary{background-color:hsl(var(--s))}.bg-secondary-focus{background-color:hsl(var(--sf,var(--s)))}.bg-secondary-content{background-color:hsl(var(--sc))}.bg-accent{background-color:hsl(var(--a))}.bg-accent-focus{background-color:hsl(var(--af,var(--a)))}.bg-accent-content{background-color:hsl(var(--ac))}.bg-neutral{background-color:hsl(var(--n))}.bg-neutral-focus{background-color:hsl(var(--nf,var(--n)))}.bg-neutral-content{background-color:hsl(var(--nc))}.bg-base-100{background-color:hsl(var(--b1))}.bg-base-200{background-color:hsl(var(--b2,var(--b1)))}.bg-base-300{background-color:hsl(var(--b3,var(--b2)))}.bg-base-content{background-color:hsl(var(--bc))}.bg-info{background-color:hsl(var(--in))}.bg-info-content{background-color:hsl(var(--inc,var(--nc)))}.bg-success{background-color:hsl(var(--su))}.bg-success-content{background-color:hsl(var(--suc,var(--nc)))}.bg-warning{background-color:hsl(var(--wa))}.bg-warning-content{background-color:hsl(var(--wac,var(--nc)))}.bg-error{background-color:hsl(var(--er))}.bg-error-content{background-color:hsl(var(--erc,var(--nc)))}.bg-none{background-image:none}.bg-gradient-to-t{background-image:linear-gradient(to top,var(--tw-gradient-stops))}.bg-gradient-to-tr{background-image:linear-gradient(to top right,var(--tw-gradient-stops))}.bg-gradient-to-r{background-image:linear-gradient(to right,var(--tw-gradient-stops))}.bg-gradient-to-br{background-image:linear-gradient(to bottom right,var(--tw-gradient-stops))}.bg-gradient-to-b{background-image:linear-gradient(to bottom,var(--tw-gradient-stops))}.bg-gradient-to-bl{background-image:linear-gradient(to bottom left,var(--tw-gradient-stops))}.bg-gradient-to-l{background-image:linear-gradient(to left,var(--tw-gradient-stops))}.bg-gradient-to-tl{background-image:linear-gradient(to top 
left,var(--tw-gradient-stops))}.from-transparent{--tw-gradient-from:transparent;--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,#0000)}.from-current{--tw-gradient-from:currentColor;--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,#fff0)}.from-primary{--tw-gradient-from:hsl(var(--p));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--p)/0))}.from-primary-focus{--tw-gradient-from:hsl(var(--pf,var(--p)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--pf,var(--p))/0))}.from-primary-content{--tw-gradient-from:hsl(var(--pc));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--pc)/0))}.from-secondary{--tw-gradient-from:hsl(var(--s));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--s)/0))}.from-secondary-focus{--tw-gradient-from:hsl(var(--sf,var(--s)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--sf,var(--s))/0))}.from-secondary-content{--tw-gradient-from:hsl(var(--sc));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--sc)/0))}.from-accent{--tw-gradient-from:hsl(var(--a));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--a)/0))}.from-accent-focus{--tw-gradient-from:hsl(var(--af,var(--a)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--af,var(--a))/0))}.from-accent-content{--tw-gradient-from:hsl(var(--ac));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--ac)/0))}.from-neutral{--tw-gradient-from:hsl(var(--n));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--n)/0))}.from-neutral-focus{--tw-gradient-from:hsl(var(--nf,var(--n)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--nf,var(--n))/0))}.from-neutral-content{--tw-gradient-from:hsl(var(--nc));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--nc)/0))}.from-base-100{--tw-gradient-from:hsl(var(--b1));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--b1)/0))}.from-base-200{--tw-gradient-from:hsl(var(--b2,var(--b1)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--b2,var(--b1))/0))}.from-base-300{--tw-gradient-from:hsl(var(--b3,var(--b2)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--b3,var(--b2))/0))}.from-base-content{--tw-gradient-from:hsl(var(--bc));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--bc)/0))}.from-info{--tw-gradient-from:hsl(var(--in));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--in)/0))}.from-info-content{--tw-gradient-from:hsl(var(--inc,var(--nc)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--inc,var(--nc))/0))}.from-success{--tw-gradient-from:hsl(var(--su));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--su)/0))}.from-success-content{--tw-gradient-from:hsl(var(--suc,var(--nc)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--suc,var(--nc))/0))}.from-warning{--tw-gradient-from:hsl(var(--wa));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--wa)/0))}.from-warning-content{--tw-gradient-from:hsl(var(--wac,var(--nc)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--wac,var(--nc))/0))}.from-error{--tw-gradient-from:hsl(var(--er));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--er)/0))}.from-error-content
{--tw-gradient-from:hsl(var(--erc,var(--nc)));--tw-gradient-stops:var(--tw-gradient-from),var(--tw-gradient-to,hsl(var(--erc,var(--nc))/0))}.via-transparent{--tw-gradient-stops:var(--tw-gradient-from),transparent,var(--tw-gradient-to,#0000)}.via-current{--tw-gradient-stops:var(--tw-gradient-from),currentColor,var(--tw-gradient-to,#fff0)}.via-primary{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--p)),var(--tw-gradient-to,hsl(var(--p)/0))}.via-primary-focus{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--pf,var(--p))),var(--tw-gradient-to,hsl(var(--pf,var(--p))/0))}.via-primary-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--pc)),var(--tw-gradient-to,hsl(var(--pc)/0))}.via-secondary{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--s)),var(--tw-gradient-to,hsl(var(--s)/0))}.via-secondary-focus{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--sf,var(--s))),var(--tw-gradient-to,hsl(var(--sf,var(--s))/0))}.via-secondary-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--sc)),var(--tw-gradient-to,hsl(var(--sc)/0))}.via-accent{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--a)),var(--tw-gradient-to,hsl(var(--a)/0))}.via-accent-focus{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--af,var(--a))),var(--tw-gradient-to,hsl(var(--af,var(--a))/0))}.via-accent-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--ac)),var(--tw-gradient-to,hsl(var(--ac)/0))}.via-neutral{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--n)),var(--tw-gradient-to,hsl(var(--n)/0))}.via-neutral-focus{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--nf,var(--n))),var(--tw-gradient-to,hsl(var(--nf,var(--n))/0))}.via-neutral-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--nc)),var(--tw-gradient-to,hsl(var(--nc)/0))}.via-base-100{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--b1)),var(--tw-gradient-to,hsl(var(--b1)/0))}.via-base-200{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--b2,var(--b1))),var(--tw-gradient-to,hsl(var(--b2,var(--b1))/0))}.via-base-300{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--b3,var(--b2))),var(--tw-gradient-to,hsl(var(--b3,var(--b2))/0))}.via-base-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--bc)),var(--tw-gradient-to,hsl(var(--bc)/0))}.via-info{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--in)),var(--tw-gradient-to,hsl(var(--in)/0))}.via-info-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--inc,var(--nc))),var(--tw-gradient-to,hsl(var(--inc,var(--nc))/0))}.via-success{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--su)),var(--tw-gradient-to,hsl(var(--su)/0))}.via-success-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--suc,var(--nc))),var(--tw-gradient-to,hsl(var(--suc,var(--nc))/0))}.via-warning{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--wa)),var(--tw-gradient-to,hsl(var(--wa)/0))}.via-warning-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--wac,var(--nc))),var(--tw-gradient-to,hsl(var(--wac,var(--nc))/0))}.via-error{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--er)),var(--tw-gradient-to,hsl(var(--er)/0))}.via-error-content{--tw-gradient-stops:var(--tw-gradient-from),hsl(var(--erc,var(--nc))),var(--tw-gradient-to,hsl(var(--erc,var(--nc))/0))}.to-transparent{--tw-gradient-to:transparent}.to-current{--tw-gradient-to:currentColor}.to-primary{--tw-gradient-to:hsl(var(--p))}.to-primary-focus{--tw-gradient-to:hsl(var(--pf,var(--p)))}.to-primary-content{--tw-gradient-to:hsl(var(--pc))}.to-secondary{--tw-gradient-to
:hsl(var(--s))}.to-secondary-focus{--tw-gradient-to:hsl(var(--sf,var(--s)))}.to-secondary-content{--tw-gradient-to:hsl(var(--sc))}.to-accent{--tw-gradient-to:hsl(var(--a))}.to-accent-focus{--tw-gradient-to:hsl(var(--af,var(--a)))}.to-accent-content{--tw-gradient-to:hsl(var(--ac))}.to-neutral{--tw-gradient-to:hsl(var(--n))}.to-neutral-focus{--tw-gradient-to:hsl(var(--nf,var(--n)))}.to-neutral-content{--tw-gradient-to:hsl(var(--nc))}.to-base-100{--tw-gradient-to:hsl(var(--b1))}.to-base-200{--tw-gradient-to:hsl(var(--b2,var(--b1)))}.to-base-300{--tw-gradient-to:hsl(var(--b3,var(--b2)))}.to-base-content{--tw-gradient-to:hsl(var(--bc))}.to-info{--tw-gradient-to:hsl(var(--in))}.to-info-content{--tw-gradient-to:hsl(var(--inc,var(--nc)))}.to-success{--tw-gradient-to:hsl(var(--su))}.to-success-content{--tw-gradient-to:hsl(var(--suc,var(--nc)))}.to-warning{--tw-gradient-to:hsl(var(--wa))}.to-warning-content{--tw-gradient-to:hsl(var(--wac,var(--nc)))}.to-error{--tw-gradient-to:hsl(var(--er))}.to-error-content{--tw-gradient-to:hsl(var(--erc,var(--nc)))}.text-transparent{color:#0000}.text-current{color:currentColor}.text-primary{color:hsl(var(--p))}.text-primary-focus{color:hsl(var(--pf,var(--p)))}.text-primary-content{color:hsl(var(--pc))}.text-secondary{color:hsl(var(--s))}.text-secondary-focus{color:hsl(var(--sf,var(--s)))}.text-secondary-content{color:hsl(var(--sc))}.text-accent{color:hsl(var(--a))}.text-accent-focus{color:hsl(var(--af,var(--a)))}.text-accent-content{color:hsl(var(--ac))}.text-neutral{color:hsl(var(--n))}.text-neutral-focus{color:hsl(var(--nf,var(--n)))}.text-neutral-content{color:hsl(var(--nc))}.text-base-100{color:hsl(var(--b1))}.text-base-200{color:hsl(var(--b2,var(--b1)))}.text-base-300{color:hsl(var(--b3,var(--b2)))}.text-base-content{color:hsl(var(--bc))}.text-info{color:hsl(var(--in))}.text-info-content{color:hsl(var(--inc,var(--nc)))}.text-success{color:hsl(var(--su))}.text-success-content{color:hsl(var(--suc,var(--nc)))}.text-warning{color:hsl(var(--wa))}.text-warning-content{color:hsl(var(--wac,var(--nc)))}.text-error{color:hsl(var(--er))}.text-error-content{color:hsl(var(--erc,var(--nc)))}.placeholder-transparent::placeholder{color:#0000}.placeholder-current::placeholder{color:currentColor}.placeholder-primary::placeholder{color:hsl(var(--p))}.placeholder-primary-focus::placeholder{color:hsl(var(--pf,var(--p)))}.placeholder-primary-content::placeholder{color:hsl(var(--pc))}.placeholder-secondary::placeholder{color:hsl(var(--s))}.placeholder-secondary-focus::placeholder{color:hsl(var(--sf,var(--s)))}.placeholder-secondary-content::placeholder{color:hsl(var(--sc))}.placeholder-accent::placeholder{color:hsl(var(--a))}.placeholder-accent-focus::placeholder{color:hsl(var(--af,var(--a)))}.placeholder-accent-content::placeholder{color:hsl(var(--ac))}.placeholder-neutral::placeholder{color:hsl(var(--n))}.placeholder-neutral-focus::placeholder{color:hsl(var(--nf,var(--n)))}.placeholder-neutral-content::placeholder{color:hsl(var(--nc))}.placeholder-base-100::placeholder{color:hsl(var(--b1))}.placeholder-base-200::placeholder{color:hsl(var(--b2,var(--b1)))}.placeholder-base-300::placeholder{color:hsl(var(--b3,var(--b2)))}.placeholder-base-content::placeholder{color:hsl(var(--bc))}.placeholder-info::placeholder{color:hsl(var(--in))}.placeholder-info-content::placeholder{color:hsl(var(--inc,var(--nc)))}.placeholder-success::placeholder{color:hsl(var(--su))}.placeholder-success-content::placeholder{color:hsl(var(--suc,var(--nc)))}.placeholder-warning::placeholder{color:hsl(var(--wa))}.placeholder-warning-content::placeholder{color:hsl(var(--wac,var(--nc)))}.placeholder-error::placeholder{color:hsl(var(--er))}.placeholder-error-content::placeholder{color:hsl(var(--erc,var(--nc)))}.ring-0{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-1{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-2{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-4{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(4px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-8{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(8px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring{--tw-ring-offset-shadow:var(--tw-ring-inset)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 
#0000)}.ring-inset{--tw-ring-inset:inset}.ring-transparent{--tw-ring-color:transparent}.ring-current{--tw-ring-color:currentColor}.ring-primary{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--p)/var(--tw-ring-opacity))}.ring-primary-focus{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--pf,var(--p))/var(--tw-ring-opacity))}.ring-primary-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--pc)/var(--tw-ring-opacity))}.ring-secondary{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--s)/var(--tw-ring-opacity))}.ring-secondary-focus{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--sf,var(--s))/var(--tw-ring-opacity))}.ring-secondary-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--sc)/var(--tw-ring-opacity))}.ring-accent{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--a)/var(--tw-ring-opacity))}.ring-accent-focus{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--af,var(--a))/var(--tw-ring-opacity))}.ring-accent-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--ac)/var(--tw-ring-opacity))}.ring-neutral{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--n)/var(--tw-ring-opacity))}.ring-neutral-focus{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--nf,var(--n))/var(--tw-ring-opacity))}.ring-neutral-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--nc)/var(--tw-ring-opacity))}.ring-base-100{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--b1)/var(--tw-ring-opacity))}.ring-base-200{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--b2,var(--b1))/var(--tw-ring-opacity))}.ring-base-300{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--b3,var(--b2))/var(--tw-ring-opacity))}.ring-base-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--bc)/var(--tw-ring-opacity))}.ring-info{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--in)/var(--tw-ring-opacity))}.ring-info-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--inc,var(--nc))/var(--tw-ring-opacity))}.ring-success{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--su)/var(--tw-ring-opacity))}.ring-success-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--suc,var(--nc))/var(--tw-ring-opacity))}.ring-warning{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--wa)/var(--tw-ring-opacity))}.ring-warning-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--wac,var(--nc))/var(--tw-ring-opacity))}.ring-error{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--er)/var(--tw-ring-opacity))}.ring-error-content{--tw-ring-opacity:1;--tw-ring-color:hsl(var(--erc,var(--nc))/var(--tw-ring-opacity))}.ring-opacity-0{--tw-ring-opacity:0}.ring-opacity-5{--tw-ring-opacity:.05}.ring-opacity-10{--tw-ring-opacity:.1}.ring-opacity-20{--tw-ring-opacity:.2}.ring-opacity-25{--tw-ring-opacity:.25}.ring-opacity-30{--tw-ring-opacity:.3}.ring-opacity-40{--tw-ring-opacity:.4}.ring-opacity-50{--tw-ring-opacity:.5}.ring-opacity-60{--tw-ring-opacity:.6}.ring-opacity-70{--tw-ring-opacity:.7}.ring-opacity-75{--tw-ring-opacity:.75}.ring-opacity-80{--tw-ring-opacity:.8}.ring-opacity-90{--tw-ring-opacity:.9}.ring-opacity-95{--tw-ring-opacity:.95}.ring-opacity-100{--tw-ring-opacity:1}.ring-offset-0{--tw-ring-offset-width:0px}.ring-offset-1{--tw-ring-offset-width:1px}.ring-offset-2{--tw-ring-offset-width:2px}.ring-offset-4{--tw-ring-offset-width:4px}.ring-offset-8{--tw-ring-offset-width:8px}.ring-offset-transparent{--tw-ring-offset-color:transparent}.ring-offset-current{--tw-ring-offset-color:currentColor}.ring-offset-primary{--tw-ring-offset-color:hsl(var(--p))}.ring-offset-primary-focus{--tw-ring-offset-color:hsl(var(--pf,var(--p)))}.ring-offset-primary-content{--tw-ring-offset-color:hsl(var(--pc))}.ring-offset-secondary{--tw-ring-offset-color:hsl(var(--s))}.ring-offset-secondary-focus{--tw-ring-offset-color:hsl(var(--sf,var(--s)))}.ring-offset-secondary-content{--tw-ring-offset-color:hsl(var(--sc))}.ring-offset-accent{--tw-ring-offset-color:hsl(var(--a))}.ring-offset-accent-focus{--tw-ring-offset-color:hsl(var(--af,var(--a)))}.ring-offset-accent-content{--tw-ring-offset-color:hsl(var(--ac))}.ring-offset-neutral{--tw-ring-offset-color:hsl(var(--n))}.ring-offset-neutral-focus{--tw-ring-offset-color:hsl(var(--nf,var(--n)))}.ring-offset-neutral-content{--tw-ring-offset-color:hsl(var(--nc))}.ring-offset-base-100{--tw-ring-offset-color:hsl(var(--b1))}.ring-offset-base-200{--tw-ring-offset-color:hsl(var(--b2,var(--b1)))}.ring-offset-base-300{--tw-ring-offset-color:hsl(var(--b3,var(--b2)))}.ring-offset-base-content{--tw-ring-offset-color:hsl(var(--bc))}.ring-offset-info{--tw-ring-offset-color:hsl(var(--in))}.ring-offset-info-content{--tw-ring-offset-color:hsl(var(--inc,var(--nc)))}.ring-offset-success{--tw-ring-offset-color:hsl(var(--su))}.ring-offset-success-content{--tw-ring-offset-color:hsl(var(--suc,var(--nc)))}.ring-offset-warning{--tw-ring-offset-color:hsl(var(--wa))}.ring-offset-warning-content{--tw-ring-offset-color:hsl(var(--wac,var(--nc)))}.ring-offset-error{--tw-ring-offset-color:hsl(var(--er))}.ring-offset-error-content{--tw-ring-offset-color:hsl(var(--erc,var(--nc)))}.transition-none{transition-property:none}.transition-all{transition-property:all;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.transition{transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.transition-colors{transition-property:color,background-color,border-color,-webkit-text-decoration-color,text-decoration-color,fill,stroke;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.transition-opacity{transition-property:opacity;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.transition-shadow{transition-property:box-shadow;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.transition-transform{transition-property:transform;transition-duration:.15s;transition-timing-function:cubic-bezier(.4,0,.2,1)}.rounded-box{border-radius:var(--rounded-box,1rem)}.rounded-t-box{border-top-left-radius:var(--rounded-box,1rem);border-top-right-radius:var(--rounded-box,1rem)}.rounded-b-box{border-bottom-left-radius:var(--rounded-box,1rem);border-bottom-right-radius:var(--rounded-box,1rem)}.rounded-l-box{border-top-left-radius:var(--rounded-box,1rem);border-bottom-left-radius:var(--rounded-box,1rem)}.rounded-r-box{border-top-right-radius:var(--rounded-box,1rem);border-bottom-right-radius:var(--rounded-box,1rem)}.rounded-br-box{border-bottom-right-radius:var(--rounded-box,1rem)}.rounded-tr-box{border-top-right-radius:var(--rounded-box,1rem)}.rounded-bl-box{border-bottom-left-radius:var(--rounded-box,1rem)}.rounded-tl-box{border-top-left-radius:var(--rounded-box,1rem)}.rounded-btn{border-radius:var(--rounded-btn,.5rem)}.rounded-badge{border-radius:var(--rounded-badge,1.9rem)}.text-2xs{font-size:.7rem}.glass,.glass:hover,.glass.btn-active{-webkit-backdrop-filter:blur(var(--glass-blur,40px));backdrop-filter:blur(var(--glass-blur,40px));background-color:#0000;background-image:linear-gradient( 135deg,rgb(255 255 255/var(--glass-opacity,30%))0%,#0000 100% ),linear-gradient( 
var(--glass-reflex-degree,100deg),rgb(255 255 255/var(--glass-reflex-opacity,10%))25%,#0000 25% );box-shadow:0 0 0 1px rgb(255 255 255/var(--glass-border-opacity,10%))inset,0 0 0 2px #0000000d;text-shadow:0 1px rgb(0 0 0/var(--glass-text-shadow-opacity,5%));border:0}.min-h-6{min-height:1.5rem}.min-h-8{min-height:2rem}.min-h-12{min-height:3rem}.min-h-16{min-height:4rem}.no-animation{--btn-focus-scale:1;--animation-btn:0;--animation-input:0}.tab-border-none{--tab-border:0px}.tab-border{--tab-border:1px}.tab-border-2{--tab-border:2px}.tab-border-3{--tab-border:3px}.tab-rounded-none{--tab-radius:0}.tab-rounded-lg{--tab-radius:.5rem}.artboard-demo{flex-direction:column;flex:none;justify-content:center;align-items:center;display:flex}.artboard.phone{width:320px}.artboard.phone-1{width:320px;height:568px}.artboard.phone-1.horizontal,.artboard.phone-1.artboard-horizontal{width:568px;height:320px}.artboard.phone-2{width:375px;height:667px}.artboard.phone-2.horizontal,.artboard.phone-2.artboard-horizontal{width:667px;height:375px}.artboard.phone-3{width:414px;height:736px}.artboard.phone-3.horizontal,.artboard.phone-3.artboard-horizontal{width:736px;height:414px}.artboard.phone-4{width:375px;height:812px}.artboard.phone-4.horizontal,.artboard.phone-4.artboard-horizontal{width:812px;height:375px}.artboard.phone-5{width:414px;height:896px}.artboard.phone-5.horizontal,.artboard.phone-5.artboard-horizontal{width:896px;height:414px}.artboard.phone-6{width:320px;height:1024px}.artboard.phone-6.horizontal,.artboard.phone-6.artboard-horizontal{width:1024px;height:320px}.badge-xs{height:.75rem;padding-left:.313rem;padding-right:.313rem;font-size:.75rem;line-height:.75rem}.badge-sm{height:1rem;padding-left:.438rem;padding-right:.438rem;font-size:.75rem;line-height:1rem}.badge-md{height:1.25rem;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem}.badge-lg{height:1.5rem;padding-left:.688rem;padding-right:.688rem;font-size:1rem;line-height:1.5rem}.btn-xs{height:1.5rem;min-height:1.5rem;padding-left:.5rem;padding-right:.5rem;font-size:.75rem}.btn-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem}.btn-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem}.btn-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem}.btn-wide{width:16rem}.btn-block{width:100%}.btn-square:where(.btn-xs){height:1.5rem;width:1.5rem;padding:0}.btn-square:where(.btn-sm){height:2rem;width:2rem;padding:0}.btn-square:where(.btn-md){height:3rem;width:3rem;padding:0}.btn-square:where(.btn-lg){height:4rem;width:4rem;padding:0}.btn-circle:where(.btn-xs){height:1.5rem;width:1.5rem;border-radius:9999px;padding:0}.btn-circle:where(.btn-sm){height:2rem;width:2rem;border-radius:9999px;padding:0}.btn-circle:where(.btn-md){height:3rem;width:3rem;border-radius:9999px;padding:0}.btn-circle:where(.btn-lg){height:4rem;width:4rem;border-radius:9999px;padding:0}.card-side{flex-direction:row;align-items:stretch}.card-side figure>*{max-width:unset}:where(.card-side 
figure>*){width:100%;height:100%;object-fit:cover}.checkbox-xs{height:1rem;width:1rem}.checkbox-sm{height:1.25rem;width:1.25rem}.checkbox-md{height:1.5rem;width:1.5rem}.checkbox-lg{height:2rem;width:2rem}.divider-horizontal{flex-direction:column}.divider-horizontal:before,.divider-horizontal:after{height:100%;width:.125rem}.divider-vertical{flex-direction:row}.divider-vertical:before,.divider-vertical:after{height:.125rem;width:100%}.input-md{height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2}.input-lg{height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem;line-height:2}.input-sm{height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem;line-height:2rem}.input-xs{height:1.5rem;padding-left:.5rem;padding-right:.5rem;font-size:.75rem;line-height:1.625}.kbd-xs{min-height:1.2em;min-width:1.2em;padding-left:.25rem;padding-right:.25rem;font-size:.75rem;line-height:1rem}.kbd-sm{min-height:1.6em;min-width:1.6em;padding-left:.25rem;padding-right:.25rem;font-size:.875rem;line-height:1.25rem}.kbd-md{min-height:2.2em;min-width:2.2em;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem}.kbd-lg{min-height:2.5em;min-width:2.5em;padding-left:1rem;padding-right:1rem;font-size:1.125rem;line-height:1.75rem}.menu-vertical{flex-direction:column}.menu-vertical :where(li){flex-direction:column}.menu-vertical>:where(li)>:where(ul){top:initial;left:100%}.menu-horizontal{flex-direction:row;display:inline-flex}.menu-horizontal :where(li){flex-direction:row}.menu-horizontal>:where(li)>:where(ul){top:100%;left:initial}.radio-xs{height:1rem;width:1rem}.radio-sm{height:1.25rem;width:1.25rem}.radio-md{height:1.5rem;width:1.5rem}.radio-lg{height:2rem;width:2rem}.range-xs{height:1rem}.range-xs::-webkit-slider-runnable-track{height:.25rem}.range-xs::-moz-range-track{height:.25rem}.range-xs::-webkit-slider-thumb{height:1rem;width:1rem;--filler-offset:.4rem}.range-xs::-moz-range-thumb{height:1rem;width:1rem;--filler-offset:.4rem}.range-sm{height:1.25rem}.range-sm::-webkit-slider-runnable-track{height:.25rem}.range-sm::-moz-range-track{height:.25rem}.range-sm::-webkit-slider-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.range-sm::-moz-range-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.range-md{height:1.5rem}.range-md::-webkit-slider-runnable-track{height:.5rem}.range-md::-moz-range-track{height:.5rem}.range-md::-webkit-slider-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.range-md::-moz-range-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.range-lg{height:2rem}.range-lg::-webkit-slider-runnable-track{height:1rem}.range-lg::-moz-range-track{height:1rem}.range-lg::-webkit-slider-thumb{height:2rem;width:2rem;--filler-offset:1rem}.range-lg::-moz-range-thumb{height:2rem;width:2rem;--filler-offset:1rem}.rating-xs input{height:.75rem;width:.75rem}.rating-sm input{height:1rem;width:1rem}.rating-md input{height:1.5rem;width:1.5rem}.rating-lg input{height:2.5rem;width:2.5rem}.rating-half.rating-xs input:not(.rating-hidden){width:.375rem}.rating-half.rating-sm input:not(.rating-hidden){width:.5rem}.rating-half.rating-md input:not(.rating-hidden){width:.75rem}.rating-half.rating-lg 
input:not(.rating-hidden){width:1.25rem}.select-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;line-height:2}.select-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:2rem;font-size:1.125rem;line-height:2}.select-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:2rem;font-size:.875rem;line-height:2rem}.select-xs{height:1.5rem;min-height:1.5rem;padding-left:.5rem;padding-right:2rem;font-size:.75rem;line-height:1.625}.stats-horizontal{grid-auto-flow:column}.stats-vertical{grid-auto-flow:row}.steps-horizontal{grid-auto-columns:1fr;grid-auto-flow:column;display:inline-grid;overflow-x:auto;overflow-y:hidden}.steps-horizontal .step{text-align:center;grid-template-rows:repeat(2,minmax(0,1fr));grid-template-columns:repeat(1,minmax(0,1fr));place-items:center;display:grid}.steps-vertical{grid-auto-rows:1fr;grid-auto-flow:row}.steps-vertical .step{grid-template-rows:repeat(1,minmax(0,1fr));grid-template-columns:repeat(2,minmax(0,1fr));display:grid}.tab-md{height:2rem;--tab-padding:1rem;font-size:.875rem;line-height:2}.tab-lg{height:3rem;--tab-padding:1.25rem;font-size:1.125rem;line-height:2}.tab-sm{height:1.5rem;--tab-padding:.75rem;font-size:.875rem;line-height:.75rem}.tab-xs{height:1.25rem;--tab-padding:.5rem;font-size:.75rem;line-height:.75rem}.toggle-xs{--handleoffset:.5rem;height:1rem;width:1.5rem}.toggle-sm{--handleoffset:.75rem;height:1.25rem;width:2rem}.toggle-md{--handleoffset:1.5rem;height:1.5rem;width:3rem}.toggle-lg{--handleoffset:2rem;height:2rem;width:4rem}.alert-sm{padding:.5rem}.alert-info{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--inc,var(--nc))/var(--tw-text-opacity))}.alert-success{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--suc,var(--nc))/var(--tw-text-opacity))}.alert-warning{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--wac,var(--nc))/var(--tw-text-opacity))}.alert-error{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--erc,var(--nc))/var(--tw-text-opacity))}.artboard-demo{--tw-bg-opacity:1;background-color:hsl(var(--b1)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));border-radius:var(--rounded-box,1rem);box-shadow:0 1px 3px #0000001a,0 1px 2px #0000000f}.avatar.online:before{content:"";z-index:10;--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));width:15%;height:15%;box-shadow:0 0 0 2px hsl(var(--b1));border-radius:9999px;display:block;position:absolute;top:7%;right:7%}.avatar.offline:before{content:"";z-index:10;--tw-bg-opacity:1;background-color:hsl(var(--b3,var(--b2))/var(--tw-bg-opacity));width:15%;height:15%;box-shadow:0 0 0 2px 
hsl(var(--b1));border-radius:9999px;display:block;position:absolute;top:7%;right:7%}.badge-primary{--tw-border-opacity:1;border-color:hsl(var(--p)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--p)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--pc)/var(--tw-text-opacity))}.badge-secondary{--tw-border-opacity:1;border-color:hsl(var(--s)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--s)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--sc)/var(--tw-text-opacity))}.badge-accent{--tw-border-opacity:1;border-color:hsl(var(--a)/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--a)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--ac)/var(--tw-text-opacity))}.badge-ghost{--tw-border-opacity:1;border-color:hsl(var(--b2,var(--b1))/var(--tw-border-opacity));--tw-bg-opacity:1;background-color:hsl(var(--b2,var(--b1))/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity))}.badge-outline{--tw-border-opacity:.5;--tw-text-opacity:1;color:hsl(var(--bc)/var(--tw-text-opacity));background-color:#0000;border-color:currentColor}.badge-outline.badge-primary{--tw-text-opacity:1;color:hsl(var(--p)/var(--tw-text-opacity))}.badge-outline.badge-secondary{--tw-text-opacity:1;color:hsl(var(--s)/var(--tw-text-opacity))}.badge-outline.badge-accent{--tw-text-opacity:1;color:hsl(var(--a)/var(--tw-text-opacity))}.badge-info{--tw-bg-opacity:1;background-color:hsl(var(--in)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--inc,var(--nc))/var(--tw-text-opacity));border-color:#0000}.badge-success{--tw-bg-opacity:1;background-color:hsl(var(--su)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--suc,var(--nc))/var(--tw-text-opacity));border-color:#0000}.badge-warning{--tw-bg-opacity:1;background-color:hsl(var(--wa)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--wac,var(--nc))/var(--tw-text-opacity));border-color:#0000}.badge-error{--tw-bg-opacity:1;background-color:hsl(var(--er)/var(--tw-bg-opacity));--tw-text-opacity:1;color:hsl(var(--erc,var(--nc))/var(--tw-text-opacity));border-color:#0000}.card-compact .card-body{padding:1rem;font-size:.875rem;line-height:1.25rem}.card-compact .card-title{margin-bottom:.25rem}.card-normal .card-body{padding:var(--padding-card,2rem);font-size:1rem;line-height:1.5rem}.card-normal .card-title{margin-bottom:.75rem}.divider-horizontal{height:auto;width:1rem;margin:0 1rem}.divider-vertical{height:1rem;width:auto;margin:1rem 0}.menu-vertical :where(li.bordered>*){border-bottom-width:0;border-left-width:4px}.menu-horizontal :where(li.bordered>*){border-bottom-width:4px;border-left-width:0}.menu-normal :where(li>*){padding-top:.75rem;padding-bottom:.75rem;font-size:1rem;line-height:1.5rem}.menu-compact 
:where(li>*){padding-top:.5rem;padding-bottom:.5rem;font-size:.875rem;line-height:1.25rem}.menu-vertical>:where(li:first-child){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu-vertical>:where(li:first-child)>:where(:not(ul)){border-top-left-radius:inherit;border-top-right-radius:inherit;border-bottom-right-radius:unset;border-bottom-left-radius:unset}.menu-vertical>:where(li:last-child){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.menu-vertical>:where(li:last-child)>:where(:not(ul)){border-top-left-radius:unset;border-top-right-radius:unset;border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.menu-horizontal>:where(li:first-child){border-top-left-radius:inherit;border-top-right-radius:unset;border-bottom-right-radius:unset;border-bottom-left-radius:inherit}.menu-horizontal>:where(li:first-child)>:where(:not(ul)){border-top-left-radius:inherit;border-top-right-radius:unset;border-bottom-right-radius:unset;border-bottom-left-radius:inherit}.menu-horizontal>:where(li:last-child){border-top-left-radius:unset;border-top-right-radius:inherit;border-bottom-right-radius:inherit;border-bottom-left-radius:unset}.menu-horizontal>:where(li:last-child)>:where(:not(ul)){border-top-left-radius:unset;border-top-right-radius:inherit;border-bottom-right-radius:inherit;border-bottom-left-radius:unset}.stats-horizontal>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:0;border-right-width:calc(1px*var(--tw-divide-x-reverse));border-left-width:calc(1px*calc(1 - var(--tw-divide-x-reverse)));--tw-divide-y-reverse:0;border-top-width:calc(0px*calc(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(0px*var(--tw-divide-y-reverse))}.stats-horizontal{overflow-x:auto}.stats-vertical>:not([hidden])~:not([hidden]){--tw-divide-y-reverse:0;border-top-width:calc(1px*calc(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(1px*var(--tw-divide-y-reverse));--tw-divide-x-reverse:0;border-right-width:calc(0px*var(--tw-divide-x-reverse));border-left-width:calc(0px*calc(1 - var(--tw-divide-x-reverse)))}.stats-vertical{overflow-y:auto}.steps-horizontal .step{min-width:4rem;grid-template-rows:40px 1fr;grid-template-columns:auto}.steps-horizontal .step:before{height:.5rem;width:100%;--tw-translate-y:0px;--tw-translate-x:0px;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));content:"";margin-left:-100%}.steps-vertical .step{min-height:4rem;grid-template-rows:auto;grid-template-columns:40px 1fr;justify-items:start;gap:.5rem}.steps-vertical .step:before{height:100%;width:.5rem;--tw-translate-y:-50%;--tw-translate-x:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y))rotate(var(--tw-rotate))skewX(var(--tw-skew-x))skewY(var(--tw-skew-y))scaleX(var(--tw-scale-x))scaleY(var(--tw-scale-y));margin-left:50%}.table-normal :where(th,td){padding:1rem;font-size:1rem;line-height:1.5rem}.table-compact :where(th,td){padding:.5rem;font-size:.875rem;line-height:1.25rem}@media 
(min-width:640px){.sm\:input-group-md{font-size:.875rem;line-height:2}.sm\:input-group-lg{font-size:1.125rem;line-height:2}.sm\:input-group-sm{font-size:.875rem;line-height:2rem}.btn.loading.sm\:btn-xl:before,.btn.loading.btn-lg:before,.btn.loading.btn-xl:before,.btn.loading.sm\:btn-lg:before{height:1.25rem;width:1.25rem}.btn.loading.sm\:btn-sm:before,.btn.loading.btn-xs:before{height:.75rem;width:.75rem}.sm\:tab-rounded-lg{--tab-radius:.5rem}.sm\:badge-sm{height:1rem;padding-left:.438rem;padding-right:.438rem;font-size:.75rem;line-height:1rem}.sm\:badge-md{height:1.25rem;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem}.sm\:badge-lg{height:1.5rem;padding-left:.688rem;padding-right:.688rem;font-size:1rem;line-height:1.5rem}.sm\:btn-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem}.sm\:btn-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem}.sm\:btn-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem}.btn-square:where(.sm\:btn-sm){height:2rem;width:2rem;padding:0}.btn-square:where(.sm\:btn-md){height:3rem;width:3rem;padding:0}.btn-square:where(.sm\:btn-lg){height:4rem;width:4rem;padding:0}.btn-circle:where(.sm\:btn-sm){height:2rem;width:2rem;border-radius:9999px;padding:0}.btn-circle:where(.sm\:btn-md){height:3rem;width:3rem;border-radius:9999px;padding:0}.btn-circle:where(.sm\:btn-lg){height:4rem;width:4rem;border-radius:9999px;padding:0}.sm\:checkbox-sm{height:1.25rem;width:1.25rem}.sm\:checkbox-md{height:1.5rem;width:1.5rem}.sm\:checkbox-lg{height:2rem;width:2rem}.sm\:input-md{height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2}.sm\:input-lg{height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem;line-height:2}.sm\:input-sm{height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem;line-height:2rem}.sm\:kbd-sm{min-height:1.6em;min-width:1.6em;padding-left:.25rem;padding-right:.25rem;font-size:.875rem;line-height:1.25rem}.sm\:kbd-md{min-height:2.2em;min-width:2.2em;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem}.sm\:kbd-lg{min-height:2.5em;min-width:2.5em;padding-left:1rem;padding-right:1rem;font-size:1.125rem;line-height:1.75rem}.sm\:radio-sm{height:1.25rem;width:1.25rem}.sm\:radio-md{height:1.5rem;width:1.5rem}.sm\:radio-lg{height:2rem;width:2rem}.sm\:range-sm{height:1.25rem}.sm\:range-sm::-webkit-slider-runnable-track{height:.25rem}.sm\:range-sm::-moz-range-track{height:.25rem}.sm\:range-sm::-webkit-slider-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.sm\:range-sm::-moz-range-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.sm\:range-md{height:1.5rem}.sm\:range-md::-webkit-slider-runnable-track{height:.5rem}.sm\:range-md::-moz-range-track{height:.5rem}.sm\:range-md::-webkit-slider-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.sm\:range-md::-moz-range-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.sm\:range-lg{height:2rem}.sm\:range-lg::-webkit-slider-runnable-track{height:1rem}.sm\:range-lg::-moz-range-track{height:1rem}.sm\:range-lg::-webkit-slider-thumb{height:2rem;width:2rem;--filler-offset:1rem}.sm\:range-lg::-moz-range-thumb{height:2rem;width:2rem;--filler-offset:1rem}.sm\:rating-sm input{height:1rem;width:1rem}.sm\:rating-md input{height:1.5rem;width:1.5rem}.sm\:rating-lg input{height:2.5rem;width:2.5rem}.rating-half.sm\:rating-sm input:not(.rating-hidden){width:.5rem}.rating-half.sm\:rating-md 
input:not(.rating-hidden){width:.75rem}.rating-half.sm\:rating-lg input:not(.rating-hidden){width:1.25rem}.sm\:select-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;line-height:2}.sm\:select-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:2rem;font-size:1.125rem;line-height:2}.sm\:select-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:2rem;font-size:.875rem;line-height:2rem}.sm\:tab-md{height:2rem;--tab-padding:1rem;font-size:.875rem;line-height:2}.sm\:tab-lg{height:3rem;--tab-padding:1.25rem;font-size:1.125rem;line-height:2}.sm\:tab-sm{height:1.5rem;--tab-padding:.75rem;font-size:.875rem;line-height:.75rem}.sm\:toggle-sm{--handleoffset:.75rem;height:1.25rem;width:2rem}.sm\:toggle-md{--handleoffset:1.5rem;height:1.5rem;width:3rem}.sm\:toggle-lg{--handleoffset:2rem;height:2rem;width:4rem}.sm\:alert-sm{padding:.5rem}}@media (min-width:768px){.md\:input-group-md{font-size:.875rem;line-height:2}.md\:input-group-lg{font-size:1.125rem;line-height:2}.md\:input-group-sm{font-size:.875rem;line-height:2rem}.btn.loading.md\:btn-xl:before,.btn.loading.btn-lg:before,.btn.loading.btn-xl:before,.btn.loading.md\:btn-lg:before{height:1.25rem;width:1.25rem}.btn.loading.md\:btn-sm:before,.btn.loading.btn-xs:before{height:.75rem;width:.75rem}.md\:tab-rounded-lg{--tab-radius:.5rem}.md\:badge-sm{height:1rem;padding-left:.438rem;padding-right:.438rem;font-size:.75rem;line-height:1rem}.md\:badge-md{height:1.25rem;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem}.md\:badge-lg{height:1.5rem;padding-left:.688rem;padding-right:.688rem;font-size:1rem;line-height:1.5rem}.md\:btn-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem}.md\:btn-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem}.md\:btn-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem}.btn-square:where(.md\:btn-sm){height:2rem;width:2rem;padding:0}.btn-square:where(.md\:btn-md){height:3rem;width:3rem;padding:0}.btn-square:where(.md\:btn-lg){height:4rem;width:4rem;padding:0}.btn-circle:where(.md\:btn-sm){height:2rem;width:2rem;border-radius:9999px;padding:0}.btn-circle:where(.md\:btn-md){height:3rem;width:3rem;border-radius:9999px;padding:0}.btn-circle:where(.md\:btn-lg){height:4rem;width:4rem;border-radius:9999px;padding:0}.md\:checkbox-sm{height:1.25rem;width:1.25rem}.md\:checkbox-md{height:1.5rem;width:1.5rem}.md\:checkbox-lg{height:2rem;width:2rem}.md\:input-md{height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2}.md\:input-lg{height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem;line-height:2}.md\:input-sm{height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem;line-height:2rem}.md\:kbd-sm{min-height:1.6em;min-width:1.6em;padding-left:.25rem;padding-right:.25rem;font-size:.875rem;line-height:1.25rem}.md\:kbd-md{min-height:2.2em;min-width:2.2em;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem}.md\:kbd-lg{min-height:2.5em;min-width:2.5em;padding-left:1rem;padding-right:1rem;font-size:1.125rem;line-height:1.75rem}.md\:radio-sm{height:1.25rem;width:1.25rem}.md\:radio-md{height:1.5rem;width:1.5rem}.md\:radio-lg{height:2rem;width:2rem}.md\:range-sm{height:1.25rem}.md\:range-sm::-webkit-slider-runnable-track{height:.25rem}.md\:range-sm::-moz-range-track{height:.25rem}.md\:range-sm::-webkit-slider-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.md\:ran
ge-sm::-moz-range-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.md\:range-md{height:1.5rem}.md\:range-md::-webkit-slider-runnable-track{height:.5rem}.md\:range-md::-moz-range-track{height:.5rem}.md\:range-md::-webkit-slider-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.md\:range-md::-moz-range-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.md\:range-lg{height:2rem}.md\:range-lg::-webkit-slider-runnable-track{height:1rem}.md\:range-lg::-moz-range-track{height:1rem}.md\:range-lg::-webkit-slider-thumb{height:2rem;width:2rem;--filler-offset:1rem}.md\:range-lg::-moz-range-thumb{height:2rem;width:2rem;--filler-offset:1rem}.md\:rating-sm input{height:1rem;width:1rem}.md\:rating-md input{height:1.5rem;width:1.5rem}.md\:rating-lg input{height:2.5rem;width:2.5rem}.rating-half.md\:rating-sm input:not(.rating-hidden){width:.5rem}.rating-half.md\:rating-md input:not(.rating-hidden){width:.75rem}.rating-half.md\:rating-lg input:not(.rating-hidden){width:1.25rem}.md\:select-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;line-height:2}.md\:select-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:2rem;font-size:1.125rem;line-height:2}.md\:select-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:2rem;font-size:.875rem;line-height:2rem}.md\:tab-md{height:2rem;--tab-padding:1rem;font-size:.875rem;line-height:2}.md\:tab-lg{height:3rem;--tab-padding:1.25rem;font-size:1.125rem;line-height:2}.md\:tab-sm{height:1.5rem;--tab-padding:.75rem;font-size:.875rem;line-height:.75rem}.md\:toggle-sm{--handleoffset:.75rem;height:1.25rem;width:2rem}.md\:toggle-md{--handleoffset:1.5rem;height:1.5rem;width:3rem}.md\:toggle-lg{--handleoffset:2rem;height:2rem;width:4rem}.md\:alert-sm{padding:.5rem}}@media 
(min-width:1024px){.lg\:input-group-md{font-size:.875rem;line-height:2}.lg\:input-group-lg{font-size:1.125rem;line-height:2}.lg\:input-group-sm{font-size:.875rem;line-height:2rem}.btn.loading.lg\:btn-xl:before,.btn.loading.btn-lg:before,.btn.loading.btn-xl:before,.btn.loading.lg\:btn-lg:before{height:1.25rem;width:1.25rem}.btn.loading.lg\:btn-sm:before,.btn.loading.btn-xs:before{height:.75rem;width:.75rem}.lg\:tab-rounded-lg{--tab-radius:.5rem}.lg\:badge-sm{height:1rem;padding-left:.438rem;padding-right:.438rem;font-size:.75rem;line-height:1rem}.lg\:badge-md{height:1.25rem;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem}.lg\:badge-lg{height:1.5rem;padding-left:.688rem;padding-right:.688rem;font-size:1rem;line-height:1.5rem}.lg\:btn-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem}.lg\:btn-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem}.lg\:btn-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem}.btn-square:where(.lg\:btn-sm){height:2rem;width:2rem;padding:0}.btn-square:where(.lg\:btn-md){height:3rem;width:3rem;padding:0}.btn-square:where(.lg\:btn-lg){height:4rem;width:4rem;padding:0}.btn-circle:where(.lg\:btn-sm){height:2rem;width:2rem;border-radius:9999px;padding:0}.btn-circle:where(.lg\:btn-md){height:3rem;width:3rem;border-radius:9999px;padding:0}.btn-circle:where(.lg\:btn-lg){height:4rem;width:4rem;border-radius:9999px;padding:0}.lg\:checkbox-sm{height:1.25rem;width:1.25rem}.lg\:checkbox-md{height:1.5rem;width:1.5rem}.lg\:checkbox-lg{height:2rem;width:2rem}.lg\:input-md{height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2}.lg\:input-lg{height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem;line-height:2}.lg\:input-sm{height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem;line-height:2rem}.lg\:kbd-sm{min-height:1.6em;min-width:1.6em;padding-left:.25rem;padding-right:.25rem;font-size:.875rem;line-height:1.25rem}.lg\:kbd-md{min-height:2.2em;min-width:2.2em;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem}.lg\:kbd-lg{min-height:2.5em;min-width:2.5em;padding-left:1rem;padding-right:1rem;font-size:1.125rem;line-height:1.75rem}.lg\:radio-sm{height:1.25rem;width:1.25rem}.lg\:radio-md{height:1.5rem;width:1.5rem}.lg\:radio-lg{height:2rem;width:2rem}.lg\:range-sm{height:1.25rem}.lg\:range-sm::-webkit-slider-runnable-track{height:.25rem}.lg\:range-sm::-moz-range-track{height:.25rem}.lg\:range-sm::-webkit-slider-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.lg\:range-sm::-moz-range-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.lg\:range-md{height:1.5rem}.lg\:range-md::-webkit-slider-runnable-track{height:.5rem}.lg\:range-md::-moz-range-track{height:.5rem}.lg\:range-md::-webkit-slider-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.lg\:range-md::-moz-range-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.lg\:range-lg{height:2rem}.lg\:range-lg::-webkit-slider-runnable-track{height:1rem}.lg\:range-lg::-moz-range-track{height:1rem}.lg\:range-lg::-webkit-slider-thumb{height:2rem;width:2rem;--filler-offset:1rem}.lg\:range-lg::-moz-range-thumb{height:2rem;width:2rem;--filler-offset:1rem}.lg\:rating-sm input{height:1rem;width:1rem}.lg\:rating-md input{height:1.5rem;width:1.5rem}.lg\:rating-lg input{height:2.5rem;width:2.5rem}.rating-half.lg\:rating-sm input:not(.rating-hidden){width:.5rem}.rating-half.lg\:rating-md 
input:not(.rating-hidden){width:.75rem}.rating-half.lg\:rating-lg input:not(.rating-hidden){width:1.25rem}.lg\:select-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;line-height:2}.lg\:select-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:2rem;font-size:1.125rem;line-height:2}.lg\:select-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:2rem;font-size:.875rem;line-height:2rem}.lg\:tab-md{height:2rem;--tab-padding:1rem;font-size:.875rem;line-height:2}.lg\:tab-lg{height:3rem;--tab-padding:1.25rem;font-size:1.125rem;line-height:2}.lg\:tab-sm{height:1.5rem;--tab-padding:.75rem;font-size:.875rem;line-height:.75rem}.lg\:toggle-sm{--handleoffset:.75rem;height:1.25rem;width:2rem}.lg\:toggle-md{--handleoffset:1.5rem;height:1.5rem;width:3rem}.lg\:toggle-lg{--handleoffset:2rem;height:2rem;width:4rem}.lg\:alert-sm{padding:.5rem}}@media (min-width:1280px){.xl\:input-group-md{font-size:.875rem;line-height:2}.xl\:input-group-lg{font-size:1.125rem;line-height:2}.xl\:input-group-sm{font-size:.875rem;line-height:2rem}.btn.loading.xl\:btn-xl:before,.btn.loading.btn-lg:before,.btn.loading.btn-xl:before,.btn.loading.xl\:btn-lg:before{height:1.25rem;width:1.25rem}.btn.loading.xl\:btn-sm:before,.btn.loading.btn-xs:before{height:.75rem;width:.75rem}.xl\:tab-rounded-lg{--tab-radius:.5rem}.xl\:badge-sm{height:1rem;padding-left:.438rem;padding-right:.438rem;font-size:.75rem;line-height:1rem}.xl\:badge-md{height:1.25rem;padding-left:.563rem;padding-right:.563rem;font-size:.875rem;line-height:1.25rem}.xl\:badge-lg{height:1.5rem;padding-left:.688rem;padding-right:.688rem;font-size:1rem;line-height:1.5rem}.xl\:btn-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem}.xl\:btn-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem}.xl\:btn-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem}.btn-square:where(.xl\:btn-sm){height:2rem;width:2rem;padding:0}.btn-square:where(.xl\:btn-md){height:3rem;width:3rem;padding:0}.btn-square:where(.xl\:btn-lg){height:4rem;width:4rem;padding:0}.btn-circle:where(.xl\:btn-sm){height:2rem;width:2rem;border-radius:9999px;padding:0}.btn-circle:where(.xl\:btn-md){height:3rem;width:3rem;border-radius:9999px;padding:0}.btn-circle:where(.xl\:btn-lg){height:4rem;width:4rem;border-radius:9999px;padding:0}.xl\:checkbox-sm{height:1.25rem;width:1.25rem}.xl\:checkbox-md{height:1.5rem;width:1.5rem}.xl\:checkbox-lg{height:2rem;width:2rem}.xl\:input-md{height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:2}.xl\:input-lg{height:4rem;padding-left:1.5rem;padding-right:1.5rem;font-size:1.125rem;line-height:2}.xl\:input-sm{height:2rem;padding-left:.75rem;padding-right:.75rem;font-size:.875rem;line-height:2rem}.xl\:kbd-sm{min-height:1.6em;min-width:1.6em;padding-left:.25rem;padding-right:.25rem;font-size:.875rem;line-height:1.25rem}.xl\:kbd-md{min-height:2.2em;min-width:2.2em;padding-left:.5rem;padding-right:.5rem;font-size:1rem;line-height:1.5rem}.xl\:kbd-lg{min-height:2.5em;min-width:2.5em;padding-left:1rem;padding-right:1rem;font-size:1.125rem;line-height:1.75rem}.xl\:radio-sm{height:1.25rem;width:1.25rem}.xl\:radio-md{height:1.5rem;width:1.5rem}.xl\:radio-lg{height:2rem;width:2rem}.xl\:range-sm{height:1.25rem}.xl\:range-sm::-webkit-slider-runnable-track{height:.25rem}.xl\:range-sm::-moz-range-track{height:.25rem}.xl\:range-sm::-webkit-slider-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.xl\:ra
nge-sm::-moz-range-thumb{height:1.25rem;width:1.25rem;--filler-offset:.5rem}.xl\:range-md{height:1.5rem}.xl\:range-md::-webkit-slider-runnable-track{height:.5rem}.xl\:range-md::-moz-range-track{height:.5rem}.xl\:range-md::-webkit-slider-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.xl\:range-md::-moz-range-thumb{height:1.5rem;width:1.5rem;--filler-offset:.6rem}.xl\:range-lg{height:2rem}.xl\:range-lg::-webkit-slider-runnable-track{height:1rem}.xl\:range-lg::-moz-range-track{height:1rem}.xl\:range-lg::-webkit-slider-thumb{height:2rem;width:2rem;--filler-offset:1rem}.xl\:range-lg::-moz-range-thumb{height:2rem;width:2rem;--filler-offset:1rem}.xl\:rating-sm input{height:1rem;width:1rem}.xl\:rating-md input{height:1.5rem;width:1.5rem}.xl\:rating-lg input{height:2.5rem;width:2.5rem}.rating-half.xl\:rating-sm input:not(.rating-hidden){width:.5rem}.rating-half.xl\:rating-md input:not(.rating-hidden){width:.75rem}.rating-half.xl\:rating-lg input:not(.rating-hidden){width:1.25rem}.xl\:select-md{height:3rem;min-height:3rem;padding-left:1rem;padding-right:2.5rem;font-size:.875rem;line-height:2}.xl\:select-lg{height:4rem;min-height:4rem;padding-left:1.5rem;padding-right:2rem;font-size:1.125rem;line-height:2}.xl\:select-sm{height:2rem;min-height:2rem;padding-left:.75rem;padding-right:2rem;font-size:.875rem;line-height:2rem}.xl\:tab-md{height:2rem;--tab-padding:1rem;font-size:.875rem;line-height:2}.xl\:tab-lg{height:3rem;--tab-padding:1.25rem;font-size:1.125rem;line-height:2}.xl\:tab-sm{height:1.5rem;--tab-padding:.75rem;font-size:.875rem;line-height:.75rem}.xl\:toggle-sm{--handleoffset:.75rem;height:1.25rem;width:2rem}.xl\:toggle-md{--handleoffset:1.5rem;height:1.5rem;width:3rem}.xl\:toggle-lg{--handleoffset:2rem;height:2rem;width:4rem}.xl\:alert-sm{padding:.5rem}} \ No newline at end of file diff --git a/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/point_cloud_extractor.py b/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/point_cloud_extractor.py deleted file mode 100644 index e2b8568eef6413e2e024025b21f3d07401b0f223..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/point_cloud_extractor.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import glob, os -import numpy as np -import cv2 -import torch - - -def extractor(input_path, output_path): - if not os.path.exists(output_path): - os.mkdir(output_path) - - # Load Depth Camera Intrinsic - depth_intrinsic = np.loadtxt(input_path + '/intrinsic/intrinsic_depth.txt') - print('Depth intrinsic: ') - print(depth_intrinsic) - - # Compute Camera Distance (just for demo, so you can choose the camera distance in frame sampling) - poses = sorted(glob.glob(input_path + '/pose/*.txt'), key=lambda a: int(os.path.basename(a).split('.')[0])) - depths = sorted(glob.glob(input_path + '/depth/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0])) - colors = sorted(glob.glob(input_path + '/color/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0])) - - # Get Aligned Point Clouds.
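- # The loop below back-projects every valid depth pixel to a world-space
- # point with the standard pinhole model (a sketch of the math implemented
- # below, assuming `pose` is a 4x4 camera-to-world matrix, as in ScanNet):
- #   X = (u - cx) * d / fx + bx,   Y = (v - cy) * d / fy + by,   Z = d
- #   [Xw, Yw, Zw, 1] = [X, Y, Z, 1] @ pose^T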
- for ind, (pose, depth, color) in enumerate(zip(poses, depths, colors)): - name = os.path.basename(pose).split('.')[0] - - if os.path.exists(output_path + '/{}.npz'.format(name)): - continue - - try: - print('=' * 50, ': {}'.format(pose)) - depth_img = cv2.imread(depth, -1) # read 16bit grayscale image - mask = (depth_img != 0) - color_image = cv2.imread(color) - color_image = cv2.resize(color_image, (640, 480)) - color_image = np.reshape(color_image[mask], [-1, 3]) - colors_rgb = np.zeros_like(color_image) # swap OpenCV's BGR channel order to RGB - colors_rgb[:, 0] = color_image[:, 2] - colors_rgb[:, 1] = color_image[:, 1] - colors_rgb[:, 2] = color_image[:, 0] - - pose = np.loadtxt(poses[ind]) - print('Camera pose: ') - print(pose) - - depth_shift = 1000.0 - x, y = np.meshgrid(np.linspace(0, depth_img.shape[1] - 1, depth_img.shape[1]), - np.linspace(0, depth_img.shape[0] - 1, depth_img.shape[0])) - uv_depth = np.zeros((depth_img.shape[0], depth_img.shape[1], 3)) - uv_depth[:, :, 0] = x - uv_depth[:, :, 1] = y - uv_depth[:, :, 2] = depth_img / depth_shift - uv_depth = np.reshape(uv_depth, [-1, 3]) - uv_depth = uv_depth[np.where(uv_depth[:, 2] != 0), :].squeeze() - - intrinsic_inv = np.linalg.inv(depth_intrinsic) - fx = depth_intrinsic[0, 0] - fy = depth_intrinsic[1, 1] - cx = depth_intrinsic[0, 2] - cy = depth_intrinsic[1, 2] - bx = depth_intrinsic[0, 3] - by = depth_intrinsic[1, 3] - point_list = [] - n = uv_depth.shape[0] - points = np.ones((n, 4)) - X = (uv_depth[:, 0] - cx) * uv_depth[:, 2] / fx + bx - Y = (uv_depth[:, 1] - cy) * uv_depth[:, 2] / fy + by - points[:, 0] = X - points[:, 1] = Y - points[:, 2] = uv_depth[:, 2] - points_world = np.dot(points, np.transpose(pose)) - print(points_world.shape) - - pcd = dict(coord=points_world[:, :3], color=colors_rgb) - # pcd_save = np.zeros((points_world.shape[0], 7)) - # pcd_save[:, :3] = points_world[:, :3] - # pcd_save[:, 3:6] = colors_rgb - - # print('Saving npz file...') - # np.savez(output_path + '/{}.npz'.format(name), pcd=pcd_save) - torch.save(pcd, output_path + '/{}.pth'.format(name)) - except Exception: # avoid a bare except; skip frames that fail to load or project - continue - - diff --git a/spaces/jerpint/RAGTheDocs/cfg.py b/spaces/jerpint/RAGTheDocs/cfg.py deleted file mode 100644 index eb55049c31b7e47630501e305bdd31e785f1ce4c..0000000000000000000000000000000000000000 --- a/spaces/jerpint/RAGTheDocs/cfg.py +++ /dev/null @@ -1,109 +0,0 @@ -from buster.busterbot import Buster, BusterConfig -from buster.completers import ChatGPTCompleter, DocumentAnswerer -from buster.formatters.documents import DocumentsFormatterJSON -from buster.formatters.prompts import PromptFormatter -from buster.retriever import DeepLakeRetriever, Retriever -from buster.tokenizers import GPTTokenizer -from buster.validators import QuestionAnswerValidator, Validator - -buster_cfg = BusterConfig( - retriever_cfg={ - "path": "outputs/deeplake_store", - "top_k": 3, - "thresh": 0.7, - "max_tokens": 2000, - "embedding_model": "text-embedding-ada-002", - }, - documents_answerer_cfg={ - "no_documents_message": "No documents are available for this question.", - }, - completion_cfg={ - "completion_kwargs": { - "model": "gpt-3.5-turbo", - "stream": True, - "temperature": 0, - }, - }, - tokenizer_cfg={ - "model_name": "gpt-3.5-turbo", - }, - documents_formatter_cfg={ - "max_tokens": 3500, - "columns": ["content", "title", "source"], - }, - prompt_formatter_cfg={ - "max_tokens": 3500, - "text_before_docs": ( - "You are a chatbot assistant answering technical questions about artificial intelligence (AI). "
- "You can only respond to a question if the content necessary to answer the question is contained in the following provided documentation. " - "If the answer is in the documentation, summarize it in a helpful way to the user. " - "If it isn't, simply reply that you cannot answer the question. " - "Do not refer to the documentation directly, but use the instructions provided within it to answer questions. " - "Here is the documentation:\n" - ), - "text_after_docs": ( - "REMEMBER:\n" - "You are a chatbot assistant answering technical questions about artificial intelligence (AI)." - "Here are the rules you must follow:\n" - "1) You must only respond with information contained in the documentation above. Say you do not know if the information is not provided.\n" - "2) Make sure to format your answers in Markdown format, including code block and snippets.\n" - "3) Do not reference any links, urls or hyperlinks in your answers.\n" - "4) If you do not know the answer to a question, or if it is completely irrelevant to the library usage, simply reply with:\n" - "5) Do not refer to the documentation directly, but use the instructions provided within it to answer questions. " - "'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the library or its usage. Is there anything else I can assist you with?'" - "For example:\n" - "What is the meaning of life for an qa bot?\n" - "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the library or its usage. Is there anything else I can assist you with?" - "Now answer the following question:\n" - ), - }, - validator_cfg={ - "unknown_response_templates": [ - "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the library or its usage. Is there anything else I can assist you with?", - ], - "unknown_threshold": 0.85, - "embedding_model": "text-embedding-ada-002", - "use_reranking": True, - "invalid_question_response": "This question does not seem relevant to my current knowledge. If you think this is a mistake, you can modify the question validation prompt.", - "check_question_prompt": """You are an chatbot answering questions on python libraries hosted on readthedocs. - -Your job is to determine wether or not a question is valid, and should be answered. -A user will submit a question. Respond 'true' if it is valid, respond 'false' if it is invalid. - -For example: - -Q: How can I install the library? -true - -Q: What is the meaning of life? -false - -A user will submit a question. 
Respond 'true' if it is valid, respond 'false' if it is invalid.""", - "completion_kwargs": { - "model": "gpt-3.5-turbo", - "stream": False, - "temperature": 0, - }, - }, -) - - -def setup_buster(buster_cfg: BusterConfig): - """Initialize Buster from a BusterConfig instance.""" - retriever: Retriever = DeepLakeRetriever(**buster_cfg.retriever_cfg) - tokenizer = GPTTokenizer(**buster_cfg.tokenizer_cfg) - document_answerer: DocumentAnswerer = DocumentAnswerer( - completer=ChatGPTCompleter(**buster_cfg.completion_cfg), - documents_formatter=DocumentsFormatterJSON( - tokenizer=tokenizer, **buster_cfg.documents_formatter_cfg - ), - prompt_formatter=PromptFormatter( - tokenizer=tokenizer, **buster_cfg.prompt_formatter_cfg - ), - **buster_cfg.documents_answerer_cfg, - ) - validator: Validator = QuestionAnswerValidator(**buster_cfg.validator_cfg) - buster: Buster = Buster( - retriever=retriever, document_answerer=document_answerer, validator=validator - ) - return buster diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/_resources.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/_resources.py deleted file mode 100644 index e0a283fc9873b524bbacb73624721353d82c34ab..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/abc/_resources.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from abc import ABCMeta, abstractmethod -from types import TracebackType -from typing import TypeVar - -T = TypeVar("T") - - -class AsyncResource(metaclass=ABCMeta): - """ - Abstract base class for all closeable asynchronous resources. - - Works as an asynchronous context manager which returns the instance itself on enter, and calls - :meth:`aclose` on exit. 
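- 
-     A minimal usage sketch (``SomeResource`` is a hypothetical concrete
-     subclass, named here only for illustration)::
- 
-         async with SomeResource() as resource:  # stand-in for a real subclass
-             ...  # use the resource
- 
-     On leaving the block, :meth:`aclose` has been awaited automatically.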
- """ - - async def __aenter__(self: T) -> T: - return self - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - await self.aclose() - - @abstractmethod - async def aclose(self) -> None: - """Close the resource.""" diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/openapi/utils.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/openapi/utils.py deleted file mode 100644 index 5bfb5acef7fc47b220bfb159345b5c02cd8e8398..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/openapi/utils.py +++ /dev/null @@ -1,530 +0,0 @@ -import http.client -import inspect -import warnings -from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast - -from fastapi import routing -from fastapi._compat import ( - GenerateJsonSchema, - JsonSchemaValue, - ModelField, - Undefined, - get_compat_model_name_map, - get_definitions, - get_schema_from_model_field, - lenient_issubclass, -) -from fastapi.datastructures import DefaultPlaceholder -from fastapi.dependencies.models import Dependant -from fastapi.dependencies.utils import get_flat_dependant, get_flat_params -from fastapi.encoders import jsonable_encoder -from fastapi.openapi.constants import METHODS_WITH_BODY, REF_PREFIX, REF_TEMPLATE -from fastapi.openapi.models import OpenAPI -from fastapi.params import Body, Param -from fastapi.responses import Response -from fastapi.types import ModelNameMap -from fastapi.utils import ( - deep_dict_update, - generate_operation_id_for_path, - is_body_allowed_for_status_code, -) -from starlette.responses import JSONResponse -from starlette.routing import BaseRoute -from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY -from typing_extensions import Literal - -validation_error_definition = { - "title": "ValidationError", - "type": "object", - "properties": { - "loc": { - "title": "Location", - "type": "array", - "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, - }, - "msg": {"title": "Message", "type": "string"}, - "type": {"title": "Error Type", "type": "string"}, - }, - "required": ["loc", "msg", "type"], -} - -validation_error_response_definition = { - "title": "HTTPValidationError", - "type": "object", - "properties": { - "detail": { - "title": "Detail", - "type": "array", - "items": {"$ref": REF_PREFIX + "ValidationError"}, - } - }, -} - -status_code_ranges: Dict[str, str] = { - "1XX": "Information", - "2XX": "Success", - "3XX": "Redirection", - "4XX": "Client Error", - "5XX": "Server Error", - "DEFAULT": "Default Response", -} - - -def get_openapi_security_definitions( - flat_dependant: Dependant, -) -> Tuple[Dict[str, Any], List[Dict[str, Any]]]: - security_definitions = {} - operation_security = [] - for security_requirement in flat_dependant.security_requirements: - security_definition = jsonable_encoder( - security_requirement.security_scheme.model, - by_alias=True, - exclude_none=True, - ) - security_name = security_requirement.security_scheme.scheme_name - security_definitions[security_name] = security_definition - operation_security.append({security_name: security_requirement.scopes}) - return security_definitions, operation_security - - -def get_openapi_operation_parameters( - *, - all_route_params: Sequence[ModelField], - schema_generator: GenerateJsonSchema, - model_name_map: ModelNameMap, - field_mapping: Dict[ - Tuple[ModelField, 
Literal["validation", "serialization"]], JsonSchemaValue - ], - separate_input_output_schemas: bool = True, -) -> List[Dict[str, Any]]: - parameters = [] - for param in all_route_params: - field_info = param.field_info - field_info = cast(Param, field_info) - if not field_info.include_in_schema: - continue - param_schema = get_schema_from_model_field( - field=param, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - parameter = { - "name": param.alias, - "in": field_info.in_.value, - "required": param.required, - "schema": param_schema, - } - if field_info.description: - parameter["description"] = field_info.description - if field_info.openapi_examples: - parameter["examples"] = jsonable_encoder(field_info.openapi_examples) - elif field_info.example != Undefined: - parameter["example"] = jsonable_encoder(field_info.example) - if field_info.deprecated: - parameter["deprecated"] = field_info.deprecated - parameters.append(parameter) - return parameters - - -def get_openapi_operation_request_body( - *, - body_field: Optional[ModelField], - schema_generator: GenerateJsonSchema, - model_name_map: ModelNameMap, - field_mapping: Dict[ - Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue - ], - separate_input_output_schemas: bool = True, -) -> Optional[Dict[str, Any]]: - if not body_field: - return None - assert isinstance(body_field, ModelField) - body_schema = get_schema_from_model_field( - field=body_field, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - field_info = cast(Body, body_field.field_info) - request_media_type = field_info.media_type - required = body_field.required - request_body_oai: Dict[str, Any] = {} - if required: - request_body_oai["required"] = required - request_media_content: Dict[str, Any] = {"schema": body_schema} - if field_info.openapi_examples: - request_media_content["examples"] = jsonable_encoder( - field_info.openapi_examples - ) - elif field_info.example != Undefined: - request_media_content["example"] = jsonable_encoder(field_info.example) - request_body_oai["content"] = {request_media_type: request_media_content} - return request_body_oai - - -def generate_operation_id( - *, route: routing.APIRoute, method: str -) -> str: # pragma: nocover - warnings.warn( - "fastapi.openapi.utils.generate_operation_id() was deprecated, " - "it is not used internally, and will be removed soon", - DeprecationWarning, - stacklevel=2, - ) - if route.operation_id: - return route.operation_id - path: str = route.path_format - return generate_operation_id_for_path(name=route.name, path=path, method=method) - - -def generate_operation_summary(*, route: routing.APIRoute, method: str) -> str: - if route.summary: - return route.summary - return route.name.replace("_", " ").title() - - -def get_openapi_operation_metadata( - *, route: routing.APIRoute, method: str, operation_ids: Set[str] -) -> Dict[str, Any]: - operation: Dict[str, Any] = {} - if route.tags: - operation["tags"] = route.tags - operation["summary"] = generate_operation_summary(route=route, method=method) - if route.description: - operation["description"] = route.description - operation_id = route.operation_id or route.unique_id - if operation_id in operation_ids: - message = ( - f"Duplicate Operation ID {operation_id} for function " - + f"{route.endpoint.__name__}" 
- ) - file_name = getattr(route.endpoint, "__globals__", {}).get("__file__") - if file_name: - message += f" at {file_name}" - warnings.warn(message, stacklevel=1) - operation_ids.add(operation_id) - operation["operationId"] = operation_id - if route.deprecated: - operation["deprecated"] = route.deprecated - return operation - - -def get_openapi_path( - *, - route: routing.APIRoute, - operation_ids: Set[str], - schema_generator: GenerateJsonSchema, - model_name_map: ModelNameMap, - field_mapping: Dict[ - Tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue - ], - separate_input_output_schemas: bool = True, -) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]: - path = {} - security_schemes: Dict[str, Any] = {} - definitions: Dict[str, Any] = {} - assert route.methods is not None, "Methods must be a list" - if isinstance(route.response_class, DefaultPlaceholder): - current_response_class: Type[Response] = route.response_class.value - else: - current_response_class = route.response_class - assert current_response_class, "A response class is needed to generate OpenAPI" - route_response_media_type: Optional[str] = current_response_class.media_type - if route.include_in_schema: - for method in route.methods: - operation = get_openapi_operation_metadata( - route=route, method=method, operation_ids=operation_ids - ) - parameters: List[Dict[str, Any]] = [] - flat_dependant = get_flat_dependant(route.dependant, skip_repeats=True) - security_definitions, operation_security = get_openapi_security_definitions( - flat_dependant=flat_dependant - ) - if operation_security: - operation.setdefault("security", []).extend(operation_security) - if security_definitions: - security_schemes.update(security_definitions) - all_route_params = get_flat_params(route.dependant) - operation_parameters = get_openapi_operation_parameters( - all_route_params=all_route_params, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - parameters.extend(operation_parameters) - if parameters: - all_parameters = { - (param["in"], param["name"]): param for param in parameters - } - required_parameters = { - (param["in"], param["name"]): param - for param in parameters - if param.get("required") - } - # Make sure required definitions of the same parameter take precedence - # over non-required definitions - all_parameters.update(required_parameters) - operation["parameters"] = list(all_parameters.values()) - if method in METHODS_WITH_BODY: - request_body_oai = get_openapi_operation_request_body( - body_field=route.body_field, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - if request_body_oai: - operation["requestBody"] = request_body_oai - if route.callbacks: - callbacks = {} - for callback in route.callbacks: - if isinstance(callback, routing.APIRoute): - ( - cb_path, - cb_security_schemes, - cb_definitions, - ) = get_openapi_path( - route=callback, - operation_ids=operation_ids, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - callbacks[callback.name] = {callback.path: cb_path} - operation["callbacks"] = callbacks - if route.status_code is not None: - status_code = str(route.status_code) - else: - # It would probably make more sense for all response 
classes to have an - # explicit default status_code, and to extract it from them, instead of - # doing these inspection tricks; that would probably happen in the future - # TODO: probably make status_code a default class attribute for all - # responses in Starlette - response_signature = inspect.signature(current_response_class.__init__) - status_code_param = response_signature.parameters.get("status_code") - if status_code_param is not None: - if isinstance(status_code_param.default, int): - status_code = str(status_code_param.default) - operation.setdefault("responses", {}).setdefault(status_code, {})[ - "description" - ] = route.response_description - if route_response_media_type and is_body_allowed_for_status_code( - route.status_code - ): - response_schema = {"type": "string"} - if lenient_issubclass(current_response_class, JSONResponse): - if route.response_field: - response_schema = get_schema_from_model_field( - field=route.response_field, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - else: - response_schema = {} - operation.setdefault("responses", {}).setdefault( - status_code, {} - ).setdefault("content", {}).setdefault(route_response_media_type, {})[ - "schema" - ] = response_schema - if route.responses: - operation_responses = operation.setdefault("responses", {}) - for ( - additional_status_code, - additional_response, - ) in route.responses.items(): - process_response = additional_response.copy() - process_response.pop("model", None) - status_code_key = str(additional_status_code).upper() - if status_code_key == "DEFAULT": - status_code_key = "default" - openapi_response = operation_responses.setdefault( - status_code_key, {} - ) - assert isinstance( - process_response, dict - ), "An additional response must be a dict" - field = route.response_fields.get(additional_status_code) - additional_field_schema: Optional[Dict[str, Any]] = None - if field: - additional_field_schema = get_schema_from_model_field( - field=field, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - media_type = route_response_media_type or "application/json" - additional_schema = ( - process_response.setdefault("content", {}) - .setdefault(media_type, {}) - .setdefault("schema", {}) - ) - deep_dict_update(additional_schema, additional_field_schema) - status_text: Optional[str] = status_code_ranges.get( - str(additional_status_code).upper() - ) or http.client.responses.get(int(additional_status_code)) - description = ( - process_response.get("description") - or openapi_response.get("description") - or status_text - or "Additional Response" - ) - deep_dict_update(openapi_response, process_response) - openapi_response["description"] = description - http422 = str(HTTP_422_UNPROCESSABLE_ENTITY) - if (all_route_params or route.body_field) and not any( - status in operation["responses"] - for status in [http422, "4XX", "default"] - ): - operation["responses"][http422] = { - "description": "Validation Error", - "content": { - "application/json": { - "schema": {"$ref": REF_PREFIX + "HTTPValidationError"} - } - }, - } - if "ValidationError" not in definitions: - definitions.update( - { - "ValidationError": validation_error_definition, - "HTTPValidationError": validation_error_response_definition, - } - ) - if route.openapi_extra: - deep_dict_update(operation, route.openapi_extra) - 
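- # Store the finished operation under the lowercase HTTP method name
- # (e.g. "get", "post"); route.openapi_extra was merged last, just above,
- # so user-supplied keys override the generated ones.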
path[method.lower()] = operation - return path, security_schemes, definitions - - -def get_fields_from_routes( - routes: Sequence[BaseRoute], -) -> List[ModelField]: - body_fields_from_routes: List[ModelField] = [] - responses_from_routes: List[ModelField] = [] - request_fields_from_routes: List[ModelField] = [] - callback_flat_models: List[ModelField] = [] - for route in routes: - if getattr(route, "include_in_schema", None) and isinstance( - route, routing.APIRoute - ): - if route.body_field: - assert isinstance( - route.body_field, ModelField - ), "A request body must be a Pydantic Field" - body_fields_from_routes.append(route.body_field) - if route.response_field: - responses_from_routes.append(route.response_field) - if route.response_fields: - responses_from_routes.extend(route.response_fields.values()) - if route.callbacks: - callback_flat_models.extend(get_fields_from_routes(route.callbacks)) - params = get_flat_params(route.dependant) - request_fields_from_routes.extend(params) - - flat_models = callback_flat_models + list( - body_fields_from_routes + responses_from_routes + request_fields_from_routes - ) - return flat_models - - -def get_openapi( - *, - title: str, - version: str, - openapi_version: str = "3.1.0", - summary: Optional[str] = None, - description: Optional[str] = None, - routes: Sequence[BaseRoute], - webhooks: Optional[Sequence[BaseRoute]] = None, - tags: Optional[List[Dict[str, Any]]] = None, - servers: Optional[List[Dict[str, Union[str, Any]]]] = None, - terms_of_service: Optional[str] = None, - contact: Optional[Dict[str, Union[str, Any]]] = None, - license_info: Optional[Dict[str, Union[str, Any]]] = None, - separate_input_output_schemas: bool = True, -) -> Dict[str, Any]: - info: Dict[str, Any] = {"title": title, "version": version} - if summary: - info["summary"] = summary - if description: - info["description"] = description - if terms_of_service: - info["termsOfService"] = terms_of_service - if contact: - info["contact"] = contact - if license_info: - info["license"] = license_info - output: Dict[str, Any] = {"openapi": openapi_version, "info": info} - if servers: - output["servers"] = servers - components: Dict[str, Dict[str, Any]] = {} - paths: Dict[str, Dict[str, Any]] = {} - webhook_paths: Dict[str, Dict[str, Any]] = {} - operation_ids: Set[str] = set() - all_fields = get_fields_from_routes(list(routes or []) + list(webhooks or [])) - model_name_map = get_compat_model_name_map(all_fields) - schema_generator = GenerateJsonSchema(ref_template=REF_TEMPLATE) - field_mapping, definitions = get_definitions( - fields=all_fields, - schema_generator=schema_generator, - model_name_map=model_name_map, - separate_input_output_schemas=separate_input_output_schemas, - ) - for route in routes or []: - if isinstance(route, routing.APIRoute): - result = get_openapi_path( - route=route, - operation_ids=operation_ids, - schema_generator=schema_generator, - model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - if result: - path, security_schemes, path_definitions = result - if path: - paths.setdefault(route.path_format, {}).update(path) - if security_schemes: - components.setdefault("securitySchemes", {}).update( - security_schemes - ) - if path_definitions: - definitions.update(path_definitions) - for webhook in webhooks or []: - if isinstance(webhook, routing.APIRoute): - result = get_openapi_path( - route=webhook, - operation_ids=operation_ids, - schema_generator=schema_generator, - 
model_name_map=model_name_map, - field_mapping=field_mapping, - separate_input_output_schemas=separate_input_output_schemas, - ) - if result: - path, security_schemes, path_definitions = result - if path: - webhook_paths.setdefault(webhook.path_format, {}).update(path) - if security_schemes: - components.setdefault("securitySchemes", {}).update( - security_schemes - ) - if path_definitions: - definitions.update(path_definitions) - if definitions: - components["schemas"] = {k: definitions[k] for k in sorted(definitions)} - if components: - output["components"] = components - output["paths"] = paths - if webhook_paths: - output["webhooks"] = webhook_paths - if tags: - output["tags"] = tags - return jsonable_encoder(OpenAPI(**output), by_alias=True, exclude_none=True) # type: ignore diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_T_F_A_.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_T_F_A_.py deleted file mode 100644 index e3cf2db2d744cdda880ec1255808f60bc3795c61..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_T_F_A_.py +++ /dev/null @@ -1,5 +0,0 @@ -from . import asciiTable - - -class table_T_T_F_A_(asciiTable.asciiTable): - pass diff --git a/spaces/jonas/KaraAgro-Cadi-AI/app.py b/spaces/jonas/KaraAgro-Cadi-AI/app.py deleted file mode 100644 index 07d174f826ac687fe90f2a529cdebea1f5d7583c..0000000000000000000000000000000000000000 --- a/spaces/jonas/KaraAgro-Cadi-AI/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr -import cv2 -import requests -import os -from PIL import Image -import torch -import ultralytics - -model = torch.hub.load("ultralytics/yolov5", "custom", path="yolov5_0.65map_exp7_best.pt", - force_reload=False) - -model.conf = 0.20 # NMS confidence threshold - -path = [['img/test-image.jpg']] - -def show_preds_image(im): - - results = model(im) # inference - return results.render()[0] - -inputs_image = [ - gr.components.Image(type="filepath", label="Input Image"), -] -outputs_image = [ - gr.components.Image(type="filepath", label="Output Image"), -] -interface_image = gr.Interface( - fn=show_preds_image, - inputs=inputs_image, - outputs=outputs_image, - title="Cashew Disease Detection", - examples=path, - cache_examples=False, -) - -interface_image.launch() \ No newline at end of file diff --git a/spaces/jonathanmg96/TFG-YOLOP/app.py b/spaces/jonathanmg96/TFG-YOLOP/app.py deleted file mode 100644 index ec5bc10b64a82e1acff27968c164af89ce3c1912..0000000000000000000000000000000000000000 --- a/spaces/jonathanmg96/TFG-YOLOP/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import gradio as gr -import test_onnx - -with gr.Blocks(css="#c1 {width: 25%}") as yolop: - gr.Markdown( - """ -
    - - - # YOLOP model test scenario - - A scenario for trying out the YOLOP model. Drag or upload an image into the Original box; pressing the "Segmentar Imagen" button will display the processed and segmented images that the model generates. When you want to try another image, you can clear the current one and place a new one. -
    - """) - with gr.Row(): - with gr.Column(): - img = gr.Image(type="filepath",label="Original") - segmentar = gr.Button(value="Segmentar Imagen") - with gr.Column(): - img2 = gr.Image("./yolop.png",label="Arquitectura YOLOP") - with gr.Row(): - with gr.Column(): - s1 = gr.Image(label='Zona Conducible') - with gr.Column(): - s2 = gr.Image(label='Líneas del Carril') - with gr.Column(): - s3 = gr.Image(label='Vehículos Detectados') - with gr.Column(): - s4 = gr.Image(label='Imagen Final') - segmentar.click(test_onnx.infer_yolop, inputs=img, outputs=[s1,s2,s3,s4]) - examples = gr.Examples(examples=[['./calle.png']],inputs=[img]) - gr.Markdown( - """ -
    - GITHUB YOLOP -
    - """) - -yolop.launch() \ No newline at end of file diff --git a/spaces/jordonpeter01/Top-20-Diffusion-g/app.py b/spaces/jordonpeter01/Top-20-Diffusion-g/app.py deleted file mode 100644 index cee87b0117855508dfa13844d0700141ff726272..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/Top-20-Diffusion-g/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr -max_d=gr.Interface.load("spaces/Omnibus/Top-20-Diffusion") -max_d.launch() diff --git a/spaces/jordonpeter01/ai-comic-factory/src/lib/createLlamaPrompt.ts b/spaces/jordonpeter01/ai-comic-factory/src/lib/createLlamaPrompt.ts deleted file mode 100644 index ca246b36d0ef50f37571dcf09480bf57e9aee922..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/lib/createLlamaPrompt.ts +++ /dev/null @@ -1,25 +0,0 @@ -// adapted from https://huggingface.co/TheBloke/Llama-2-13B-chat-GPTQ/discussions/5 -export function createLlamaPrompt(messages: Array<{ role: string, content: string }>) { - const B_INST = "[INST]", E_INST = "[/INST]"; - const B_SYS = "<>\n", E_SYS = "\n<>\n\n"; - const BOS = "", EOS = ""; - const DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."; - - if (messages[0].role != "system"){ - messages = [ - {role: "system", content: DEFAULT_SYSTEM_PROMPT} - ].concat(messages); - } - messages = [{role: messages[1].role, content: B_SYS + messages[0].content + E_SYS + messages[1].content}].concat(messages.slice(2)); - - let messages_list = messages.map((value, index, array) => { - if (index % 2 == 0 && index + 1 < array.length){ - return `${BOS}${B_INST} ${array[index].content.trim()} ${E_INST} ${array[index+1].content.trim()} ${EOS}` - } - return ''; - }) - - messages_list.push(`${BOS}${B_INST} ${messages[messages.length-1].content.trim()} ${E_INST}`) - - return messages_list.join(''); -} \ No newline at end of file diff --git a/spaces/jthteo/hokkientranslator/README.md b/spaces/jthteo/hokkientranslator/README.md deleted file mode 100644 index cff52f8eede26b2bfb0e79bc4c4406bc80a3ef7f..0000000000000000000000000000000000000000 --- a/spaces/jthteo/hokkientranslator/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Hokkien Translation -emoji: 🎙️ -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.5 -app_file: app.py -pinned: false -license: cc-by-nc-4.0 ---- diff --git a/spaces/kadirnar/yolov6/app.py b/spaces/kadirnar/yolov6/app.py deleted file mode 100644 index 21b9080b8af1e4d70a443c798e41f494f54788e4..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/yolov6/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import gradio as gr -import torch -from yolov6 import YOLOV6 - -# Images -torch.hub.download_url_to_file('https://raw.githubusercontent.com/kadirnar/dethub/main/data/images/highway.jpg', 'highway.jpg') -torch.hub.download_url_to_file('https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg', 'highway1.jpg') - - -def yolov6_inference( - image: gr.inputs.Image = None, - model_path: gr.inputs.Dropdown = None, - image_size: gr.inputs.Slider = 640, - conf_threshold: gr.inputs.Slider = 0.25, - iou_threshold: gr.inputs.Slider = 0.45, -): - """ - YOLOv6 inference 
diff --git a/spaces/kadirnar/yolov6/app.py b/spaces/kadirnar/yolov6/app.py deleted file mode 100644 index 21b9080b8af1e4d70a443c798e41f494f54788e4..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/yolov6/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import gradio as gr -import torch -from yolov6 import YOLOV6 - -# Images -torch.hub.download_url_to_file('https://raw.githubusercontent.com/kadirnar/dethub/main/data/images/highway.jpg', 'highway.jpg') -torch.hub.download_url_to_file('https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg', 'highway1.jpg') - - -def yolov6_inference( - image: gr.inputs.Image = None, - model_path: gr.inputs.Dropdown = None, - image_size: gr.inputs.Slider = 640, - conf_threshold: gr.inputs.Slider = 0.25, - iou_threshold: gr.inputs.Slider = 0.45, -): - """ - YOLOv6 inference function - Args: - image: Input image - model_path: Path to the model - image_size: Image size - conf_threshold: Confidence threshold - iou_threshold: IOU threshold - Returns: - Rendered image - """ - - model = YOLOV6(model_path, device="cpu", hf_model=True) - model.conf_thres = conf_threshold - model.iou_thresh = iou_threshold - model.save_img = True - model.font_path = "Arial.ttf" - pred = model.predict(source=image, img_size=image_size, yaml="coco.yaml") - return pred - - -inputs = [ - gr.inputs.Image(type="filepath", label="Input Image"), - gr.inputs.Dropdown( - label="Model", - choices=[ - "kadirnar/yolov6n-v3.0", - "kadirnar/yolov6s-v3.0", - "kadirnar/yolov6m-v3.0", - "kadirnar/yolov6l-v3.0", - "kadirnar/yolov6s6-v3.0", - "kadirnar/yolov6m6-v3.0", - "kadirnar/yolov6l6-v3.0", - ], - default="kadirnar/yolov6s-v3.0", - ), - gr.inputs.Slider(minimum=320, maximum=1280, default=1280, step=32, label="Image Size"), - gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"), - gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"), -] - -outputs = gr.outputs.Image(type="filepath", label="Output Image") -title = "YOLOv6: a single-stage object detection framework dedicated to industrial applications." - -examples = [['highway1.jpg', 'kadirnar/yolov6m6-v3.0', 1280, 0.25, 0.45],['highway.jpg', 'kadirnar/yolov6s6-v3.0', 1280, 0.25, 0.45]] - -demo_app = gr.Interface( - fn=yolov6_inference, - inputs=inputs, - outputs=outputs, - title=title, - examples=examples, - cache_examples=True, - theme='huggingface', -) -demo_app.launch(debug=True, enable_queue=True) diff --git a/spaces/kaisugi/academic-paraphraser/app.py b/spaces/kaisugi/academic-paraphraser/app.py deleted file mode 100644 index d81c62910b3679c32100932954c6c9671d974fa2..0000000000000000000000000000000000000000 --- a/spaces/kaisugi/academic-paraphraser/app.py +++ /dev/null @@ -1,183 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -import faiss -import numpy as np -import pandas as pd -import streamlit as st -import torch - -import math -import os -import re - -os.environ['KMP_DUPLICATE_LIB_OK']='True' - - -@st.cache(allow_output_mutation=True) -def load_model_and_tokenizer(): - tokenizer = AutoTokenizer.from_pretrained("kaisugi/scitoricsbert") - model = AutoModel.from_pretrained("kaisugi/scitoricsbert", output_attentions=True) - model.eval() - - return model, tokenizer - - -@st.cache(allow_output_mutation=True) -def load_sentence_data(): - sentence_df = pd.read_csv("sentence_data_858k.csv.gz") - - return sentence_df - - -@st.cache(allow_output_mutation=True) -def load_sentence_embeddings_and_index(): - npz_comp = np.load("sentence_embeddings_858k.npz") - sentence_embeddings = npz_comp["arr_0"] - - faiss.normalize_L2(sentence_embeddings) - D = 768 - N = 857610 - Xt = sentence_embeddings[:100000] - X = sentence_embeddings - - # Param of PQ - M = 16 # The number of sub-vectors. Typically this is 8, 16, 32, etc. - nbits = 8 # bits per sub-vector. This is typically 8, so that each sub-vec is encoded by 1 byte - # Param of IVF - nlist = int(math.sqrt(N)) # The number of cells (space partition). Typical value is sqrt(N) - # Param of HNSW - hnsw_m = 32 # The number of neighbors for HNSW. This is typically 32 - - # Setup - quantizer = faiss.IndexHNSWFlat(D, hnsw_m) - index = faiss.IndexIVFPQ(quantizer, D, nlist, M, nbits) - - # Train - index.train(Xt) - - # Add - index.add(X) - - # Search - index.nprobe = 8 # Runtime param.
The number of cells that are visited for search. - - return sentence_embeddings, index - - -@st.cache(allow_output_mutation=True) -def formulaic_phrase_extraction(sentences, model, tokenizer): - THRESHOLD = 0.01 - LAYER = 10 - - output_sentences = [] - - with torch.no_grad(): - inputs = tokenizer.batch_encode_plus( - sentences, - padding=True, - truncation=True, - max_length=512, - return_tensors='pt' - ) - outputs = model(**inputs) - attention = outputs[-1] - - cls_attentions = torch.mean(attention[LAYER][0], dim=0) - - for sentence, cls_attention in zip(sentences, cls_attentions): - check_bool_arr = list((cls_attention > THRESHOLD).numpy())[1:-1] - tokens = tokenizer.tokenize(sentence) - - cur_tokens = tokens.copy() - - while True: - flg = False - - for idx, token in enumerate(cur_tokens): - if token.startswith("##"): - flg = True - back_token = token.replace("##", "") - front_token = cur_tokens.pop(idx-1) - cur_tokens[idx-1] = front_token + back_token - - back_bool_val = check_bool_arr[idx] - front_bool_val = check_bool_arr.pop(idx-1) - check_bool_arr[idx-1] = front_bool_val and back_bool_val - - if not flg: - break - - result = " ".join([f'{original_word}' if b else original_word for (b, original_word) in zip(check_bool_arr, sentence.split())]) - output_sentences.append(result) - - return output_sentences - - -@st.cache(allow_output_mutation=True) -def get_retrieval_results(index, input_text, top_k, model, tokenizer, sentence_df, exclude_word_list, phrase_annotated=True): - with torch.no_grad(): - inputs = tokenizer.encode_plus( - input_text, - padding=True, - truncation=True, - max_length=512, - return_tensors='pt' - ) - outputs = model(**inputs) - query_embeddings = outputs.last_hidden_state[:, 0, :][0] - query_embeddings = query_embeddings.detach().cpu().numpy() - query_embeddings = query_embeddings / np.linalg.norm(query_embeddings, ord=2) - - _, ids = index.search(x=np.array([query_embeddings]), k=top_k) - retrieved_sentences = [] - retrieved_paper_ids = [] - - for id in ids[0]: - cur_sentence = sentence_df.loc[id, "sentence"] - cur_link = f"https://aclanthology.org/{sentence_df.loc[id, 'file_id']}" - - if len(exclude_word_list) == 0: - retrieved_sentences.append(cur_sentence) - retrieved_paper_ids.append(cur_link) - - else: - exclude_word_list_regex = '|'.join(exclude_word_list) - pat = re.compile(f'{exclude_word_list_regex}') - - if not bool(pat.search(cur_sentence)): - retrieved_sentences.append(cur_sentence) - retrieved_paper_ids.append(cur_link) - - if phrase_annotated: - retrieved_sentences = formulaic_phrase_extraction(retrieved_sentences, model, tokenizer) - - return retrieved_sentences, retrieved_paper_ids - - -if __name__ == "__main__": - model, tokenizer = load_model_and_tokenizer() - sentence_df = load_sentence_data() - sentence_embeddings, index = load_sentence_embeddings_and_index() - - - st.markdown("## AI-based Paraphrasing for Academic Writing") - - input_text = st.text_area("text input", "Our model shows good results.", placeholder="Write something here...") - top_k = st.number_input('top_k (upperbound)', min_value=1, value=30, step=1) - input_words = st.text_input("exclude words (comma separated)", "good, result") - - agree = st.checkbox('Include phrase annotation') - - if st.button('search'): - exclude_word_list = [s.strip() for s in input_words.split(",") if s.strip() != ""] - retrieved_sentences, retrieved_paper_ids = get_retrieval_results(index, input_text, top_k, model, tokenizer, sentence_df, exclude_word_list, phrase_annotated=agree) - - 
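
# Editorial sketch (not part of the deleted file): the IVFPQ-over-HNSW index
# recipe from load_sentence_embeddings_and_index() above, reduced to random
# vectors so the moving parts are easier to see. All sizes are illustrative.
import faiss
import numpy as np

d, n = 64, 10_000
xb = np.random.rand(n, d).astype("float32")
faiss.normalize_L2(xb)                          # L2-normalise so L2 ranking matches cosine
coarse = faiss.IndexHNSWFlat(d, 32)             # HNSW graph as the coarse quantizer
index = faiss.IndexIVFPQ(coarse, d, 100, 8, 8)  # nlist=100 (~sqrt(n)), M=8 sub-vectors, 8 bits each
index.train(xb)                                 # learn IVF centroids and PQ codebooks
index.add(xb)
index.nprobe = 8                                # cells visited per query
scores, ids = index.search(xb[:1], 5)           # 5 nearest neighbours of the first vector
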
result_table_markdown = "| sentence | source link |\n|:---|:---|\n" - - for (retrieved_sentence, retrieved_paper_id) in zip(retrieved_sentences, retrieved_paper_ids): - result_table_markdown += f"| {retrieved_sentence} | {retrieved_paper_id} |\n" - - st.markdown(result_table_markdown, unsafe_allow_html=True) - - st.markdown("---\n#### How this works") - - st.markdown("This app uses ScitoricsBERT [(Sugimoto and Aizawa, 2022)](https://aclanthology.org/2022.sdp-1.7/), a functional sentence representation model, to retrieve sentences that are functionally similar to the input. It also extracts phrasal patterns that accord to the function, by leveraging self-attention patterns within ScitoricsBERT.") \ No newline at end of file diff --git a/spaces/kcagle/AutoGPT/autogpt/promptgenerator.py b/spaces/kcagle/AutoGPT/autogpt/promptgenerator.py deleted file mode 100644 index 0ad7046a0c41dab356abcd0151b65890e5544cd2..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/promptgenerator.py +++ /dev/null @@ -1,138 +0,0 @@ -""" A module for generating custom prompt strings.""" -from __future__ import annotations - -import json -from typing import Any - - -class PromptGenerator: - """ - A class for generating custom prompt strings based on constraints, commands, - resources, and performance evaluations. - """ - - def __init__(self) -> None: - """ - Initialize the PromptGenerator object with empty lists of constraints, - commands, resources, and performance evaluations. - """ - self.constraints = [] - self.commands = [] - self.resources = [] - self.performance_evaluation = [] - self.response_format = { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user", - }, - "command": {"name": "command name", "args": {"arg name": "value"}}, - } - - def add_constraint(self, constraint: str) -> None: - """ - Add a constraint to the constraints list. - - Args: - constraint (str): The constraint to be added. - """ - self.constraints.append(constraint) - - def add_command(self, command_label: str, command_name: str, args=None) -> None: - """ - Add a command to the commands list with a label, name, and optional arguments. - - Args: - command_label (str): The label of the command. - command_name (str): The name of the command. - args (dict, optional): A dictionary containing argument names and their - values. Defaults to None. - """ - if args is None: - args = {} - - command_args = {arg_key: arg_value for arg_key, arg_value in args.items()} - - command = { - "label": command_label, - "name": command_name, - "args": command_args, - } - - self.commands.append(command) - - def _generate_command_string(self, command: dict[str, Any]) -> str: - """ - Generate a formatted string representation of a command. - - Args: - command (dict): A dictionary containing command information. - - Returns: - str: The formatted command string. - """ - args_string = ", ".join( - f'"{key}": "{value}"' for key, value in command["args"].items() - ) - return f'{command["label"]}: "{command["name"]}", args: {args_string}' - - def add_resource(self, resource: str) -> None: - """ - Add a resource to the resources list. - - Args: - resource (str): The resource to be added. - """ - self.resources.append(resource) - - def add_performance_evaluation(self, evaluation: str) -> None: - """ - Add a performance evaluation item to the performance_evaluation list. 
- - Args: - evaluation (str): The evaluation item to be added. - """ - self.performance_evaluation.append(evaluation) - - def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: - """ - Generate a numbered list from given items based on the item_type. - - Args: - items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. - - Returns: - str: The formatted numbered list. - """ - if item_type == "command": - return "\n".join( - f"{i+1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self) -> str: - """ - Generate a prompt string based on the constraints, commands, resources, - and performance evaluations. - - Returns: - str: The generated prompt string. - """ - formatted_response_format = json.dumps(self.response_format, indent=4) - return ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - "Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - "Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - "You should only respond in JSON format as described below \nResponse" - f" Format: \n{formatted_response_format} \nEnsure the response can be" - " parsed by Python json.loads" - )
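
A quick usage sketch of the PromptGenerator class above (an editorial addition, not part of the deleted file; the constraint, command, and resource strings are invented for illustration):

gen = PromptGenerator()
gen.add_constraint("No user assistance")
gen.add_command("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"})
gen.add_resource("Internet access for searches and information gathering.")
gen.add_performance_evaluation("Continuously review and analyze your actions.")
print(gen.generate_prompt_string())  # numbered sections plus the JSON response format
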
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/web_log.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/web_log.py deleted file mode 100644 index bc6e3b5a8a280347d606e91374517fef223fa441..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/aiohttp/web_log.py +++ /dev/null @@ -1,208 +0,0 @@ -import datetime -import functools -import logging -import os -import re -from collections import namedtuple -from typing import Any, Callable, Dict, Iterable, List, Tuple # noqa - -from .abc import AbstractAccessLogger -from .web_request import BaseRequest -from .web_response import StreamResponse - -KeyMethod = namedtuple("KeyMethod", "key method") - - -class AccessLogger(AbstractAccessLogger): - """Helper object to log access. - - Usage: - log = logging.getLogger("spam") - log_format = "%a %{User-Agent}i" - access_logger = AccessLogger(log, log_format) - access_logger.log(request, response, time) - - Format: - %% The percent sign - %a Remote IP-address (IP-address of proxy if using reverse proxy) - %t Time when the request was started to process - %P The process ID of the child that serviced the request - %r First line of request - %s Response status code - %b Size of response in bytes, including HTTP headers - %T Time taken to serve the request, in seconds - %Tf Time taken to serve the request, in seconds with floating fraction - in .06f format - %D Time taken to serve the request, in microseconds - %{FOO}i request.headers['FOO'] - %{FOO}o response.headers['FOO'] - %{FOO}e os.environ['FOO'] - - """ - - LOG_FORMAT_MAP = { - "a": "remote_address", - "t": "request_start_time", - "P": "process_id", - "r": "first_request_line", - "s": "response_status", - "b": "response_size", - "T": "request_time", - "Tf": "request_time_frac", - "D": "request_time_micro", - "i": "request_header", - "o": "response_header", - } - - LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"' - FORMAT_RE = re.compile(r"%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)") - CLEANUP_RE = re.compile(r"(%[^s])") - _FORMAT_CACHE: Dict[str, Tuple[str, List[KeyMethod]]] = {} - - def __init__(self, logger: logging.Logger, log_format: str = LOG_FORMAT) -> None: - """Initialise the logger. - - logger is a logger object to be used for logging. - log_format is a string with an Apache-compatible log format description. - - """ - super().__init__(logger, log_format=log_format) - - _compiled_format = AccessLogger._FORMAT_CACHE.get(log_format) - if not _compiled_format: - _compiled_format = self.compile_format(log_format) - AccessLogger._FORMAT_CACHE[log_format] = _compiled_format - - self._log_format, self._methods = _compiled_format - - def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]: - """Translate log_format into a form usable by modulo formatting - - All known atoms will be replaced with %s - Also, methods for formatting those atoms will be added to - _methods in the appropriate order - - For example, if we have log_format = "%a %t" - this format will be translated to "%s %s" - Also, the contents of _methods will be - [self._format_a, self._format_t] - These methods will be called and their results will be passed - to the translated string format.
- - Each _format_* method receives 'args', which is the list of arguments - given to self.log - - The exceptions are the _format_e, _format_i and _format_o methods, which - also receive a key name (via functools.partial) - - """ - # list of (key, method) tuples, we don't use an OrderedDict as users - # can repeat the same key more than once - methods = list() - - for atom in self.FORMAT_RE.findall(log_format): - if atom[1] == "": - format_key1 = self.LOG_FORMAT_MAP[atom[0]] - m = getattr(AccessLogger, "_format_%s" % atom[0]) - key_method = KeyMethod(format_key1, m) - else: - format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1]) - m = getattr(AccessLogger, "_format_%s" % atom[2]) - key_method = KeyMethod(format_key2, functools.partial(m, atom[1])) - - methods.append(key_method) - - log_format = self.FORMAT_RE.sub(r"%s", log_format) - log_format = self.CLEANUP_RE.sub(r"%\1", log_format) - return log_format, methods - - @staticmethod - def _format_i( - key: str, request: BaseRequest, response: StreamResponse, time: float - ) -> str: - if request is None: - return "(no headers)" - - # suboptimal, make istr(key) once - return request.headers.get(key, "-") - - @staticmethod - def _format_o( - key: str, request: BaseRequest, response: StreamResponse, time: float - ) -> str: - # suboptimal, make istr(key) once - return response.headers.get(key, "-") - - @staticmethod - def _format_a(request: BaseRequest, response: StreamResponse, time: float) -> str: - if request is None: - return "-" - ip = request.remote - return ip if ip is not None else "-" - - @staticmethod - def _format_t(request: BaseRequest, response: StreamResponse, time: float) -> str: - now = datetime.datetime.utcnow() - start_time = now - datetime.timedelta(seconds=time) - return start_time.strftime("[%d/%b/%Y:%H:%M:%S +0000]") - - @staticmethod - def _format_P(request: BaseRequest, response: StreamResponse, time: float) -> str: - return "<%s>" % os.getpid() - - @staticmethod - def _format_r(request: BaseRequest, response: StreamResponse, time: float) -> str: - if request is None: - return "-" - return "{} {} HTTP/{}.{}".format( - request.method, - request.path_qs, - request.version.major, - request.version.minor, - ) - - @staticmethod - def _format_s(request: BaseRequest, response: StreamResponse, time: float) -> int: - return response.status - - @staticmethod - def _format_b(request: BaseRequest, response: StreamResponse, time: float) -> int: - return response.body_length - - @staticmethod - def _format_T(request: BaseRequest, response: StreamResponse, time: float) -> str: - return str(round(time)) - - @staticmethod - def _format_Tf(request: BaseRequest, response: StreamResponse, time: float) -> str: - return "%06f" % time - - @staticmethod - def _format_D(request: BaseRequest, response: StreamResponse, time: float) -> str: - return str(round(time * 1000000)) - - def _format_line( - self, request: BaseRequest, response: StreamResponse, time: float - ) -> Iterable[Tuple[str, Callable[[BaseRequest, StreamResponse, float], str]]]: - return [(key, method(request, response, time)) for key, method in self._methods] - - def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: - try: - fmt_info = self._format_line(request, response, time) - - values = list() - extra = dict() - for key, value in fmt_info: - values.append(value) - - if key.__class__ is str: - extra[key] = value - else: - k1, k2 = key # type: ignore[misc] - dct = extra.get(k1, {}) # type: ignore[var-annotated,has-type] - dct[k2] = value # type: ignore[index,has-type] -
extra[k1] = dct # type: ignore[has-type,assignment] - - self.logger.info(self._log_format % tuple(values), extra=extra) - except Exception: - self.logger.exception("Error in logging") diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ColorPicker-76ff4dc7.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ColorPicker-76ff4dc7.css deleted file mode 100644 index 841ff42605752f0be14e2d3fcb79cb5a041c8d8e..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/ColorPicker-76ff4dc7.css +++ /dev/null @@ -1 +0,0 @@ -label.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{display:flex;align-items:center;cursor:pointer;color:var(--body-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1ojmf70>.svelte-1ojmf70+.svelte-1ojmf70{margin-left:var(--size-2)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{--ring-color:transparent;position:relative;box-shadow:var(--input-shadow);border:1px solid var(--checkbox-border-color);border-radius:var(--checkbox-border-radius);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked,input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked:hover,input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70,.disabled.svelte-1ojmf70.svelte-1ojmf70.svelte-1ojmf70{cursor:not-allowed}.wrap.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04{display:flex;flex-wrap:wrap;gap:var(--checkbox-label-gap)}label.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:hover{background:var(--checkbox-label-background-fill-hover)}label.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:focus{background:var(--checkbox-label-background-fill-focus)}label.selected.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04{background:var(--checkbox-label-background-fill-selected);color:var(--checkbox-label-text-color-selected)}label.svelte-1qxcj04>.svelte-1qxcj04+.svelte-1qxcj04{margin-left:var(--size-2)}input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04{--ring-color:transparent;position:relative;box-shadow:var(--checkbox-shadow);border:var(--checkbox-border-width) solid 
var(--checkbox-border-color);border-radius:var(--checkbox-border-radius);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:checked,input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:checked:hover,input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected)}input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04,.disabled.svelte-1qxcj04.svelte-1qxcj04.svelte-1qxcj04{cursor:not-allowed}.options.svelte-1udn3b5{--window-padding:var(--size-8);position:absolute;z-index:var(--layer-5);margin-left:0;box-shadow:var(--shadow-drop-lg);border-radius:var(--container-radius);background:var(--background-fill-primary);width:var(--size-full);min-width:fit-content;max-width:inherit;overflow:auto;color:var(--body-text-color);list-style:none}.item.svelte-1udn3b5{display:flex;cursor:pointer;padding:var(--size-2)}.item.svelte-1udn3b5:hover,.active.svelte-1udn3b5{background:var(--background-fill-secondary)}.inner-item.svelte-1udn3b5{padding-right:var(--size-1)}.hide.svelte-1udn3b5{visibility:hidden}.wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{position:relative;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--border-color-primary);border-radius:var(--input-radius);background:var(--input-background-fill)}.wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e:focus-within{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}.wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{display:flex;position:relative;flex-wrap:wrap;align-items:center;gap:var(--checkbox-label-gap);padding:var(--checkbox-label-padding)}.token.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}.token.svelte-aqlk7e>.svelte-aqlk7e+.svelte-aqlk7e{margin-left:var(--size-2)}.token-remove.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{fill:var(--body-text-color);display:flex;justify-content:center;align-items:center;cursor:pointer;border:var(--checkbox-border-width) solid var(--border-color-primary);border-radius:var(--radius-full);background:var(--background-fill-primary);padding:var(--size-0-5);width:18px;height:18px}.secondary-wrap.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{display:flex;flex:1 1 
0%;align-items:center;border:none;min-width:min-content}input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{margin:var(--spacing-sm);outline:none;border:none;background:inherit;width:var(--size-full);color:var(--body-text-color);font-size:var(--input-text-size)}input.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1;cursor:not-allowed}.remove-all.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{margin-left:var(--size-1);width:20px;height:20px}.hide.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{display:none}.subdued.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e{color:var(--body-text-color-subdued)}input[type=number].svelte-og1zwl{display:block;position:relative;outline:none!important;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--input-border-color);border-radius:var(--input-radius);background:var(--input-background-fill);padding:var(--input-padding);width:100%;color:var(--body-text-color);font-size:var(--input-text-size);line-height:var(--line-sm)}input.svelte-og1zwl:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1}input.svelte-og1zwl:focus{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}input.svelte-og1zwl::placeholder{color:var(--input-placeholder-color)}.wrap.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;flex-wrap:wrap;gap:var(--checkbox-label-gap)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{background:var(--checkbox-label-background-fill-hover)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{background:var(--checkbox-label-background-fill-focus)}label.selected.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{background:var(--checkbox-label-background-fill-selected);color:var(--checkbox-label-text-color-selected)}label.svelte-1p9xokt>.svelte-1p9xokt+.svelte-1p9xokt{margin-left:var(--size-2)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{--ring-color:transparent;position:relative;box-shadow:var(--checkbox-shadow);border:var(--checkbox-border-width) solid 
var(--checkbox-border-color);border-radius:var(--radius-full);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:hover,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--radio-circle);background-color:var(--checkbox-background-color-selected)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt,.disabled.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{cursor:not-allowed}label.svelte-1pie7s6{display:block;width:100%}input[type=text].svelte-1pie7s6,input[type=password].svelte-1pie7s6,input[type=email].svelte-1pie7s6,textarea.svelte-1pie7s6{display:block;position:relative;outline:none!important;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--input-border-color);border-radius:var(--input-radius);background:var(--input-background-fill);padding:var(--input-padding);width:100%;color:var(--body-text-color);font-weight:var(--input-text-weight);font-size:var(--input-text-size);line-height:var(--line-sm)}input.svelte-1pie7s6:disabled,textarea.svelte-1pie7s6:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1}input.svelte-1pie7s6:focus,textarea.svelte-1pie7s6:focus{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}input.svelte-1pie7s6::placeholder,textarea.svelte-1pie7s6::placeholder{color:var(--input-placeholder-color)}button.svelte-1pie7s6{display:flex;position:absolute;top:var(--block-label-margin);right:var(--block-label-margin);align-items:center;box-shadow:var(--shadow-drop);border:1px solid var(--color-border-primary);border-top:none;border-right:none;border-radius:var(--block-label-right-radius);background:var(--block-label-background-fill);padding:5px;width:22px;height:22px;overflow:hidden;color:var(--block-label-color);font:var(--font-sans);font-size:var(--button-small-text-size)}.wrap.svelte-1cl284s{display:flex;flex-direction:column;width:100%}.head.svelte-1cl284s{display:flex;justify-content:space-between}input[type=number].svelte-1cl284s{display:block;position:relative;outline:none!important;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--input-border-color);border-radius:var(--input-radius);background:var(--input-background-fill);padding:var(--size-2) var(--size-2);height:var(--size-6);color:var(--body-text-color);font-size:var(--input-text-size);line-height:var(--line-sm);text-align:center}input.svelte-1cl284s:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1}input[type=number].svelte-1cl284s:focus{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}input.svelte-1cl284s::placeholder{color:var(--input-placeholder-color)}input[type=range].svelte-1cl284s{width:100%;accent-color:var(--slider-color)}input[disabled].svelte-1cl284s{cursor:not-allowed}input.svelte-56zyyb{display:block;position:relative;background:var(--background-fill-primary);line-height:var(--line-sm)} diff --git 
a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-42f42b44.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-42f42b44.js deleted file mode 100644 index 131b1cd982ca5c9d5050c56e57e0cc86de720bdb..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-42f42b44.js +++ /dev/null @@ -1,2 +0,0 @@ -import{L as o}from"./index-0c011c1e.js";import{s,t as r,L as n,i as P,w as a,f as i,a as Q,b as p}from"./index-90411bc1.js";import"./index-7c0e54a6.js";import"./Blocks-61158678.js";import"./Button-661a0701.js";import"./BlockLabel-95be8dd1.js";import"./Empty-96265974.js";/* empty css */import"./Copy-c4997e4e.js";import"./Download-e5de98da.js";const c=s({String:r.string,Number:r.number,"True False":r.bool,PropertyName:r.propertyName,Null:r.null,",":r.separator,"[ ]":r.squareBracket,"{ }":r.brace}),g=o.deserialize({version:14,states:"$bOVQPOOOOQO'#Cb'#CbOnQPO'#CeOvQPO'#CjOOQO'#Cp'#CpQOQPOOOOQO'#Cg'#CgO}QPO'#CfO!SQPO'#CrOOQO,59P,59PO![QPO,59PO!aQPO'#CuOOQO,59U,59UO!iQPO,59UOVQPO,59QOqQPO'#CkO!nQPO,59^OOQO1G.k1G.kOVQPO'#ClO!vQPO,59aOOQO1G.p1G.pOOQO1G.l1G.lOOQO,59V,59VOOQO-E6i-E6iOOQO,59W,59WOOQO-E6j-E6j",stateData:"#O~OcOS~OQSORSOSSOTSOWQO]ROePO~OVXOeUO~O[[O~PVOg^O~Oh_OVfX~OVaO~OhbO[iX~O[dO~Oh_OVfa~OhbO[ia~O",goto:"!kjPPPPPPkPPkqwPPk{!RPPP!XP!ePP!hXSOR^bQWQRf_TVQ_Q`WRg`QcZRicQTOQZRQe^RhbRYQR]R",nodeNames:"⚠ JsonText True False Null Number String } { Object Property PropertyName ] [ Array",maxTerm:25,nodeProps:[["openedBy",7,"{",12,"["],["closedBy",8,"}",13,"]"]],propSources:[c],skippedNodes:[0],repeatNodeCount:2,tokenData:"(p~RaXY!WYZ!W]^!Wpq!Wrs!]|}$i}!O$n!Q!R$w!R![&V![!]&h!}#O&m#P#Q&r#Y#Z&w#b#c'f#h#i'}#o#p(f#q#r(k~!]Oc~~!`Upq!]qr!]rs!rs#O!]#O#P!w#P~!]~!wOe~~!zXrs!]!P!Q!]#O#P!]#U#V!]#Y#Z!]#b#c!]#f#g!]#h#i!]#i#j#g~#jR!Q![#s!c!i#s#T#Z#s~#vR!Q![$P!c!i$P#T#Z$P~$SR!Q![$]!c!i$]#T#Z$]~$`R!Q![!]!c!i!]#T#Z!]~$nOh~~$qQ!Q!R$w!R![&V~$|RT~!O!P%V!g!h%k#X#Y%k~%YP!Q![%]~%bRT~!Q![%]!g!h%k#X#Y%k~%nR{|%w}!O%w!Q![%}~%zP!Q![%}~&SPT~!Q![%}~&[ST~!O!P%V!Q![&V!g!h%k#X#Y%k~&mOg~~&rO]~~&wO[~~&zP#T#U&}~'QP#`#a'T~'WP#g#h'Z~'^P#X#Y'a~'fOR~~'iP#i#j'l~'oP#`#a'r~'uP#`#a'x~'}OS~~(QP#f#g(T~(WP#i#j(Z~(^P#X#Y(a~(fOQ~~(kOW~~(pOV~",tokenizers:[0],topRules:{JsonText:[0,1]},tokenPrec:0}),$=()=>t=>{try{JSON.parse(t.state.doc.toString())}catch(O){if(!(O instanceof SyntaxError))throw O;const e=m(O,t.state.doc);return[{from:e,message:O.message,severity:"error",to:e}]}return[]};function m(t,O){let e;return(e=t.message.match(/at position (\d+)/))?Math.min(+e[1],O.length):(e=t.message.match(/at line (\d+) column (\d+)/))?Math.min(O.line(+e[1]).from+ +e[2]-1,O.length):0}const u=n.define({name:"json",parser:g.configure({props:[P.add({Object:a({except:/^\s*\}/}),Array:a({except:/^\s*\]/})}),i.add({"Object Array":Q})]}),languageData:{closeBrackets:{brackets:["[","{",'"']},indentOnInput:/^\s*[\}\]]$/}});function j(){return new p(u)}export{j as json,u as jsonLanguage,$ as jsonParseLinter}; -//# sourceMappingURL=index-42f42b44.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mathtext.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mathtext.py deleted file mode 100644 index fc677e83616e479cd6a2d955e00d1c3ea81a0bf6..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mathtext.py +++ /dev/null 
@@ -1,287 +0,0 @@ -r""" -A module for parsing a subset of the TeX math syntax and rendering it to a -Matplotlib backend. - -For a tutorial of its usage, see :doc:`/tutorials/text/mathtext`. This -document is primarily concerned with implementation details. - -The module uses pyparsing_ to parse the TeX expression. - -.. _pyparsing: https://pypi.org/project/pyparsing/ - -The Bakoma distribution of the TeX Computer Modern fonts, and STIX -fonts are supported. There is experimental support for using -arbitrary fonts, but results may vary without proper tweaking and -metrics for those fonts. -""" - -from collections import namedtuple -import functools -import logging - -import numpy as np - -import matplotlib as mpl -from matplotlib import _api, _mathtext -from matplotlib.ft2font import FT2Image, LOAD_NO_HINTING -from matplotlib.font_manager import FontProperties -from ._mathtext import ( # noqa: reexported API - RasterParse, VectorParse, get_unicode_index) - -_log = logging.getLogger(__name__) - - -get_unicode_index.__module__ = __name__ - - -@_api.deprecated("3.6") -class MathtextBackend: - """ - The base class for the mathtext backend-specific code. `MathtextBackend` - subclasses interface between mathtext and specific Matplotlib graphics - backends. - - Subclasses need to override the following: - - - :meth:`render_glyph` - - :meth:`render_rect_filled` - - :meth:`get_results` - - And optionally, if you need to use a FreeType hinting style: - - - :meth:`get_hinting_type` - """ - def __init__(self): - self.width = 0 - self.height = 0 - self.depth = 0 - - def set_canvas_size(self, w, h, d): - """Set the dimension of the drawing canvas.""" - self.width = w - self.height = h - self.depth = d - - def render_glyph(self, ox, oy, info): - """ - Draw a glyph described by *info* to the reference point (*ox*, - *oy*). - """ - raise NotImplementedError() - - def render_rect_filled(self, x1, y1, x2, y2): - """ - Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*). - """ - raise NotImplementedError() - - def get_results(self, box): - """ - Return a backend-specific tuple to return to the backend after - all processing is done. - """ - raise NotImplementedError() - - def get_hinting_type(self): - """ - Get the FreeType hinting type to use with this particular - backend. - """ - return LOAD_NO_HINTING - - -@_api.deprecated("3.6") -class MathtextBackendAgg(MathtextBackend): - """ - Render glyphs and rectangles to an FTImage buffer, which is later - transferred to the Agg image by the Agg backend. 
- """ - def __init__(self): - self.ox = 0 - self.oy = 0 - self.image = None - self.mode = 'bbox' - self.bbox = [0, 0, 0, 0] - super().__init__() - - def _update_bbox(self, x1, y1, x2, y2): - self.bbox = [min(self.bbox[0], x1), - min(self.bbox[1], y1), - max(self.bbox[2], x2), - max(self.bbox[3], y2)] - - def set_canvas_size(self, w, h, d): - super().set_canvas_size(w, h, d) - if self.mode != 'bbox': - self.image = FT2Image(np.ceil(w), np.ceil(h + max(d, 0))) - - def render_glyph(self, ox, oy, info): - if self.mode == 'bbox': - self._update_bbox(ox + info.metrics.xmin, - oy - info.metrics.ymax, - ox + info.metrics.xmax, - oy - info.metrics.ymin) - else: - info.font.draw_glyph_to_bitmap( - self.image, ox, oy - info.metrics.iceberg, info.glyph, - antialiased=mpl.rcParams['text.antialiased']) - - def render_rect_filled(self, x1, y1, x2, y2): - if self.mode == 'bbox': - self._update_bbox(x1, y1, x2, y2) - else: - height = max(int(y2 - y1) - 1, 0) - if height == 0: - center = (y2 + y1) / 2.0 - y = int(center - (height + 1) / 2.0) - else: - y = int(y1) - self.image.draw_rect_filled(int(x1), y, np.ceil(x2), y + height) - - def get_results(self, box): - self.image = None - self.mode = 'render' - return _mathtext.ship(box).to_raster() - - def get_hinting_type(self): - from matplotlib.backends import backend_agg - return backend_agg.get_hinting_flag() - - -@_api.deprecated("3.6") -class MathtextBackendPath(MathtextBackend): - """ - Store information to write a mathtext rendering to the text path - machinery. - """ - - _Result = namedtuple("_Result", "width height depth glyphs rects") - - def __init__(self): - super().__init__() - self.glyphs = [] - self.rects = [] - - def render_glyph(self, ox, oy, info): - oy = self.height - oy + info.offset - self.glyphs.append((info.font, info.fontsize, info.num, ox, oy)) - - def render_rect_filled(self, x1, y1, x2, y2): - self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1)) - - def get_results(self, box): - return _mathtext.ship(box).to_vector() - - -@_api.deprecated("3.6") -class MathTextWarning(Warning): - pass - - -############################################################################## -# MAIN - - -class MathTextParser: - _parser = None - _font_type_mapping = { - 'cm': _mathtext.BakomaFonts, - 'dejavuserif': _mathtext.DejaVuSerifFonts, - 'dejavusans': _mathtext.DejaVuSansFonts, - 'stix': _mathtext.StixFonts, - 'stixsans': _mathtext.StixSansFonts, - 'custom': _mathtext.UnicodeFonts, - } - - def __init__(self, output): - """ - Create a MathTextParser for the given backend *output*. - - Parameters - ---------- - output : {"path", "agg"} - Whether to return a `VectorParse` ("path") or a - `RasterParse` ("agg", or its synonym "macosx"). - """ - self._output_type = _api.check_getitem( - {"path": "vector", "agg": "raster", "macosx": "raster"}, - output=output.lower()) - - def parse(self, s, dpi=72, prop=None): - """ - Parse the given math expression *s* at the given *dpi*. If *prop* is - provided, it is a `.FontProperties` object specifying the "default" - font to use in the math expression, used for all non-math text. - - The results are cached, so multiple calls to `parse` - with the same expression should be fast. - - Depending on the *output* type, this returns either a `VectorParse` or - a `RasterParse`. - """ - # lru_cache can't decorate parse() directly because prop - # is mutable; key the cache using an internal copy (see - # text._get_text_metrics_with_cache for a similar case). 
- prop = prop.copy() if prop is not None else None - return self._parse_cached(s, dpi, prop) - - @functools.lru_cache(50) - def _parse_cached(self, s, dpi, prop): - from matplotlib.backends import backend_agg - - if prop is None: - prop = FontProperties() - fontset_class = _api.check_getitem( - self._font_type_mapping, fontset=prop.get_math_fontfamily()) - load_glyph_flags = { - "vector": LOAD_NO_HINTING, - "raster": backend_agg.get_hinting_flag(), - }[self._output_type] - fontset = fontset_class(prop, load_glyph_flags) - - fontsize = prop.get_size_in_points() - - if self._parser is None: # Cache the parser globally. - self.__class__._parser = _mathtext.Parser() - - box = self._parser.parse(s, fontset, fontsize, dpi) - output = _mathtext.ship(box) - if self._output_type == "vector": - return output.to_vector() - elif self._output_type == "raster": - return output.to_raster() - - -def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None, - *, color=None): - """ - Given a math expression, renders it in a closely-clipped bounding - box to an image file. - - Parameters - ---------- - s : str - A math expression. The math portion must be enclosed in dollar signs. - filename_or_obj : str or path-like or file-like - Where to write the image data. - prop : `.FontProperties`, optional - The size and style of the text. - dpi : float, optional - The output dpi. If not set, the dpi is determined as for - `.Figure.savefig`. - format : str, optional - The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not set, the - format is determined as for `.Figure.savefig`. - color : str, optional - Foreground color, defaults to :rc:`text.color`. - """ - from matplotlib import figure - - parser = MathTextParser('path') - width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop) - - fig = figure.Figure(figsize=(width / 72.0, height / 72.0)) - fig.text(0, depth/height, s, fontproperties=prop, color=color) - fig.savefig(filename_or_obj, dpi=dpi, format=format) - - return depth diff --git a/spaces/leogabraneth/text-generation-webui-main/modules/exllamav2.py b/spaces/leogabraneth/text-generation-webui-main/modules/exllamav2.py deleted file mode 100644 index 558c2365967820669f7cc70682c4eec3913064e5..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/modules/exllamav2.py +++ /dev/null @@ -1,140 +0,0 @@ -import random -import traceback -from pathlib import Path - -import torch -from exllamav2 import ( - ExLlamaV2, - ExLlamaV2Cache, - ExLlamaV2Config, - ExLlamaV2Tokenizer -) -from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler - -from modules import shared -from modules.logging_colors import logger -from modules.text_generation import get_max_prompt_length - -try: - import flash_attn -except ModuleNotFoundError: - logger.warning( - 'You are running ExLlamaV2 without flash-attention. 
This will cause the VRAM usage ' - 'to be a lot higher than it could be.\n' - 'Try installing flash-attention following the instructions here: ' - 'https://github.com/Dao-AILab/flash-attention#installation-and-features' - ) - pass -except Exception: - logger.warning('Failed to load flash-attention due to the following error:\n') - traceback.print_exc() - - -class Exllamav2Model: - def __init__(self): - pass - - @classmethod - def from_pretrained(self, path_to_model): - - path_to_model = Path(f'{shared.args.model_dir}') / Path(path_to_model) - - config = ExLlamaV2Config() - config.model_dir = str(path_to_model) - config.prepare() - - config.max_seq_len = shared.args.max_seq_len - config.scale_pos_emb = shared.args.compress_pos_emb - config.scale_alpha_value = shared.args.alpha_value - - model = ExLlamaV2(config) - - split = None - if shared.args.gpu_split: - split = [float(alloc) for alloc in shared.args.gpu_split.split(",")] - - model.load(split) - - tokenizer = ExLlamaV2Tokenizer(config) - cache = ExLlamaV2Cache(model) - generator = ExLlamaV2BaseGenerator(model, cache, tokenizer) - - result = self() - result.model = model - result.cache = cache - result.tokenizer = tokenizer - result.generator = generator - result.loras = None - return result, result - - def encode(self, string, **kwargs): - return self.tokenizer.encode(string, add_bos=True, encode_special_tokens=True) - - def decode(self, ids, **kwargs): - if isinstance(ids, list): - ids = torch.tensor([ids]) - elif isinstance(ids, torch.Tensor) and ids.numel() == 1: - ids = ids.view(1, -1) - - return self.tokenizer.decode(ids, decode_special_tokens=True)[0] - - def get_logits(self, token_ids, **kwargs): - self.cache.current_seq_len = 0 - if token_ids.shape[-1] > 1: - self.model.forward(token_ids[:, :-1], self.cache, input_mask=None, preprocess_only=True, loras=self.loras) - - return self.model.forward(token_ids[:, -1:], self.cache, input_mask=None, loras=self.loras, **kwargs).float().cpu() - - def generate_with_streaming(self, prompt, state): - settings = ExLlamaV2Sampler.Settings() - settings.temperature = state['temperature'] - settings.top_k = state['top_k'] - settings.top_p = state['top_p'] - settings.typical = state['typical_p'] - settings.token_repetition_penalty = state['repetition_penalty'] - settings.token_repetition_range = -1 if state['repetition_penalty_range'] <= 0 else state['repetition_penalty_range'] - if state['ban_eos_token']: - settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id]) - - if state['custom_token_bans']: - to_ban = [int(x) for x in state['custom_token_bans'].split(',')] - if len(to_ban) > 0: - settings.disallow_tokens(self.tokenizer, to_ban) - - ids = self.tokenizer.encode(prompt, add_bos=state['add_bos_token'], encode_special_tokens=True) - ids = ids[:, -get_max_prompt_length(state):] - initial_len = ids.shape[-1] - - if state['auto_max_new_tokens']: - max_new_tokens = state['truncation_length'] - ids.shape[-1] - else: - max_new_tokens = state['max_new_tokens'] - - # _gen_begin_base - self.cache.current_seq_len = 0 - self.model.forward(ids[:, :-1], self.cache, input_mask=None, preprocess_only=True, loras=self.loras) - - has_leading_space = False - for i in range(max_new_tokens): - logits = self.model.forward(ids[:, -1:], self.cache, input_mask=None, loras=self.loras).float().cpu() - token, _, _ = ExLlamaV2Sampler.sample(logits, settings, ids, random.random(), self.tokenizer) - ids = torch.cat([ids, token], dim=1) - - if i == 0 and 
self.tokenizer.tokenizer.IdToPiece(int(token)).startswith('▁'): - has_leading_space = True - - decoded_text = self.tokenizer.decode(ids[:, initial_len:], decode_special_tokens=not state['skip_special_tokens'])[0] - if has_leading_space: - decoded_text = ' ' + decoded_text - - yield decoded_text - - if token.item() == self.tokenizer.eos_token_id or shared.stop_everything: - break - - def generate(self, prompt, state): - output = '' - for output in self.generate_with_streaming(prompt, state): - pass - - return output diff --git a/spaces/leurez/moss/start.sh b/spaces/leurez/moss/start.sh deleted file mode 100644 index 8440c6ce4d3d4353d6499eb62bce0394ddb8b7d0..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/start.sh +++ /dev/null @@ -1,11 +0,0 @@ - -cd ./service -nohup pnpm start > service.log & -echo "Start service complete!" - - -cd .. -echo "" > front.log -nohup pnpm dev > front.log & -echo "Start front complete!" -tail -f front.log diff --git a/spaces/lewisrxliu/3.3/README.md b/spaces/lewisrxliu/3.3/README.md deleted file mode 100644 index b613bc07f524c71f584c9142446884454685cc33..0000000000000000000000000000000000000000 --- a/spaces/lewisrxliu/3.3/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Lewis -emoji: 🏢 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.22.1 -app_file: app.py -pinned: false -duplicated_from: lewisrxliu/3.2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lewiswu1209/MockingBird/ppg_extractor/encoder/repeat.py b/spaces/lewiswu1209/MockingBird/ppg_extractor/encoder/repeat.py deleted file mode 100644 index 7a8af6ce850e930feb2bf0cd0e9bc7a8d21520e4..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/ppg_extractor/encoder/repeat.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Shigeki Karita -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Repeat the same layer definition.""" - -import torch - - -class MultiSequential(torch.nn.Sequential): - """Multi-input multi-output torch.nn.Sequential.""" - - def forward(self, *args): - """Repeat.""" - for m in self: - args = m(*args) - return args - - -def repeat(N, fn): - """Repeat module N times. - - :param int N: repeat time - :param function fn: function to generate module - :return: repeated modules - :rtype: MultiSequential - """ - return MultiSequential(*[fn(n) for n in range(N)]) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Fitter Trade Theory Book Pdf In Hindi Free Download.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Fitter Trade Theory Book Pdf In Hindi Free Download.md deleted file mode 100644 index ed8b9ed110f74d666b7dec2f5526ec3af24d3834..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Fitter Trade Theory Book Pdf In Hindi Free Download.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

    ITI trade theory books for the mechanical and electrician groups (Volume 1 and Volume 2) are available as full PDF downloads. Dear students, this Sarkari Result update is for railway students: the Rukmini ITI trade theory Hindi (Vol. 1-2) full books in PDF cover preparation for the Railway ALP/Technician second stage exam, including fitter, turner, welder, machinist, diesel mechanic, automobile/tractor mechanic, and motor vehicle mechanic practice. To get the ITI electrician theory book in English as a PDF, click download.

    -

    Fitter Trade Theory Book PDF in Hindi Free Download


    Download Zip ⇒⇒⇒ https://bytlly.com/2uGxWc



    -

    This year (2018-2019), the Indian Railways institute of trade and technology has framed a fresh syllabus and published the latest annual pattern exam for all trade trainees. We have already published the fitter first-year syllabus PDF book to add to our existing collection. You can also get the free PDF from the link below.

    -

    ITI Fitter Syllabus 2nd Year Free PDF

    ITI Fitter Syllabus 1st Year Free PDF

    For the ITI fitter first-year syllabus (2018-2019), you can get the 1st year fitter syllabus for free from the link below (Janta Dnyan).

    -

    The ITI fitter syllabus has been framed at NSQF level 5, with an annual pattern examination from 2018 onwards. The ITI fitter course is a two-year training. The old semester pattern has been discontinued, and a new annual pattern syllabus has been in effect since 2018, which ITI fitter trainees need to follow. The table below provides free PDF links for both the first-year and second-year ITI fitter syllabus.


    First Year Fitter Syllabus - Free PDF

    There are 5 subjects included in the ITI fitter curriculum for first-year training: 1. trade theory and 2. trade practical, plus three core skill papers: 3. employability skills, 4. workshop calculation and science, and 5. engineering drawing. The table below provides the link for the ITI fitter syllabus PDF.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/ljjggr/bingo/src/components/ui/alert-dialog.tsx b/spaces/ljjggr/bingo/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
    - {children} -
    -
    -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/llmonitor/benchmarks/components/CaptchaInput.js b/spaces/llmonitor/benchmarks/components/CaptchaInput.js deleted file mode 100644 index cd5fb398a447a5e4d57edd6aad65c8455bacf60b..0000000000000000000000000000000000000000 --- a/spaces/llmonitor/benchmarks/components/CaptchaInput.js +++ /dev/null @@ -1,21 +0,0 @@ -"use client" -import ReCAPTCHA from "react-google-recaptcha" -import { useState } from "react" - -export default function CaptchaInput() { - const [recaptchaValue, setRecaptchaValue] = useState(null) - - return ( - <> - - setRecaptchaValue(value)} - /> - - ) -} diff --git a/spaces/lunarflu/HF-QA-Demo-3/qa_engine/logger.py b/spaces/lunarflu/HF-QA-Demo-3/qa_engine/logger.py deleted file mode 100644 index ea1703dcbd952487f1fa27488f418eceaa1cf274..0000000000000000000000000000000000000000 --- a/spaces/lunarflu/HF-QA-Demo-3/qa_engine/logger.py +++ /dev/null @@ -1,14 +0,0 @@ -import logging - - -logger = logging.getLogger(__name__) - -def setup_logger() -> None: - """ - Logger setup. - """ - logger.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) diff --git a/spaces/ma-xu/LIVE/pydiffvg/render_pytorch.py b/spaces/ma-xu/LIVE/pydiffvg/render_pytorch.py deleted file mode 100644 index b776ce67a0cdb587c8bd558fe5060a6d96e51e3c..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pydiffvg/render_pytorch.py +++ /dev/null @@ -1,870 +0,0 @@ -import torch -import diffvg -import pydiffvg -import time -from enum import IntEnum -import warnings - -print_timing = False - -def set_print_timing(val): - global print_timing - print_timing=val - -class OutputType(IntEnum): - color = 1 - sdf = 2 - -class RenderFunction(torch.autograd.Function): - """ - The PyTorch interface of diffvg. - """ - @staticmethod - def serialize_scene(canvas_width, - canvas_height, - shapes, - shape_groups, - filter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, - radius = torch.tensor(0.5)), - output_type = OutputType.color, - use_prefiltering = False, - eval_positions = torch.tensor([])): - """ - Given a list of shapes, convert them to a linear list of argument, - so that we can use it in PyTorch. 
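- Tensors are generally moved to CPU and appended in a fixed order that
- RenderFunction.forward later unpacks positionally.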
- """ - num_shapes = len(shapes) - num_shape_groups = len(shape_groups) - args = [] - args.append(canvas_width) - args.append(canvas_height) - args.append(num_shapes) - args.append(num_shape_groups) - args.append(output_type) - args.append(use_prefiltering) - args.append(eval_positions.to(pydiffvg.get_device())) - for shape in shapes: - use_thickness = False - if isinstance(shape, pydiffvg.Circle): - assert(shape.center.is_contiguous()) - args.append(diffvg.ShapeType.circle) - args.append(shape.radius.cpu()) - args.append(shape.center.cpu()) - elif isinstance(shape, pydiffvg.Ellipse): - assert(shape.radius.is_contiguous()) - assert(shape.center.is_contiguous()) - args.append(diffvg.ShapeType.ellipse) - args.append(shape.radius.cpu()) - args.append(shape.center.cpu()) - elif isinstance(shape, pydiffvg.Path): - assert(shape.num_control_points.is_contiguous()) - assert(shape.points.is_contiguous()) - assert(shape.points.shape[1] == 2) - assert(torch.isfinite(shape.points).all()) - args.append(diffvg.ShapeType.path) - args.append(shape.num_control_points.to(torch.int32).cpu()) - args.append(shape.points.cpu()) - if len(shape.stroke_width.shape) > 0 and shape.stroke_width.shape[0] > 1: - assert(torch.isfinite(shape.stroke_width).all()) - use_thickness = True - args.append(shape.stroke_width.cpu()) - else: - args.append(None) - args.append(shape.is_closed) - args.append(shape.use_distance_approx) - elif isinstance(shape, pydiffvg.Polygon): - assert(shape.points.is_contiguous()) - assert(shape.points.shape[1] == 2) - args.append(diffvg.ShapeType.path) - if shape.is_closed: - args.append(torch.zeros(shape.points.shape[0], dtype = torch.int32)) - else: - args.append(torch.zeros(shape.points.shape[0] - 1, dtype = torch.int32)) - args.append(shape.points.cpu()) - args.append(None) - args.append(shape.is_closed) - args.append(False) # use_distance_approx - elif isinstance(shape, pydiffvg.Rect): - assert(shape.p_min.is_contiguous()) - assert(shape.p_max.is_contiguous()) - args.append(diffvg.ShapeType.rect) - args.append(shape.p_min.cpu()) - args.append(shape.p_max.cpu()) - else: - assert(False) - if use_thickness: - args.append(torch.tensor(0.0)) - else: - args.append(shape.stroke_width.cpu()) - - for shape_group in shape_groups: - assert(shape_group.shape_ids.is_contiguous()) - args.append(shape_group.shape_ids.to(torch.int32).cpu()) - # Fill color - if shape_group.fill_color is None: - args.append(None) - elif isinstance(shape_group.fill_color, torch.Tensor): - assert(shape_group.fill_color.is_contiguous()) - args.append(diffvg.ColorType.constant) - args.append(shape_group.fill_color.cpu()) - elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): - assert(shape_group.fill_color.begin.is_contiguous()) - assert(shape_group.fill_color.end.is_contiguous()) - assert(shape_group.fill_color.offsets.is_contiguous()) - assert(shape_group.fill_color.stop_colors.is_contiguous()) - args.append(diffvg.ColorType.linear_gradient) - args.append(shape_group.fill_color.begin.cpu()) - args.append(shape_group.fill_color.end.cpu()) - args.append(shape_group.fill_color.offsets.cpu()) - args.append(shape_group.fill_color.stop_colors.cpu()) - elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): - assert(shape_group.fill_color.center.is_contiguous()) - assert(shape_group.fill_color.radius.is_contiguous()) - assert(shape_group.fill_color.offsets.is_contiguous()) - assert(shape_group.fill_color.stop_colors.is_contiguous()) - args.append(diffvg.ColorType.radial_gradient) - 
args.append(shape_group.fill_color.center.cpu()) - args.append(shape_group.fill_color.radius.cpu()) - args.append(shape_group.fill_color.offsets.cpu()) - args.append(shape_group.fill_color.stop_colors.cpu()) - - if shape_group.fill_color is not None: - # go through the underlying shapes and check if they are all closed - for shape_id in shape_group.shape_ids: - if isinstance(shapes[shape_id], pydiffvg.Path): - if not shapes[shape_id].is_closed: - warnings.warn("Detected non-closed paths with fill color. This might causes unexpected results.", Warning) - - # Stroke color - if shape_group.stroke_color is None: - args.append(None) - elif isinstance(shape_group.stroke_color, torch.Tensor): - assert(shape_group.stroke_color.is_contiguous()) - args.append(diffvg.ColorType.constant) - args.append(shape_group.stroke_color.cpu()) - elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): - assert(shape_group.stroke_color.begin.is_contiguous()) - assert(shape_group.stroke_color.end.is_contiguous()) - assert(shape_group.stroke_color.offsets.is_contiguous()) - assert(shape_group.stroke_color.stop_colors.is_contiguous()) - assert(torch.isfinite(shape_group.stroke_color.stop_colors).all()) - args.append(diffvg.ColorType.linear_gradient) - args.append(shape_group.stroke_color.begin.cpu()) - args.append(shape_group.stroke_color.end.cpu()) - args.append(shape_group.stroke_color.offsets.cpu()) - args.append(shape_group.stroke_color.stop_colors.cpu()) - elif isinstance(shape_group.stroke_color, pydiffvg.RadialGradient): - assert(shape_group.stroke_color.center.is_contiguous()) - assert(shape_group.stroke_color.radius.is_contiguous()) - assert(shape_group.stroke_color.offsets.is_contiguous()) - assert(shape_group.stroke_color.stop_colors.is_contiguous()) - assert(torch.isfinite(shape_group.stroke_color.stop_colors).all()) - args.append(diffvg.ColorType.radial_gradient) - args.append(shape_group.stroke_color.center.cpu()) - args.append(shape_group.stroke_color.radius.cpu()) - args.append(shape_group.stroke_color.offsets.cpu()) - args.append(shape_group.stroke_color.stop_colors.cpu()) - args.append(shape_group.use_even_odd_rule) - # Transformation - args.append(shape_group.shape_to_canvas.contiguous().cpu()) - args.append(filter.type) - args.append(filter.radius.cpu()) - return args - - @staticmethod - def forward(ctx, - width, - height, - num_samples_x, - num_samples_y, - seed, - background_image, - *args): - """ - Forward rendering pass. 
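- Rebuilds the diffvg scene from the flat argument list produced by
- serialize_scene, then renders either a color image or a signed distance
- field depending on output_type.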
- """ - # Unpack arguments - current_index = 0 - canvas_width = args[current_index] - current_index += 1 - canvas_height = args[current_index] - current_index += 1 - num_shapes = args[current_index] - current_index += 1 - num_shape_groups = args[current_index] - current_index += 1 - output_type = args[current_index] - current_index += 1 - use_prefiltering = args[current_index] - current_index += 1 - eval_positions = args[current_index] - current_index += 1 - shapes = [] - shape_groups = [] - shape_contents = [] # Important to avoid GC deleting the shapes - color_contents = [] # Same as above - for shape_id in range(num_shapes): - shape_type = args[current_index] - current_index += 1 - if shape_type == diffvg.ShapeType.circle: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1])) - elif shape_type == diffvg.ShapeType.ellipse: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]), - diffvg.Vector2f(center[0], center[1])) - elif shape_type == diffvg.ShapeType.path: - num_control_points = args[current_index] - current_index += 1 - points = args[current_index] - current_index += 1 - thickness = args[current_index] - current_index += 1 - is_closed = args[current_index] - current_index += 1 - use_distance_approx = args[current_index] - current_index += 1 - shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()), - diffvg.float_ptr(points.data_ptr()), - diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0), - num_control_points.shape[0], - points.shape[0], - is_closed, - use_distance_approx) - elif shape_type == diffvg.ShapeType.rect: - p_min = args[current_index] - current_index += 1 - p_max = args[current_index] - current_index += 1 - shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]), - diffvg.Vector2f(p_max[0], p_max[1])) - else: - assert(False) - stroke_width = args[current_index] - current_index += 1 - shapes.append(diffvg.Shape(\ - shape_type, shape.get_ptr(), stroke_width.item())) - shape_contents.append(shape) - - for shape_group_id in range(num_shape_groups): - shape_ids = args[current_index] - current_index += 1 - fill_color_type = args[current_index] - current_index += 1 - if fill_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - fill_color = diffvg.Constant(\ - diffvg.Vector4f(color[0], color[1], color[2], color[3])) - elif fill_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), - diffvg.Vector2f(end[0], end[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif fill_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), - diffvg.Vector2f(radius[0], radius[1]), - 
offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif fill_color_type is None: - fill_color = None - else: - assert(False) - stroke_color_type = args[current_index] - current_index += 1 - if stroke_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - stroke_color = diffvg.Constant(\ - diffvg.Vector4f(color[0], color[1], color[2], color[3])) - elif stroke_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), - diffvg.Vector2f(end[0], end[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif stroke_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), - diffvg.Vector2f(radius[0], radius[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif stroke_color_type is None: - stroke_color = None - else: - assert(False) - use_even_odd_rule = args[current_index] - current_index += 1 - shape_to_canvas = args[current_index] - current_index += 1 - - if fill_color is not None: - color_contents.append(fill_color) - if stroke_color is not None: - color_contents.append(stroke_color) - shape_groups.append(diffvg.ShapeGroup(\ - diffvg.int_ptr(shape_ids.data_ptr()), - shape_ids.shape[0], - diffvg.ColorType.constant if fill_color_type is None else fill_color_type, - diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), - diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, - diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), - use_even_odd_rule, - diffvg.float_ptr(shape_to_canvas.data_ptr()))) - - filter_type = args[current_index] - current_index += 1 - filter_radius = args[current_index] - current_index += 1 - filt = diffvg.Filter(filter_type, filter_radius) - - start = time.time() - scene = diffvg.Scene(canvas_width, canvas_height, - shapes, shape_groups, filt, pydiffvg.get_use_gpu(), - pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1) - time_elapsed = time.time() - start - global print_timing - if print_timing: - print('Scene construction, time: %.5f s' % time_elapsed) - - if output_type == OutputType.color: - assert(eval_positions.shape[0] == 0) - rendered_image = torch.zeros(height, width, 4, device = pydiffvg.get_device()) - else: - assert(output_type == OutputType.sdf) - if eval_positions.shape[0] == 0: - rendered_image = torch.zeros(height, width, 1, device = pydiffvg.get_device()) - else: - rendered_image = torch.zeros(eval_positions.shape[0], 1, device = pydiffvg.get_device()) - - if background_image is not None: - background_image = background_image.to(pydiffvg.get_device()) - if background_image.shape[2] == 3: - background_image = torch.cat((\ - background_image, torch.ones(background_image.shape[0], background_image.shape[1], 1, - device = 
background_image.device)), dim = 2) - background_image = background_image.contiguous() - assert(background_image.shape[0] == rendered_image.shape[0]) - assert(background_image.shape[1] == rendered_image.shape[1]) - assert(background_image.shape[2] == 4) - - start = time.time() - diffvg.render(scene, - diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), - diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.color else 0), - diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.sdf else 0), - width, - height, - num_samples_x, - num_samples_y, - seed, - diffvg.float_ptr(0), # d_background_image - diffvg.float_ptr(0), # d_render_image - diffvg.float_ptr(0), # d_render_sdf - diffvg.float_ptr(0), # d_translation - use_prefiltering, - diffvg.float_ptr(eval_positions.data_ptr()), - eval_positions.shape[0]) - assert(torch.isfinite(rendered_image).all()) - time_elapsed = time.time() - start - if print_timing: - print('Forward pass, time: %.5f s' % time_elapsed) - - ctx.scene = scene - ctx.background_image = background_image - ctx.shape_contents = shape_contents - ctx.color_contents = color_contents - ctx.filter = filt - ctx.width = width - ctx.height = height - ctx.num_samples_x = num_samples_x - ctx.num_samples_y = num_samples_y - ctx.seed = seed - ctx.output_type = output_type - ctx.use_prefiltering = use_prefiltering - ctx.eval_positions = eval_positions - return rendered_image - - @staticmethod - def render_grad(grad_img, - width, - height, - num_samples_x, - num_samples_y, - seed, - background_image, - *args): - if not grad_img.is_contiguous(): - grad_img = grad_img.contiguous() - assert(torch.isfinite(grad_img).all()) - - # Unpack arguments - current_index = 0 - canvas_width = args[current_index] - current_index += 1 - canvas_height = args[current_index] - current_index += 1 - num_shapes = args[current_index] - current_index += 1 - num_shape_groups = args[current_index] - current_index += 1 - output_type = args[current_index] - current_index += 1 - use_prefiltering = args[current_index] - current_index += 1 - eval_positions = args[current_index] - current_index += 1 - shapes = [] - shape_groups = [] - shape_contents = [] # Important to avoid GC deleting the shapes - color_contents = [] # Same as above - for shape_id in range(num_shapes): - shape_type = args[current_index] - current_index += 1 - if shape_type == diffvg.ShapeType.circle: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1])) - elif shape_type == diffvg.ShapeType.ellipse: - radius = args[current_index] - current_index += 1 - center = args[current_index] - current_index += 1 - shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]), - diffvg.Vector2f(center[0], center[1])) - elif shape_type == diffvg.ShapeType.path: - num_control_points = args[current_index] - current_index += 1 - points = args[current_index] - current_index += 1 - thickness = args[current_index] - current_index += 1 - is_closed = args[current_index] - current_index += 1 - use_distance_approx = args[current_index] - current_index += 1 - shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()), - diffvg.float_ptr(points.data_ptr()), - diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0), - num_control_points.shape[0], - points.shape[0], - is_closed, - use_distance_approx) - elif shape_type == diffvg.ShapeType.rect: - p_min = args[current_index] - 
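# rects are serialized as their two corner points, p_min followed by p_max -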
current_index += 1 - p_max = args[current_index] - current_index += 1 - shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]), - diffvg.Vector2f(p_max[0], p_max[1])) - else: - assert(False) - stroke_width = args[current_index] - current_index += 1 - shapes.append(diffvg.Shape(\ - shape_type, shape.get_ptr(), stroke_width.item())) - shape_contents.append(shape) - - for shape_group_id in range(num_shape_groups): - shape_ids = args[current_index] - current_index += 1 - fill_color_type = args[current_index] - current_index += 1 - if fill_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - fill_color = diffvg.Constant(\ - diffvg.Vector4f(color[0], color[1], color[2], color[3])) - elif fill_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), - diffvg.Vector2f(end[0], end[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif fill_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), - diffvg.Vector2f(radius[0], radius[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif fill_color_type is None: - fill_color = None - else: - assert(False) - stroke_color_type = args[current_index] - current_index += 1 - if stroke_color_type == diffvg.ColorType.constant: - color = args[current_index] - current_index += 1 - stroke_color = diffvg.Constant(\ - diffvg.Vector4f(color[0], color[1], color[2], color[3])) - elif stroke_color_type == diffvg.ColorType.linear_gradient: - beg = args[current_index] - current_index += 1 - end = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), - diffvg.Vector2f(end[0], end[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif stroke_color_type == diffvg.ColorType.radial_gradient: - center = args[current_index] - current_index += 1 - radius = args[current_index] - current_index += 1 - offsets = args[current_index] - current_index += 1 - stop_colors = args[current_index] - current_index += 1 - assert(offsets.shape[0] == stop_colors.shape[0]) - stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), - diffvg.Vector2f(radius[0], radius[1]), - offsets.shape[0], - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - elif stroke_color_type is None: - stroke_color = None - else: - assert(False) - use_even_odd_rule = args[current_index] - current_index += 1 - shape_to_canvas = args[current_index] - current_index += 1 - - if fill_color is not None: - color_contents.append(fill_color) - if stroke_color is not None: - 
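# keep Python-side references so the colors are not garbage collected while the native scene still uses them -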
color_contents.append(stroke_color) - shape_groups.append(diffvg.ShapeGroup(\ - diffvg.int_ptr(shape_ids.data_ptr()), - shape_ids.shape[0], - diffvg.ColorType.constant if fill_color_type is None else fill_color_type, - diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), - diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, - diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), - use_even_odd_rule, - diffvg.float_ptr(shape_to_canvas.data_ptr()))) - - filter_type = args[current_index] - current_index += 1 - filter_radius = args[current_index] - current_index += 1 - filt = diffvg.Filter(filter_type, filter_radius) - - scene = diffvg.Scene(canvas_width, canvas_height, - shapes, shape_groups, filt, pydiffvg.get_use_gpu(), - pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1) - - if output_type == OutputType.color: - assert(grad_img.shape[2] == 4) - else: - assert(grad_img.shape[2] == 1) - - if background_image is not None: - background_image = background_image.to(pydiffvg.get_device()) - if background_image.shape[2] == 3: - background_image = torch.cat((\ - background_image, torch.ones(background_image.shape[0], background_image.shape[1], 1, - device = background_image.device)), dim = 2) - background_image = background_image.contiguous() - assert(background_image.shape[0] == rendered_image.shape[0]) - assert(background_image.shape[1] == rendered_image.shape[1]) - assert(background_image.shape[2] == 4) - - translation_grad_image = \ - torch.zeros(height, width, 2, device = pydiffvg.get_device()) - start = time.time() - diffvg.render(scene, - diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), - diffvg.float_ptr(0), # render_image - diffvg.float_ptr(0), # render_sdf - width, - height, - num_samples_x, - num_samples_y, - seed, - diffvg.float_ptr(0), # d_background_image - diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), - diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), - diffvg.float_ptr(translation_grad_image.data_ptr()), - use_prefiltering, - diffvg.float_ptr(eval_positions.data_ptr()), - eval_positions.shape[0]) - time_elapsed = time.time() - start - if print_timing: - print('Gradient pass, time: %.5f s' % time_elapsed) - assert(torch.isfinite(translation_grad_image).all()) - - return translation_grad_image - - @staticmethod - def backward(ctx, - grad_img): - if not grad_img.is_contiguous(): - grad_img = grad_img.contiguous() - assert(torch.isfinite(grad_img).all()) - - scene = ctx.scene - width = ctx.width - height = ctx.height - num_samples_x = ctx.num_samples_x - num_samples_y = ctx.num_samples_y - seed = ctx.seed - output_type = ctx.output_type - use_prefiltering = ctx.use_prefiltering - eval_positions = ctx.eval_positions - background_image = ctx.background_image - - if background_image is not None: - d_background_image = torch.zeros_like(background_image) - else: - d_background_image = None - - start = time.time() - diffvg.render(scene, - diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), - diffvg.float_ptr(0), # render_image - diffvg.float_ptr(0), # render_sdf - width, - height, - num_samples_x, - num_samples_y, - seed, - diffvg.float_ptr(d_background_image.data_ptr() if background_image is not None else 0), - diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), - diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), 
- diffvg.float_ptr(0), # d_translation - use_prefiltering, - diffvg.float_ptr(eval_positions.data_ptr()), - eval_positions.shape[0]) - time_elapsed = time.time() - start - global print_timing - if print_timing: - print('Backward pass, time: %.5f s' % time_elapsed) - - d_args = [] - d_args.append(None) # width - d_args.append(None) # height - d_args.append(None) # num_samples_x - d_args.append(None) # num_samples_y - d_args.append(None) # seed - d_args.append(d_background_image) - d_args.append(None) # canvas_width - d_args.append(None) # canvas_height - d_args.append(None) # num_shapes - d_args.append(None) # num_shape_groups - d_args.append(None) # output_type - d_args.append(None) # use_prefiltering - d_args.append(None) # eval_positions - for shape_id in range(scene.num_shapes): - d_args.append(None) # type - d_shape = scene.get_d_shape(shape_id) - use_thickness = False - if d_shape.type == diffvg.ShapeType.circle: - d_circle = d_shape.as_circle() - radius = torch.tensor(d_circle.radius) - assert(torch.isfinite(radius).all()) - d_args.append(radius) - c = d_circle.center - c = torch.tensor((c.x, c.y)) - assert(torch.isfinite(c).all()) - d_args.append(c) - elif d_shape.type == diffvg.ShapeType.ellipse: - d_ellipse = d_shape.as_ellipse() - r = d_ellipse.radius - r = torch.tensor((d_ellipse.radius.x, d_ellipse.radius.y)) - assert(torch.isfinite(r).all()) - d_args.append(r) - c = d_ellipse.center - c = torch.tensor((c.x, c.y)) - assert(torch.isfinite(c).all()) - d_args.append(c) - elif d_shape.type == diffvg.ShapeType.path: - d_path = d_shape.as_path() - points = torch.zeros((d_path.num_points, 2)) - thickness = None - if d_path.has_thickness(): - use_thickness = True - thickness = torch.zeros(d_path.num_points) - d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(thickness.data_ptr())) - else: - d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(0)) - assert(torch.isfinite(points).all()) - if thickness is not None: - assert(torch.isfinite(thickness).all()) - d_args.append(None) # num_control_points - d_args.append(points) - d_args.append(thickness) - d_args.append(None) # is_closed - d_args.append(None) # use_distance_approx - elif d_shape.type == diffvg.ShapeType.rect: - d_rect = d_shape.as_rect() - p_min = torch.tensor((d_rect.p_min.x, d_rect.p_min.y)) - p_max = torch.tensor((d_rect.p_max.x, d_rect.p_max.y)) - assert(torch.isfinite(p_min).all()) - assert(torch.isfinite(p_max).all()) - d_args.append(p_min) - d_args.append(p_max) - else: - assert(False) - if use_thickness: - d_args.append(None) - else: - w = torch.tensor((d_shape.stroke_width)) - assert(torch.isfinite(w).all()) - d_args.append(w) - - for group_id in range(scene.num_shape_groups): - d_shape_group = scene.get_d_shape_group(group_id) - d_args.append(None) # shape_ids - d_args.append(None) # fill_color_type - if d_shape_group.has_fill_color(): - if d_shape_group.fill_color_type == diffvg.ColorType.constant: - d_constant = d_shape_group.fill_color_as_constant() - c = d_constant.color - d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) - elif d_shape_group.fill_color_type == diffvg.ColorType.linear_gradient: - d_linear_gradient = d_shape_group.fill_color_as_linear_gradient() - beg = d_linear_gradient.begin - d_args.append(torch.tensor((beg.x, beg.y))) - end = d_linear_gradient.end - d_args.append(torch.tensor((end.x, end.y))) - offsets = torch.zeros((d_linear_gradient.num_stops)) - stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) - d_linear_gradient.copy_to(\ - 
diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - assert(torch.isfinite(stop_colors).all()) - d_args.append(offsets) - d_args.append(stop_colors) - elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: - d_radial_gradient = d_shape_group.fill_color_as_radial_gradient() - center = d_radial_gradient.center - d_args.append(torch.tensor((center.x, center.y))) - radius = d_radial_gradient.radius - d_args.append(torch.tensor((radius.x, radius.y))) - offsets = torch.zeros((d_radial_gradient.num_stops)) - stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) - d_radial_gradient.copy_to(\ - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - assert(torch.isfinite(stop_colors).all()) - d_args.append(offsets) - d_args.append(stop_colors) - else: - assert(False) - d_args.append(None) # stroke_color_type - if d_shape_group.has_stroke_color(): - if d_shape_group.stroke_color_type == diffvg.ColorType.constant: - d_constant = d_shape_group.stroke_color_as_constant() - c = d_constant.color - d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) - elif d_shape_group.stroke_color_type == diffvg.ColorType.linear_gradient: - d_linear_gradient = d_shape_group.stroke_color_as_linear_gradient() - beg = d_linear_gradient.begin - d_args.append(torch.tensor((beg.x, beg.y))) - end = d_linear_gradient.end - d_args.append(torch.tensor((end.x, end.y))) - offsets = torch.zeros((d_linear_gradient.num_stops)) - stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) - d_linear_gradient.copy_to(\ - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - assert(torch.isfinite(stop_colors).all()) - d_args.append(offsets) - d_args.append(stop_colors) - elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: - d_radial_gradient = d_shape_group.stroke_color_as_radial_gradient() - center = d_radial_gradient.center - d_args.append(torch.tensor((center.x, center.y))) - radius = d_radial_gradient.radius - d_args.append(torch.tensor((radius.x, radius.y))) - offsets = torch.zeros((d_radial_gradient.num_stops)) - stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) - d_radial_gradient.copy_to(\ - diffvg.float_ptr(offsets.data_ptr()), - diffvg.float_ptr(stop_colors.data_ptr())) - assert(torch.isfinite(stop_colors).all()) - d_args.append(offsets) - d_args.append(stop_colors) - else: - assert(False) - d_args.append(None) # use_even_odd_rule - d_shape_to_canvas = torch.zeros((3, 3)) - d_shape_group.copy_to(diffvg.float_ptr(d_shape_to_canvas.data_ptr())) - assert(torch.isfinite(d_shape_to_canvas).all()) - d_args.append(d_shape_to_canvas) - d_args.append(None) # filter_type - d_args.append(torch.tensor(scene.get_d_filter_radius())) - - return tuple(d_args) diff --git a/spaces/manan/Score-Clinical-Patient-Notes/model.py b/spaces/manan/Score-Clinical-Patient-Notes/model.py deleted file mode 100644 index f4704a4832c7aacebdf88db0f5645db2777634da..0000000000000000000000000000000000000000 --- a/spaces/manan/Score-Clinical-Patient-Notes/model.py +++ /dev/null @@ -1,345 +0,0 @@ -import gc - -import numpy as np -import pandas as pd - -import torch -from torch import nn -import transformers -from transformers import AutoModel, AutoTokenizer, AutoConfig - - -config = dict( - # basic - seed = 3407, - num_jobs=1, - num_labels=2, - - # model info - tokenizer_path = 'roberta-large', # 'allenai/biomed_roberta_base', - model_checkpoint = 'roberta-large', # 'allenai/biomed_roberta_base', - device = 'cuda' if 
torch.cuda.is_available() else 'cpu', - - # training paramters - max_length = 512, - batch_size=16, - - # for this notebook - debug = False, -) - - -def create_sample_test(): - feats = pd.read_csv(f"../input/nbme-score-clinical-patient-notes/features.csv") - feats.loc[27, 'feature_text'] = "Last-Pap-smear-1-year-ago" - - notes = pd.read_csv(f"../input/nbme-score-clinical-patient-notes/patient_notes.csv") - test = pd.read_csv(f"../input/nbme-score-clinical-patient-notes/test.csv") - - merged = test.merge(notes, how = "left") - merged = merged.merge(feats, how = "left") - - def process_feature_text(text): - return text.replace("-OR-", ";-").replace("-", " ") - merged["feature_text"] = [process_feature_text(x) for x in merged["feature_text"]] - - return merged.sample(1).reset_index(drop=True) - -class NBMETestData(torch.utils.data.Dataset): - def __init__(self, feature_text, pn_history, tokenizer): - self.feature_text = feature_text - self.pn_history = pn_history - self.tokenizer = tokenizer - - def __len__(self): - return len(self.feature_text) - - def __getitem__(self, idx): - tokenized = self.tokenizer( - self.feature_text[idx], - self.pn_history[idx], - truncation = "only_second", - max_length = config['max_length'], - padding = "max_length", - return_offsets_mapping = True - ) - tokenized["sequence_ids"] = tokenized.sequence_ids() - - input_ids = np.array(tokenized["input_ids"]) - attention_mask = np.array(tokenized["attention_mask"]) - offset_mapping = np.array(tokenized["offset_mapping"]) - sequence_ids = np.array(tokenized["sequence_ids"]).astype("float16") - - return { - 'input_ids': input_ids, - 'attention_mask': attention_mask, - 'offset_mapping': offset_mapping, - 'sequence_ids': sequence_ids, - } - -# class NBMEModel(nn.Module): -# def __init__(self, num_labels=1, path=None): -# super().__init__() - -# layer_norm_eps: float = 1e-6 - -# self.path = path -# self.num_labels = num_labels - -# self.transformer = transformers.AutoModel.from_pretrained(config['model_checkpoint']) -# self.dropout = nn.Dropout(0.2) -# self.output = nn.Linear(768, 1) - -# if self.path is not None: -# self.load_state_dict(torch.load(self.path)['model']) - -# def forward(self, data): - -# ids = data['input_ids'] -# mask = data['attention_mask'] -# try: -# target = data['targets'] -# except: -# target = None - -# transformer_out = self.transformer(ids, mask) -# sequence_output = transformer_out[0] -# sequence_output = self.dropout(sequence_output) -# logits = self.output(sequence_output) - -# ret = { -# "logits": torch.sigmoid(logits), -# } - -# if target is not None: -# loss = self.get_loss(logits, target) -# ret['loss'] = loss -# ret['targets'] = target - -# return ret - - -# def get_optimizer(self, learning_rate, weigth_decay): -# optimizer = torch.optim.AdamW( -# self.parameters(), -# lr=learning_rate, -# weight_decay=weigth_decay, -# ) -# if self.path is not None: -# optimizer.load_state_dict(torch.load(self.path)['optimizer']) - -# return optimizer - -# def get_scheduler(self, optimizer, num_warmup_steps, num_training_steps): -# scheduler = transformers.get_linear_schedule_with_warmup( -# optimizer, -# num_warmup_steps=num_warmup_steps, -# num_training_steps=num_training_steps, -# ) -# if self.path is not None: -# scheduler.load_state_dict(torch.load(self.path)['scheduler']) - -# return scheduler - -# def get_loss(self, output, target): -# loss_fn = nn.BCEWithLogitsLoss(reduction="none") -# loss = loss_fn(output.view(-1, 1), target.view(-1, 1)) -# loss = torch.masked_select(loss, target.view(-1, 1) != 
-100).mean() -# return loss - - -class NBMEModel(nn.Module): - def __init__(self, num_labels=2, path=None): - super().__init__() - - layer_norm_eps: float = 1e-6 - - self.path = path - self.num_labels = num_labels - self.transformer = transformers.AutoModel.from_pretrained(config['model_checkpoint']) - self.dropout = nn.Dropout(0.1) - - self.dropout1 = nn.Dropout(0.1) - self.dropout2 = nn.Dropout(0.2) - self.dropout3 = nn.Dropout(0.3) - self.dropout4 = nn.Dropout(0.4) - self.dropout5 = nn.Dropout(0.5) - - self.output = nn.Linear(1024, 1) - - if self.path is not None: - self.load_state_dict(torch.load(self.path)['model']) - - def forward(self, data): - - ids = data['input_ids'] - mask = data['attention_mask'] - try: - target = data['targets'] - except: - target = None - - transformer_out = self.transformer(ids, mask) - sequence_output = transformer_out[0] - sequence_output = self.dropout(sequence_output) - - logits1 = self.output(self.dropout1(sequence_output)) - logits2 = self.output(self.dropout2(sequence_output)) - logits3 = self.output(self.dropout3(sequence_output)) - logits4 = self.output(self.dropout4(sequence_output)) - logits5 = self.output(self.dropout5(sequence_output)) - - logits = (logits1 + logits2 + logits3 + logits4 + logits5) / 5 - ret = { - 'logits': torch.sigmoid(logits), - } - - loss = 0 - - if target is not None: - loss1 = self.get_loss(logits1, target) - loss2 = self.get_loss(logits2, target) - loss3 = self.get_loss(logits3, target) - loss4 = self.get_loss(logits4, target) - loss5 = self.get_loss(logits5, target) - loss = (loss1 + loss2 + loss3 + loss4 + loss5) / 5 - ret['loss'] = loss - ret['target'] = target - - return ret - - - def get_optimizer(self, learning_rate, weigth_decay): - optimizer = torch.optim.AdamW( - self.parameters(), - lr=learning_rate, - weight_decay=weigth_decay, - ) - if self.path is not None: - optimizer.load_state_dict(torch.load(self.path)['optimizer']) - - return optimizer - - def get_scheduler(self, optimizer, num_warmup_steps, num_training_steps): - scheduler = transformers.get_linear_schedule_with_warmup( - optimizer, - num_warmup_steps=num_warmup_steps, - num_training_steps=num_training_steps, - ) - if self.path is not None: - scheduler.load_state_dict(torch.load(self.path)['scheduler']) - - return scheduler - - def get_loss(self, output, target): - loss_fn = nn.BCEWithLogitsLoss(reduction="none") - loss = loss_fn(output.view(-1, 1), target.view(-1, 1)) - loss = torch.masked_select(loss, target.view(-1, 1) != -100).mean() - return loss - - -def get_location_predictions(preds, offset_mapping, sequence_ids, test=False): - all_predictions = [] - for pred, offsets, seq_ids in zip(preds, offset_mapping, sequence_ids): - start_idx = None - current_preds = [] - for p, o, s_id in zip(pred, offsets, seq_ids): - if s_id is None or s_id == 0: - continue - if p > 0.5: - if start_idx is None: - start_idx = o[0] - end_idx = o[1] - elif start_idx is not None: - if test: - current_preds.append(f"{start_idx} {end_idx}") - else: - current_preds.append((start_idx, end_idx)) - start_idx = None - if test: - all_predictions.append("; ".join(current_preds)) - else: - all_predictions.append(current_preds) - return all_predictions - - - -def predict_location_preds(tokenizer, model, feature_text, pn_history, pn_history_lower): - - test_ds = NBMETestData(feature_text, pn_history_lower, tokenizer) - test_dl = torch.utils.data.DataLoader( - test_ds, - batch_size=config['batch_size'], - pin_memory=True, - shuffle=False, - drop_last=False - ) - - all_preds = None - 
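# offsets and sequence ids are collected so token-level probabilities can be mapped back to character spans -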
offsets = [] - seq_ids = [] - - preds = [] - - with torch.no_grad(): - for batch in test_dl: - - for k, v in batch.items(): - if k not in ['offset_mapping', 'sequence_id']: - batch[k] = v.to(config['device']) - - logits = model(batch)['logits'] - preds.append(logits.cpu().numpy()) - - offset_mapping = batch['offset_mapping'] - sequence_ids = batch['sequence_ids'] - offsets.append(offset_mapping.cpu().numpy()) - seq_ids.append(sequence_ids.cpu().numpy()) - - preds = np.concatenate(preds, axis=0) - if all_preds is None: - all_preds = np.array(preds).astype(np.float32) - else: - all_preds += np.array(preds).astype(np.float32) - torch.cuda.empty_cache() - - all_preds = all_preds.squeeze() - - offsets = np.concatenate(offsets, axis=0) - seq_ids = np.concatenate(seq_ids, axis=0) - - # print(all_preds.shape, offsets.shape, seq_ids.shape) - - location_preds = get_location_predictions([all_preds], offsets, seq_ids, test=False)[0] - - x = [] - - for location in location_preds: - x.append(pn_history[0][location[0]: location[1]]) - - return location_preds, ', '.join(x) - -def get_predictions(feature_text, pn_history): - feature_text = feature_text.lower().replace("-OR-", ";-").replace("-", " ") - pn_history_lower = pn_history.lower() - - location_preds, pred_string = predict_location_preds(tokenizer, model, [feature_text], [pn_history], [pn_history_lower]) - - if pred_string == "": - pred_string = 'Feature not present!' - else: - pred_string = 'Feature is present!' + '\nText Span - ' + pred_string - - return pred_string - -tokenizer = AutoTokenizer.from_pretrained(config['tokenizer_path']) -path = 'model_large_pseudo_label.pth' - -model = NBMEModel().to(config['device']) -model.load_state_dict( - torch.load( - path, - map_location=torch.device(config['device']) - ) -) -model.eval() \ No newline at end of file diff --git a/spaces/marcusj83/MusicGenbruh/tests/modules/test_lstm.py b/spaces/marcusj83/MusicGenbruh/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/matthoffner/open-codetree/components/Playground/IframeErrorScreen.tsx b/spaces/matthoffner/open-codetree/components/Playground/IframeErrorScreen.tsx deleted file mode 100644 index cefad4959bd2e29a55bfe49732b80bc225383195..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/open-codetree/components/Playground/IframeErrorScreen.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import React from "react"; - -export const IframeErrorScreen = ({ err }: any) => { - return ( -
    - {err} -
    - ); -}; diff --git a/spaces/matthoffner/starchat-ui/utils/app/prompts.ts b/spaces/matthoffner/starchat-ui/utils/app/prompts.ts deleted file mode 100644 index 64a8b2fd71ad78983d1bdd7d988b95b1a34ea7f9..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/utils/app/prompts.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { Prompt } from '@/types/prompt'; - -export const updatePrompt = (updatedPrompt: Prompt, allPrompts: Prompt[]) => { - const updatedPrompts = allPrompts.map((c) => { - if (c.id === updatedPrompt.id) { - return updatedPrompt; - } - - return c; - }); - - savePrompts(updatedPrompts); - - return { - single: updatedPrompt, - all: updatedPrompts, - }; -}; - -export const savePrompts = (prompts: Prompt[]) => { - localStorage.setItem('prompts', JSON.stringify(prompts)); -}; diff --git a/spaces/mattricesound/RemFx/scripts/train.py b/spaces/mattricesound/RemFx/scripts/train.py deleted file mode 100644 index 2d805025823188c271a5757527bb817466ac8598..0000000000000000000000000000000000000000 --- a/spaces/mattricesound/RemFx/scripts/train.py +++ /dev/null @@ -1,59 +0,0 @@ -import pytorch_lightning as pl -import hydra -from omegaconf import DictConfig -import remfx.utils as utils - -log = utils.get_logger(__name__) - - -@hydra.main(version_base=None, config_path="../cfg", config_name="config.yaml") -def main(cfg: DictConfig): - # Apply seed for reproducibility - if cfg.seed: - pl.seed_everything(cfg.seed) - log.info(f"Instantiating datamodule <{cfg.datamodule._target_}>.") - datamodule = hydra.utils.instantiate(cfg.datamodule, _convert_="partial") - log.info(f"Instantiating model <{cfg.model._target_}>.") - model = hydra.utils.instantiate(cfg.model, _convert_="partial") - - if "ckpt_path" in cfg: - log.info(f"Loading checkpoint from <{cfg.ckpt_path}>.") - model.load_from_checkpoint( - cfg.ckpt_path, - lr=model.lr, - lr_beta1=model.lr_beta1, - lr_beta2=model.lr_beta2, - lr_eps=model.lr_eps, - lr_weight_decay=model.lr_weight_decay, - sample_rate=model.sample_rate, - network=model.model, - ) - - # Init all callbacks - callbacks = [] - if "callbacks" in cfg: - for _, cb_conf in cfg["callbacks"].items(): - if "_target_" in cb_conf: - log.info(f"Instantiating callback <{cb_conf._target_}>.") - callbacks.append(hydra.utils.instantiate(cb_conf, _convert_="partial")) - - logger = hydra.utils.instantiate(cfg.logger, _convert_="partial") - log.info(f"Instantiating trainer <{cfg.trainer._target_}>.") - trainer = hydra.utils.instantiate( - cfg.trainer, callbacks=callbacks, logger=logger, _convert_="partial" - ) - log.info("Logging hyperparameters!") - utils.log_hyperparameters( - config=cfg, - model=model, - datamodule=datamodule, - trainer=trainer, - callbacks=callbacks, - logger=logger, - ) - trainer.fit(model=model, datamodule=datamodule) - trainer.test(model=model, datamodule=datamodule, ckpt_path="best") - - -if __name__ == "__main__": - main() diff --git a/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/core.c b/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/core.c deleted file mode 100644 index 5631d20a9a00db29e143a6e8e4e5c378d6bb850a..0000000000000000000000000000000000000000 --- a/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/core.c +++ /dev/null @@ -1,21299 +0,0 @@ -/* Generated by Cython 0.29.21 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "monotonic_align.core", - "sources": [ - "core.pyx" - ] - }, - "module_name": "monotonic_align.core" -} -END: Cython Metadata */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef 
Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. -#else -#define CYTHON_ABI "0_29_21" -#define CYTHON_HEX_VERSION 0x001D15F0 -#define CYTHON_FUTURE_DIVISION 0 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define 
CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template<class T> void 
CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include <stdint.h> -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define 
__Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__monotonic_align__core -#define __PYX_HAVE_API__monotonic_align__core -/* Early includes */ -#include "pythread.h" -#include <string.h> -#include <stdio.h> -#include <stdlib.h> -#include "pystate.h" -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - 
#define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ 
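/* Illustrative sketch, not emitted by Cython: the likely()/unlikely() macros
   defined above feed GCC's __builtin_expect, so the compiler keeps the
   expected branch on the fall-through path; on compilers that fail the
   version test they collapse to plain (x) and annotated code compiles
   unchanged. A minimal, hypothetical use (checked_div is an invented
   example, not a function from this module; the b == 0 test is the cold
   path):

       static int checked_div(int a, int b) {
           if (unlikely(b == 0)) return 0;
           return a / b;
       }
*/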
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -static const char *__pyx_f[] = { - "core.pyx", - "stringsource", -}; -/* NoFastGil.proto */ -#define __Pyx_PyGILState_Ensure PyGILState_Ensure -#define __Pyx_PyGILState_Release PyGILState_Release -#define __Pyx_FastGIL_Remember() -#define __Pyx_FastGIL_Forget() -#define __Pyx_FastGilFuncInit() - -/* MemviewSliceStruct.proto */ -struct __pyx_memoryview_obj; -typedef struct { - struct __pyx_memoryview_obj *memview; - char *data; - Py_ssize_t shape[8]; - Py_ssize_t strides[8]; - Py_ssize_t suboffsets[8]; -} __Pyx_memviewslice; -#define __Pyx_MemoryView_Len(m) (m.shape[0]) - -/* Atomics.proto */ -#include <pythread.h> -#ifndef CYTHON_ATOMICS - #define CYTHON_ATOMICS 1 -#endif -#define __pyx_atomic_int_type int -#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ - (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ - !defined(__i386__) - #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) - #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using GNU atomics" - #endif -#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 - #include <intrin.h> - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type LONG - #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #pragma message ("Using MSVC atomics") - #endif -#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 - #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using Intel atomics" - #endif -#else - #undef CYTHON_ATOMICS - #define CYTHON_ATOMICS 0 - #ifdef __PYX_DEBUG_ATOMICS - #warning "Not using atomics" - #endif -#endif -typedef volatile __pyx_atomic_int_type __pyx_atomic_int; -#if CYTHON_ATOMICS - #define __pyx_add_acquisition_count(memview)\ - __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) -#else - #define __pyx_add_acquisition_count(memview)\ - __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) -#endif - -/* ForceInitThreads.proto */ -#ifndef __PYX_FORCE_INIT_THREADS - #define __PYX_FORCE_INIT_THREADS 0 -#endif - -/* BufferFormatStructs.proto */ -#define IS_UNSIGNED(type) (((type) -1) > 0) -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} 
__Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/*--- Type declarations ---*/ -struct __pyx_array_obj; -struct __pyx_MemviewEnum_obj; -struct __pyx_memoryview_obj; -struct __pyx_memoryviewslice_obj; -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { - int __pyx_n; - float max_neg_val; -}; - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ -struct __pyx_array_obj { - PyObject_HEAD - struct __pyx_vtabstruct_array *__pyx_vtab; - char *data; - Py_ssize_t len; - char *format; - int ndim; - Py_ssize_t *_shape; - Py_ssize_t *_strides; - Py_ssize_t itemsize; - PyObject *mode; - PyObject *_format; - void (*callback_free_data)(void *); - int free_data; - int dtype_is_object; -}; - - -/* "View.MemoryView":279 - * - * @cname('__pyx_MemviewEnum') - * cdef class Enum(object): # <<<<<<<<<<<<<< - * cdef object name - * def __init__(self, name): - */ -struct __pyx_MemviewEnum_obj { - PyObject_HEAD - PyObject *name; -}; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ -struct __pyx_memoryview_obj { - PyObject_HEAD - struct __pyx_vtabstruct_memoryview *__pyx_vtab; - PyObject *obj; - PyObject *_size; - PyObject *_array_interface; - PyThread_type_lock lock; - __pyx_atomic_int acquisition_count[2]; - __pyx_atomic_int *acquisition_count_aligned_p; - Py_buffer view; - int flags; - int dtype_is_object; - __Pyx_TypeInfo *typeinfo; -}; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ -struct __pyx_memoryviewslice_obj { - struct __pyx_memoryview_obj __pyx_base; - __Pyx_memviewslice from_slice; - PyObject *from_object; - PyObject *(*to_object_func)(char *); - int (*to_dtype_func)(char *, PyObject *); -}; - - - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ - -struct __pyx_vtabstruct_array { - PyObject *(*get_memview)(struct __pyx_array_obj *); -}; -static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ - -struct __pyx_vtabstruct_memoryview { - char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); - PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); -}; -static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ - -struct __pyx_vtabstruct__memoryviewslice { - struct __pyx_vtabstruct_memoryview __pyx_base; -}; -static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* MemviewSliceInit.proto */ -#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d -#define __Pyx_MEMVIEW_DIRECT 1 -#define __Pyx_MEMVIEW_PTR 2 -#define __Pyx_MEMVIEW_FULL 4 -#define __Pyx_MEMVIEW_CONTIG 8 -#define __Pyx_MEMVIEW_STRIDED 16 -#define __Pyx_MEMVIEW_FOLLOW 32 -#define __Pyx_IS_C_CONTIG 1 -#define __Pyx_IS_F_CONTIG 2 -static int __Pyx_init_memviewslice( - struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference); -static CYTHON_INLINE int __pyx_add_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) -#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) -#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) -#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) -static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* None.proto */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif - -/* PyObjectCall2Args.proto */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* StrEquals.proto */ -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - -/* None.proto */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); - -/* UnaryNegOverflows.proto */ -#define UNARY_NEG_WOULD_OVERFLOW(x)\ - (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) - -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* decode_c_string_utf16.proto */ -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 0; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = -1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} - -/* decode_c_string.proto */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* ListExtend.proto */ -static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { -#if CYTHON_COMPILING_IN_CPYTHON - PyObject* none = _PyList_Extend((PyListObject*)L, v); - if (unlikely(!none)) - return -1; - Py_DECREF(none); - return 0; -#else - return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); -#endif -} - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* None.proto */ -static CYTHON_INLINE long __Pyx_div_long(long, long); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* PyObject_GenericGetAttr.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/* SetVTable.proto */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable); - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* SetupReduce.proto */ -static int __Pyx_setup_reduce(PyObject* type_obj); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -/* MemviewSliceIsContig.proto */ -static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); - -/* OverlappingSlices.proto */ -static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize); - -/* Capsule.proto */ -static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* TypeInfoCompare.proto */ -static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); - -/* MemviewSliceValidateAndInit.proto */ -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* MemviewSliceCopyTemplate.proto */ -static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ - -/* Module declarations from 'cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'monotonic_align.core' */ -static PyTypeObject *__pyx_array_type = 0; -static PyTypeObject *__pyx_MemviewEnum_type = 0; -static PyTypeObject *__pyx_memoryview_type = 0; -static PyTypeObject *__pyx_memoryviewslice_type = 0; -static PyObject *generic = 0; -static PyObject *strided = 0; -static PyObject *indirect = 0; -static PyObject *contiguous = 0; -static PyObject *indirect_contiguous = 0; -static int __pyx_memoryview_thread_locks_used; -static PyThread_type_lock __pyx_memoryview_thread_locks[8]; -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ -static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ -static void *__pyx_align_pointer(void *, size_t); /*proto*/ -static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ -static PyObject *_unellipsify(PyObject *, int); /*proto*/ -static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ -static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ -static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ -static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ -static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ -static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ -static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ -static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -#define __Pyx_MODULE_NAME "monotonic_align.core" -extern int __pyx_module_is_main_monotonic_align__core; -int __pyx_module_is_main_monotonic_align__core = 0; - -/* Implementation of 'monotonic_align.core' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_MemoryError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_Ellipsis; -static PyObject *__pyx_builtin_id; -static PyObject *__pyx_builtin_IndexError; -static const char __pyx_k_O[] = "O"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_id[] = "id"; -static const char __pyx_k_new[] = "__new__"; -static const char __pyx_k_obj[] = "obj"; -static const char __pyx_k_base[] = "base"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ndim[] = "ndim"; -static const char __pyx_k_pack[] = "pack"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_step[] = "step"; -static const char __pyx_k_stop[] = "stop"; -static const char __pyx_k_t_xs[] = "t_xs"; -static const char __pyx_k_t_ys[] = "t_ys"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_ASCII[] = "ASCII"; -static const char __pyx_k_class[] = "__class__"; -static const char __pyx_k_error[] = "error"; -static const char __pyx_k_flags[] = "flags"; -static const char __pyx_k_paths[] = "paths"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_pickle[] = "pickle"; -static const char __pyx_k_reduce[] = "__reduce__"; -static const char __pyx_k_struct[] = "struct"; -static const char __pyx_k_unpack[] = "unpack"; -static const char __pyx_k_update[] = "update"; -static const char __pyx_k_values[] = "values"; -static const char __pyx_k_fortran[] = "fortran"; -static const char __pyx_k_memview[] = "memview"; -static const char __pyx_k_Ellipsis[] = "Ellipsis"; -static const char __pyx_k_getstate[] = "__getstate__"; -static const char __pyx_k_itemsize[] = "itemsize"; -static const char __pyx_k_pyx_type[] = "__pyx_type"; -static const char __pyx_k_setstate[] = "__setstate__"; -static const char __pyx_k_TypeError[] = "TypeError"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_pyx_state[] = "__pyx_state"; -static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; -static const char __pyx_k_IndexError[] = "IndexError"; -static const char __pyx_k_ValueError[] = "ValueError"; -static const char __pyx_k_pyx_result[] = "__pyx_result"; -static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; -static const char __pyx_k_MemoryError[] = "MemoryError"; -static const char __pyx_k_PickleError[] = "PickleError"; -static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; -static const char __pyx_k_stringsource[] = "stringsource"; -static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; -static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; -static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; -static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; -static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; -static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; -static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; -static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; -static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; -static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; -static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; -static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; -static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; -static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; -static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; -static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; -static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; -static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; -static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; -static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; -static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; -static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; -static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; -static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; -static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; -static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; -static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; -static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; -static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; -static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; -static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; -static PyObject *__pyx_n_s_ASCII; -static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; -static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; -static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; -static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; -static PyObject *__pyx_kp_s_Cannot_index_with_type_s; -static PyObject *__pyx_n_s_Ellipsis; -static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; -static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; -static PyObject *__pyx_n_s_IndexError; -static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; -static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; -static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; -static PyObject *__pyx_n_s_MemoryError; -static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; -static PyObject 
*__pyx_kp_s_MemoryView_of_r_object; -static PyObject *__pyx_n_b_O; -static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; -static PyObject *__pyx_n_s_PickleError; -static PyObject *__pyx_n_s_TypeError; -static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; -static PyObject *__pyx_n_s_ValueError; -static PyObject *__pyx_n_s_View_MemoryView; -static PyObject *__pyx_n_s_allocate_buffer; -static PyObject *__pyx_n_s_base; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_u_c; -static PyObject *__pyx_n_s_class; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_kp_s_contiguous_and_direct; -static PyObject *__pyx_kp_s_contiguous_and_indirect; -static PyObject *__pyx_n_s_dict; -static PyObject *__pyx_n_s_dtype_is_object; -static PyObject *__pyx_n_s_encode; -static PyObject *__pyx_n_s_enumerate; -static PyObject *__pyx_n_s_error; -static PyObject *__pyx_n_s_flags; -static PyObject *__pyx_n_s_format; -static PyObject *__pyx_n_s_fortran; -static PyObject *__pyx_n_u_fortran; -static PyObject *__pyx_n_s_getstate; -static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; -static PyObject *__pyx_n_s_id; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_itemsize; -static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_memview; -static PyObject *__pyx_n_s_mode; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_name_2; -static PyObject *__pyx_n_s_ndim; -static PyObject *__pyx_n_s_new; -static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; -static PyObject *__pyx_n_s_obj; -static PyObject *__pyx_n_s_pack; -static PyObject *__pyx_n_s_paths; -static PyObject *__pyx_n_s_pickle; -static PyObject *__pyx_n_s_pyx_PickleError; -static PyObject *__pyx_n_s_pyx_checksum; -static PyObject *__pyx_n_s_pyx_getbuffer; -static PyObject *__pyx_n_s_pyx_result; -static PyObject *__pyx_n_s_pyx_state; -static PyObject *__pyx_n_s_pyx_type; -static PyObject *__pyx_n_s_pyx_unpickle_Enum; -static PyObject *__pyx_n_s_pyx_vtable; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_reduce; -static PyObject *__pyx_n_s_reduce_cython; -static PyObject *__pyx_n_s_reduce_ex; -static PyObject *__pyx_n_s_setstate; -static PyObject *__pyx_n_s_setstate_cython; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_size; -static PyObject *__pyx_n_s_start; -static PyObject *__pyx_n_s_step; -static PyObject *__pyx_n_s_stop; -static PyObject *__pyx_kp_s_strided_and_direct; -static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; -static PyObject *__pyx_kp_s_strided_and_indirect; -static PyObject *__pyx_kp_s_stringsource; -static PyObject *__pyx_n_s_struct; -static PyObject *__pyx_n_s_t_xs; -static PyObject *__pyx_n_s_t_ys; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_kp_s_unable_to_allocate_array_data; -static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; -static PyObject *__pyx_n_s_unpack; -static PyObject *__pyx_n_s_update; -static PyObject *__pyx_n_s_values; -static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
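
The `__pyx_k_*` byte literals and the `__pyx_n_s_*` / `__pyx_kp_s_*` object pointers declared above form Cython's interned-string table: `__Pyx_InitStrings` (whose prototype appears earlier) turns each C literal into one shared Python string object at module start-up, so every later attribute, keyword, and error-message lookup reuses the same object. A rough Python-level analogue of that interning, purely as an illustration:

```python
import sys

# Interning yields one shared object per distinct name, so lookups can
# compare by identity instead of by value (illustrative analogy only).
a = sys.intern("maximum_path_c")
b = sys.intern("maximum_path_c")
assert a is b
```
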
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_184977713; -static PyObject *__pyx_int_neg_1; -static float __pyx_k_; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_tuple__7; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__16; -static 
PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__11; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__14; -static PyObject *__pyx_tuple__15; -static PyObject *__pyx_tuple__17; -static PyObject *__pyx_tuple__18; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__20; -static PyObject *__pyx_tuple__21; -static PyObject *__pyx_tuple__22; -static PyObject *__pyx_tuple__23; -static PyObject *__pyx_tuple__24; -static PyObject *__pyx_tuple__25; -static PyObject *__pyx_codeobj__26; -/* Late includes */ - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { - float __pyx_v_max_neg_val = __pyx_k_; - int __pyx_v_x; - int __pyx_v_y; - float __pyx_v_v_prev; - float __pyx_v_v_cur; - int __pyx_v_index; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - int __pyx_t_5; - long __pyx_t_6; - long __pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_ssize_t __pyx_t_10; - float __pyx_t_11; - float __pyx_t_12; - float __pyx_t_13; - int __pyx_t_14; - Py_ssize_t __pyx_t_15; - Py_ssize_t __pyx_t_16; - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; - } - } - - /* "monotonic_align/core.pyx":13 - * cdef float v_cur - * cdef float tmp - * cdef int index = t_x - 1 # <<<<<<<<<<<<<< - * - * for y in range(t_y): - */ - __pyx_v_index = (__pyx_v_t_x - 1); - - /* "monotonic_align/core.pyx":15 - * cdef int index = t_x - 1 - * - * for y in range(t_y): # <<<<<<<<<<<<<< - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - */ - __pyx_t_1 = __pyx_v_t_y; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_y = __pyx_t_3; - - /* "monotonic_align/core.pyx":16 - * - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< - * if x == y: - * v_cur = max_neg_val - */ - __pyx_t_4 = (__pyx_v_y + 1); - __pyx_t_5 = __pyx_v_t_x; - if (((__pyx_t_4 < __pyx_t_5) != 0)) { - __pyx_t_6 = __pyx_t_4; - } else { - __pyx_t_6 = __pyx_t_5; - } - __pyx_t_4 = __pyx_t_6; - __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); - __pyx_t_6 = 0; - if (((__pyx_t_5 > __pyx_t_6) != 0)) { - __pyx_t_7 = __pyx_t_5; - } else { - __pyx_t_7 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_4; - for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { - __pyx_v_x = __pyx_t_5; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = max_neg_val - * else: - */ - __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":18 - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - * v_cur = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_cur = value[y-1, x] - */ - __pyx_v_v_cur = __pyx_v_max_neg_val; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = 
max_neg_val - * else: - */ - goto __pyx_L7; - } - - /* "monotonic_align/core.pyx":20 - * v_cur = max_neg_val - * else: - * v_cur = value[y-1, x] # <<<<<<<<<<<<<< - * if x == 0: - * if y == 0: - */ - /*else*/ { - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_x; - __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); - } - __pyx_L7:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - __pyx_t_8 = ((__pyx_v_x == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - __pyx_t_8 = ((__pyx_v_y == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":23 - * if x == 0: - * if y == 0: - * v_prev = 0. # <<<<<<<<<<<<<< - * else: - * v_prev = max_neg_val - */ - __pyx_v_v_prev = 0.; - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - goto __pyx_L9; - } - - /* "monotonic_align/core.pyx":25 - * v_prev = 0. - * else: - * v_prev = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_prev = value[y-1, x-1] - */ - /*else*/ { - __pyx_v_v_prev = __pyx_v_max_neg_val; - } - __pyx_L9:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - goto __pyx_L8; - } - - /* "monotonic_align/core.pyx":27 - * v_prev = max_neg_val - * else: - * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< - * value[y, x] += max(v_prev, v_cur) - * - */ - /*else*/ { - __pyx_t_10 = (__pyx_v_y - 1); - __pyx_t_9 = (__pyx_v_x - 1); - __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); - } - __pyx_L8:; - - /* "monotonic_align/core.pyx":28 - * else: - * v_prev = value[y-1, x-1] - * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< - * - * for y in range(t_y - 1, -1, -1): - */ - __pyx_t_11 = __pyx_v_v_cur; - __pyx_t_12 = __pyx_v_v_prev; - if (((__pyx_t_11 > __pyx_t_12) != 0)) { - __pyx_t_13 = __pyx_t_11; - } else { - __pyx_t_13 = __pyx_t_12; - } - __pyx_t_9 = __pyx_v_y; - __pyx_t_10 = __pyx_v_x; - *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; - } - } - - /* "monotonic_align/core.pyx":30 - * value[y, x] += max(v_prev, v_cur) - * - * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - */ - for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_y = __pyx_t_1; - - /* "monotonic_align/core.pyx":31 - * - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 # <<<<<<<<<<<<<< - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 - */ - __pyx_t_10 = __pyx_v_y; - __pyx_t_9 = __pyx_v_index; - *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - __pyx_t_14 = 
((__pyx_v_index != 0) != 0); - if (__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); - if (!__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_index; - __pyx_t_15 = (__pyx_v_y - 1); - __pyx_t_16 = (__pyx_v_index - 1); - __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); - __pyx_t_8 = __pyx_t_14; - __pyx_L13_bool_binop_done:; - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":33 - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_index = (__pyx_v_index - 1); - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - } - } - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - - /* function exit code */ -} - -/* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { - CYTHON_UNUSED int __pyx_v_b; - int __pyx_v_i; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; - Py_ssize_t __pyx_t_6; - Py_ssize_t __pyx_t_7; - - /* "monotonic_align/core.pyx":39 - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: - * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< - * cdef int i - * for i in prange(b, nogil=True): - */ - __pyx_v_b = (__pyx_v_paths.shape[0]); - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - { - #ifdef WITH_THREAD - PyThreadState *_save; - Py_UNBLOCK_THREADS - __Pyx_FastGIL_Remember(); - #endif - /*try:*/ { - __pyx_t_1 = __pyx_v_b; - if ((1 == 0)) abort(); - { - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) (x) - #define unlikely(x) (x) - #endif - __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; - if (__pyx_t_3 > 0) - { - #ifdef _OPENMP - #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) - #endif /* _OPENMP */ - { - #ifdef _OPENMP - #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) - #endif /* _OPENMP */ - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ - { - __pyx_v_i = (int)(0 + 1 * __pyx_t_2); - - /* "monotonic_align/core.pyx":42 - * cdef int i - * for i in prange(b, nogil=True): - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< - */ - __pyx_t_4.data = __pyx_v_paths.data; - __pyx_t_4.memview = __pyx_v_paths.memview; - __PYX_INC_MEMVIEW(&__pyx_t_4, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; - __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; -__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; - __pyx_t_4.suboffsets[0] = -1; - -__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; -__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; - __pyx_t_4.suboffsets[1] = -1; - -__pyx_t_5.data = __pyx_v_values.data; - __pyx_t_5.memview = __pyx_v_values.memview; - __PYX_INC_MEMVIEW(&__pyx_t_5, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; - __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; -__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; - __pyx_t_5.suboffsets[0] = -1; - -__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; -__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; - __pyx_t_5.suboffsets[1] = -1; - -__pyx_t_6 = __pyx_v_i; - __pyx_t_7 = __pyx_v_i; - __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); - __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); - __pyx_t_4.memview = NULL; - __pyx_t_4.data = NULL; - __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); - __pyx_t_5.memview = NULL; - __pyx_t_5.data = NULL; - } - } - } - } - } - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #endif - } - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - /*finally:*/ { - /*normal exit:*/{ - #ifdef WITH_THREAD - __Pyx_FastGIL_Forget(); - Py_BLOCK_THREADS - #endif - goto __pyx_L5; - } - __pyx_L5:; - } - } - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - - /* function exit code */ -} - -/* Python wrapper */ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; - 
int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; - PyObject* values[4] = {0,0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - } - __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
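
The generated code above compiles `monotonic_align/core.pyx`, quoted piecewise in the comments: `maximum_path_each` is a dynamic program that first accumulates, in `value`, the best score of any monotonic alignment ending at each cell, then backtracks from the last column and writes the chosen path into `path`; the `cpdef maximum_path_c` applies it over a batch with `prange`, which is what the OpenMP pragmas implement. A pure-Python paraphrase for readability (the batch wrapper name `maximum_path_batch` is invented here; the real entry point is `maximum_path_c`):

```python
def maximum_path_each(path, value, t_y, t_x, max_neg_val=-1e9):
    # Forward pass: value[y, x] becomes the best score of any monotonic
    # alignment that ends at cell (y, x).
    for y in range(t_y):
        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
            v_cur = max_neg_val if x == y else value[y - 1, x]
            if x == 0:
                v_prev = 0.0 if y == 0 else max_neg_val
            else:
                v_prev = value[y - 1, x - 1]
            value[y, x] += max(v_prev, v_cur)
    # Backward pass: walk up from the last column, stepping left whenever
    # the diagonal predecessor scored higher, marking the path with ones.
    index = t_x - 1
    for y in range(t_y - 1, -1, -1):
        path[y, index] = 1
        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
            index = index - 1

def maximum_path_batch(paths, values, t_ys, t_xs):
    # maximum_path_c runs this loop as prange(b, nogil=True) over the batch.
    for i in range(paths.shape[0]):
        maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
```

The Python wrapper just above converts its four arguments with `PyBUF_WRITABLE` into the typed memoryviews `int[:, :, ::1]`, `float[:, :, ::1]`, `int[::1]`, `int[::1]`, so callers must pass writable C-contiguous arrays; both `paths` and `values` are mutated in place and the call returns `None`. A hedged usage sketch, assuming NumPy's `int32`/`float32` match the platform's C `int`/`float`:

```python
import numpy as np
from monotonic_align import core  # the extension module this C file builds

b, t_y, t_x = 2, 16, 12
paths = np.zeros((b, t_y, t_x), dtype=np.int32)           # int[:, :, ::1]
values = np.random.randn(b, t_y, t_x).astype(np.float32)  # float[:, :, ::1]
t_ys = np.full(b, t_y, dtype=np.int32)                    # int[::1]
t_xs = np.full(b, t_x, dtype=np.int32)                    # int[::1]

core.maximum_path_c(paths, values, t_ys, t_xs)  # fills paths in place
```
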
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("maximum_path_c", 0); - __Pyx_XDECREF(__pyx_r); - if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } - __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - -/* Python wrapper */ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_shape = 0; - Py_ssize_t __pyx_v_itemsize; - PyObject *__pyx_v_format = 0; - PyObject *__pyx_v_mode = 0; - int __pyx_v_allocate_buffer; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; - PyObject* values[5] = {0,0,0,0,0}; - values[3] = ((PyObject *)__pyx_n_s_c); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); - if (value) { values[3] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_shape = ((PyObject*)values[0]); - __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_v_format = values[2]; - __pyx_v_mode = values[3]; - if (values[4]) { - __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) - } else { - - /* "View.MemoryView":123 - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, - * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< - * - * cdef int idx - */ - __pyx_v_allocate_buffer = ((int)1); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) - if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { - PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) - } - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { - int __pyx_v_idx; - Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_dim; - PyObject **__pyx_v_p; - char __pyx_v_order; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - char *__pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - Py_ssize_t __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __Pyx_INCREF(__pyx_v_format); - - /* "View.MemoryView":129 - * cdef PyObject **p - * - * self.ndim = len(shape) # <<<<<<<<<<<<<< - * self.itemsize = itemsize - * - */ - if (unlikely(__pyx_v_shape == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 129, __pyx_L1_error) - } - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) - __pyx_v_self->ndim = ((int)__pyx_t_1); - - /* "View.MemoryView":130 - * - * self.ndim = len(shape) - * self.itemsize = itemsize # <<<<<<<<<<<<<< - * - * if not self.ndim: - */ - __pyx_v_self->itemsize = __pyx_v_itemsize; - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 133, __pyx_L1_error) - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - } - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 136, __pyx_L1_error) - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - } - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - __pyx_t_2 = PyBytes_Check(__pyx_v_format); - __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":139 - * - * if not isinstance(format, bytes): - * format = format.encode('ASCII') # <<<<<<<<<<<<<< - 
* self._format = format # keep a reference to the byte string - * self.format = self._format - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - } - } - __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - } - - /* "View.MemoryView":140 - * if not isinstance(format, bytes): - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< - * self.format = self._format - * - */ - if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) - __pyx_t_3 = __pyx_v_format; - __Pyx_INCREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_v_self->_format); - __Pyx_DECREF(__pyx_v_self->_format); - __pyx_v_self->_format = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":141 - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - * self.format = self._format # <<<<<<<<<<<<<< - * - * - */ - if (unlikely(__pyx_v_self->_format == Py_None)) { - PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); - __PYX_ERR(1, 141, __pyx_L1_error) - } - __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) - __pyx_v_self->format = __pyx_t_7; - - /* "View.MemoryView":144 - * - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< - * self._strides = self._shape + self.ndim - * - */ - __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); - - /* "View.MemoryView":145 - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) - * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< - * - * if not self._shape: - */ - __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 148, __pyx_L1_error) - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - } - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - */ - __pyx_t_8 = 0; - __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; - for (;;) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_9; - __pyx_v_idx = __pyx_t_8; - __pyx_t_8 = (__pyx_t_8 + 1); - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":153 - * for idx, dim in enumerate(shape): - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< - * self._shape[idx] = dim - * - */ - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 153, __pyx_L1_error) - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - } - - /* "View.MemoryView":154 - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim # <<<<<<<<<<<<<< - * - * cdef char order - */ - (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":158 - * cdef char order - * if mode == 'fortran': - * order = b'F' # <<<<<<<<<<<<<< - * self.mode = u'fortran' - * elif mode == 'c': - */ - __pyx_v_order = 'F'; - - /* "View.MemoryView":159 - * if mode == 'fortran': - * order = b'F' - * self.mode = u'fortran' # <<<<<<<<<<<<<< - * elif mode == 'c': - * order = b'C' - */ - __Pyx_INCREF(__pyx_n_u_fortran); - __Pyx_GIVEREF(__pyx_n_u_fortran); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_fortran; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) - if (likely(__pyx_t_4)) { - - /* "View.MemoryView":161 - * self.mode = u'fortran' - * elif mode == 'c': - * order = b'C' # <<<<<<<<<<<<<< - * self.mode = u'c' - * else: - */ - __pyx_v_order = 'C'; - - /* "View.MemoryView":162 - * elif mode == 'c': - * order = b'C' - * self.mode = u'c' # <<<<<<<<<<<<<< - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - */ - __Pyx_INCREF(__pyx_n_u_c); - __Pyx_GIVEREF(__pyx_n_u_c); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_c; - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":164 - * self.mode = u'c' - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< - * - * self.len = fill_contig_strides_array(self._shape, self._strides, - */ - /*else*/ { - __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 164, __pyx_L1_error) - } - __pyx_L10:; - - /* "View.MemoryView":166 - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - * - * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< - * itemsize, self.ndim, order) - * - */ - __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); - - /* "View.MemoryView":169 - * itemsize, self.ndim, order) - * - * self.free_data = allocate_buffer # <<<<<<<<<<<<<< - * self.dtype_is_object = format == b'O' - * if allocate_buffer: - */ - __pyx_v_self->free_data = __pyx_v_allocate_buffer; - - /* "View.MemoryView":170 - * - * 
self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< - * if allocate_buffer: - * - */ - __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_self->dtype_is_object = __pyx_t_4; - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = (__pyx_v_allocate_buffer != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":174 - * - * - * self.data = malloc(self.len) # <<<<<<<<<<<<<< - * if not self.data: - * raise MemoryError("unable to allocate array data.") - */ - __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":176 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 176, __pyx_L1_error) - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":179 - * - * if self.dtype_is_object: - * p = self.data # <<<<<<<<<<<<<< - * for i in range(self.len / itemsize): - * p[i] = Py_None - */ - __pyx_v_p = ((PyObject **)__pyx_v_self->data); - - /* "View.MemoryView":180 - * if self.dtype_is_object: - * p = self.data - * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< - * p[i] = Py_None - * Py_INCREF(Py_None) - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); - __pyx_t_9 = __pyx_t_1; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "View.MemoryView":181 - * p = self.data - * for i in range(self.len / itemsize): - * p[i] = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - (__pyx_v_p[__pyx_v_i]) = Py_None; - - /* "View.MemoryView":182 - * for i in range(self.len / itemsize): - * p[i] = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - 
Py_INCREF(Py_None); - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - } - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_format); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - -/* Python wrapper */ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_bufmode; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; - Py_ssize_t __pyx_t_5; - int __pyx_t_6; - Py_ssize_t *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":186 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = -1; - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":188 - * cdef int bufmode = -1 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - goto __pyx_L3; - } - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":190 - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - */ - __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - } - __pyx_L3:; - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 192, __pyx_L1_error) - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - } - - /* "View.MemoryView":193 - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data # <<<<<<<<<<<<<< - * info.len = self.len - * info.ndim = self.ndim - */ - __pyx_t_4 = __pyx_v_self->data; - __pyx_v_info->buf = __pyx_t_4; - - /* "View.MemoryView":194 - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - * info.len = self.len # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - __pyx_t_5 = __pyx_v_self->len; - __pyx_v_info->len = __pyx_t_5; - - /* "View.MemoryView":195 - * info.buf = self.data - * info.len = self.len - * info.ndim = self.ndim # <<<<<<<<<<<<<< - * info.shape = self._shape - * info.strides = self._strides - */ - __pyx_t_6 = __pyx_v_self->ndim; - __pyx_v_info->ndim = __pyx_t_6; - - /* "View.MemoryView":196 - * info.len = self.len - * info.ndim = self.ndim - * 
info.shape = self._shape # <<<<<<<<<<<<<< - * info.strides = self._strides - * info.suboffsets = NULL - */ - __pyx_t_7 = __pyx_v_self->_shape; - __pyx_v_info->shape = __pyx_t_7; - - /* "View.MemoryView":197 - * info.ndim = self.ndim - * info.shape = self._shape - * info.strides = self._strides # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = self.itemsize - */ - __pyx_t_7 = __pyx_v_self->_strides; - __pyx_v_info->strides = __pyx_t_7; - - /* "View.MemoryView":198 - * info.shape = self._shape - * info.strides = self._strides - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = self.itemsize - * info.readonly = 0 - */ - __pyx_v_info->suboffsets = NULL; - - /* "View.MemoryView":199 - * info.strides = self._strides - * info.suboffsets = NULL - * info.itemsize = self.itemsize # <<<<<<<<<<<<<< - * info.readonly = 0 - * - */ - __pyx_t_5 = __pyx_v_self->itemsize; - __pyx_v_info->itemsize = __pyx_t_5; - - /* "View.MemoryView":200 - * info.suboffsets = NULL - * info.itemsize = self.itemsize - * info.readonly = 0 # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - __pyx_v_info->readonly = 0; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":203 - * - * if flags & PyBUF_FORMAT: - * info.format = self.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_4 = __pyx_v_self->format; - __pyx_v_info->format = __pyx_t_4; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":205 - * info.format = self.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.obj = self - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L5:; - - /* "View.MemoryView":207 - * info.format = NULL - * - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - -/* Python wrapper */ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":213 - * def __dealloc__(array self): - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) # <<<<<<<<<<<<<< - * elif self.free_data: - * if self.dtype_is_object: - */ - __pyx_v_self->callback_free_data(__pyx_v_self->data); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - __pyx_t_1 = (__pyx_v_self->free_data != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":216 - * elif self.free_data: - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< - * self._strides, self.ndim, False) - * free(self.data) - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - } - - /* "View.MemoryView":218 - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - * free(self.data) # <<<<<<<<<<<<<< - * PyObject_Free(self._shape) - * - */ - free(__pyx_v_self->data); - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - } - __pyx_L3:; - - /* "View.MemoryView":219 - * self._strides, self.ndim, False) - * free(self.data) - * PyObject_Free(self._shape) # <<<<<<<<<<<<<< - * - * @property - */ - PyObject_Free(__pyx_v_self->_shape); - - /* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - -/* Python wrapper */ -static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":223 - * @property - * def memview(self): - * return self.get_memview() # <<<<<<<<<<<<<< - * - * @cname('get_memview') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_memview", 0); - - /* "View.MemoryView":227 - * @cname('get_memview') - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< - * return memoryview(self, flags, self.dtype_is_object) - * - */ - __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); - - /* "View.MemoryView":228 - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - 
__pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":231 - * - * def __len__(self): - * return self._shape[0] # <<<<<<<<<<<<<< - * - * def __getattr__(self, attr): - */ - __pyx_r = (__pyx_v_self->_shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getattr__", 0); - - /* "View.MemoryView":234 - * - * def __getattr__(self, attr): - * return getattr(self.memview, attr) # <<<<<<<<<<<<<< - * - * def __getitem__(self, item): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":237 - * - * def __getitem__(self, item): - * return self.memview[item] # <<<<<<<<<<<<<< - * - * def __setitem__(self, item, value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - -/* Python wrapper */ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - 
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - - /* "View.MemoryView":240 - * - * def __setitem__(self, item, value): - * self.memview[item] = value # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - -static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { - struct __pyx_array_obj *__pyx_v_result = 0; - struct __pyx_array_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_cwrapper", 0); - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":249 - * - * if buf == 
NULL: - * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - /*else*/ { - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":252 - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) # <<<<<<<<<<<<<< - * result.data = buf - * - */ - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "View.MemoryView":253 - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) - * result.data = buf # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->data = __pyx_v_buf; - } - __pyx_L3:; - - /* "View.MemoryView":255 - * result.data = buf - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - -/* Python wrapper */ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_name = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; - PyObject* values[1] = {0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - } - __pyx_v_name = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); - - /* function 
exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__", 0); - - /* "View.MemoryView":282 - * cdef object name - * def __init__(self, name): - * self.name = name # <<<<<<<<<<<<<< - * def __repr__(self): - * return self.name - */ - __Pyx_INCREF(__pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __Pyx_GOTREF(__pyx_v_self->name); - __Pyx_DECREF(__pyx_v_self->name); - __pyx_v_self->name = __pyx_v_name; - - /* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - - /* function exit code */ - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - -/* Python wrapper */ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":284 - * self.name = name - * def __repr__(self): - * return self.name # <<<<<<<<<<<<<< - * - * cdef generic = Enum("") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->name); - __pyx_r = __pyx_v_self->name; - goto __pyx_L0; - - /* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_v_state = 0; - PyObject *__pyx_v__dict = 0; - int __pyx_v_use_setstate; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 
0); - - /* "(tree fragment)":5 - * cdef object _dict - * cdef bint use_setstate - * state = (self.name,) # <<<<<<<<<<<<<< - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_self->name); - __Pyx_GIVEREF(__pyx_v_self->name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); - __pyx_v_state = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "(tree fragment)":6 - * cdef bint use_setstate - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< - * if _dict is not None: - * state += (_dict,) - */ - __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v__dict = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - __pyx_t_2 = (__pyx_v__dict != Py_None); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":8 - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - * state += (_dict,) # <<<<<<<<<<<<<< - * use_setstate = True - * else: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v__dict); - __Pyx_GIVEREF(__pyx_v__dict); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); - __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); - __pyx_t_4 = 0; - - /* "(tree fragment)":9 - * if _dict is not None: - * state += (_dict,) - * use_setstate = True # <<<<<<<<<<<<<< - * else: - * use_setstate = self.name is not None - */ - __pyx_v_use_setstate = 1; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - goto __pyx_L3; - } - - /* "(tree fragment)":11 - * use_setstate = True - * else: - * use_setstate = self.name is not None # <<<<<<<<<<<<<< - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_self->name != Py_None); - __pyx_v_use_setstate = __pyx_t_3; - } - __pyx_L3:; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - __pyx_t_3 = (__pyx_v_use_setstate != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":13 - * use_setstate = self.name is not None - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - 
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - } - - /* "(tree fragment)":15 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_state); - __Pyx_XDECREF(__pyx_v__dict); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":17 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - -static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { - Py_intptr_t __pyx_v_aligned_p; - size_t __pyx_v_offset; - void *__pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":300 - * cdef void *align_pointer(void *memory, size_t alignment) nogil: - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< - * cdef size_t offset - * - */ - __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); - - /* "View.MemoryView":304 - * - * with cython.cdivision(True): - * offset = aligned_p % alignment # <<<<<<<<<<<<<< - * - * if offset > 0: - */ - __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - __pyx_t_1 = ((__pyx_v_offset > 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":307 - * - * if offset > 0: - * aligned_p += alignment - offset # <<<<<<<<<<<<<< - * - * return aligned_p - */ - __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - } - - /* "View.MemoryView":309 - * aligned_p += alignment - offset - * - * return aligned_p # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((void *)__pyx_v_aligned_p); - goto __pyx_L0; - 
- /* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - -/* Python wrapper */ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_obj = 0; - int __pyx_v_flags; - int __pyx_v_dtype_is_object; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_obj = values[0]; - __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - if (values[2]) { - __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - } else { - __pyx_v_dtype_is_object = ((int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - - /* "View.MemoryView":346 - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj # <<<<<<<<<<<<<< - * self.flags = flags - * if type(self) is memoryview or obj is not None: - */ - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - __Pyx_GOTREF(__pyx_v_self->obj); - __Pyx_DECREF(__pyx_v_self->obj); - __pyx_v_self->obj = __pyx_v_obj; - - /* "View.MemoryView":347 - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj - * self.flags = flags # <<<<<<<<<<<<<< - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - */ - __pyx_v_self->flags = __pyx_v_flags; - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); - __pyx_t_3 = (__pyx_t_2 != 0); - if (!__pyx_t_3) { - } else { - __pyx_t_1 = __pyx_t_3; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_3 = (__pyx_v_obj != Py_None); - __pyx_t_2 = (__pyx_t_3 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":349 - * self.flags = flags - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - */ - __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":351 - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; - - /* "View.MemoryView":352 - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * global __pyx_memoryview_thread_locks_used - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - } - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - } - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":356 - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - */ - __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - - /* "View.MemoryView":357 - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":359 - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< - * if self.lock is NULL: - * raise MemoryError - */ - __pyx_v_self->lock = PyThread_allocate_lock(); - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":361 - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - } - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":364 - * - * if flags & PyBUF_FORMAT: - * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< - * else: - * self.dtype_is_object = dtype_is_object - */ - __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_self->dtype_is_object = __pyx_t_1; - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - goto __pyx_L10; - } - - /* "View.MemoryView":366 - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - */ - /*else*/ { - __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; - } - __pyx_L10:; - - /* "View.MemoryView":368 - * self.dtype_is_object = dtype_is_object - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL - */ - __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); - - /* "View.MemoryView":370 - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL # <<<<<<<<<<<<<< - * - * def __dealloc__(memoryview self): - */ - __pyx_v_self->typeinfo = NULL; - - /* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - -/* Python wrapper */ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { - int __pyx_v_i; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyThread_type_lock __pyx_t_6; - PyThread_type_lock __pyx_t_7; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - __pyx_t_1 = (__pyx_v_self->obj != Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* 
"View.MemoryView":374 - * def __dealloc__(memoryview self): - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - */ - __Pyx_ReleaseBuffer((&__pyx_v_self->view)); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":377 - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< - * Py_DECREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; - - /* "View.MemoryView":378 - * - * (<__pyx_buffer *> &self.view).obj = NULL - * Py_DECREF(Py_None) # <<<<<<<<<<<<<< - * - * cdef int i - */ - Py_DECREF(Py_None); - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - } - __pyx_L3:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":383 - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - */ - __pyx_t_3 = __pyx_memoryview_thread_locks_used; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":385 - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":388 - 
* if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< - * break - * else: - */ - __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); - - /* "View.MemoryView":387 - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break - */ - (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; - (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - } - - /* "View.MemoryView":389 - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break # <<<<<<<<<<<<<< - * else: - * PyThread_free_lock(self.lock) - */ - goto __pyx_L6_break; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - } - } - /*else*/ { - - /* "View.MemoryView":391 - * break - * else: - * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - */ - PyThread_free_lock(__pyx_v_self->lock); - } - __pyx_L6_break:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - } - - /* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - Py_ssize_t __pyx_v_dim; - char *__pyx_v_itemp; - PyObject *__pyx_v_idx = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - char *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_item_pointer", 0); 
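- /* Illustrative note (editor's sketch, not Cython output): the loop that - * follows expands "for dim, idx in enumerate(index)"; each pybuffer_index - * call advances itemp by that dimension's stride (and suboffset, if any). - * For an assumed C-contiguous int32 buffer of shape (3, 4) the strides are - * (16, 4) bytes, so index (1, 2) yields itemp = buf + 1*16 + 2*4 = buf + 24. - */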
- - /* "View.MemoryView":395 - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< - * - * for dim, idx in enumerate(index): - */ - __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { - __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 397, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_1; - __pyx_t_1 = (__pyx_t_1 + 1); - - /* "View.MemoryView":398 - * - * for dim, idx in enumerate(index): - * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< - * - * return itemp - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_7; - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":400 - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - * return itemp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_itemp; - goto __pyx_L0; - - /* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - - /* function exit code */ - 
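/* Illustrative note (editor's sketch, not Cython output): the large loop - * above is the C expansion of "for dim, idx in enumerate(index)", with fast - * paths for exact lists/tuples and an iterator-protocol fallback, roughly: - * - *     it = iter(index); dim = 0 - *     for idx in it: - *         itemp = pybuffer_index(&self.view, itemp, idx, dim); dim += 1 - */ - 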
__pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_indices = NULL; - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":405 - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: - * return self # <<<<<<<<<<<<<< - * - * have_slices, indices = _unellipsify(index, self.view.ndim) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __pyx_r = ((PyObject *)__pyx_v_self); - goto __pyx_L0; - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - } - - /* "View.MemoryView":407 - * return self - * - * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * cdef char *itemp - */ - __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (likely(__pyx_t_3 != Py_None)) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 407, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - 
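/* Illustrative note (editor's sketch, not Cython output): this branch fires - * when _unellipsify returns None, which cannot be unpacked into - * "have_slices, indices". For intuition, with ndim == 3 (assumed example): - * - *     _unellipsify((1, Ellipsis), 3)  ->  (True, (1, slice(None), slice(None))) - *     _unellipsify((1, 2, 3), 3)      ->  (False, (1, 2, 3)) - * - * where the flag reports whether any slice is present in the result. - */ - 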
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_indices = __pyx_t_5; - __pyx_t_5 = 0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) - if (__pyx_t_2) { - - /* "View.MemoryView":411 - * cdef char *itemp - * if have_slices: - * return memview_slice(self, indices) # <<<<<<<<<<<<<< - * else: - * itemp = self.get_item_pointer(indices) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - } - - /* "View.MemoryView":413 - * return memview_slice(self, indices) - * else: - * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< - * return self.convert_item_to_object(itemp) - * - */ - /*else*/ { - __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_6; - - /* "View.MemoryView":414 - * else: - * itemp = self.get_item_pointer(indices) - * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< - * - * def __setitem__(memoryview self, object index, object value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_indices); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - -/* Python wrapper */ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_obj = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - __Pyx_INCREF(__pyx_v_index); - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - __pyx_t_1 = (__pyx_v_self->view.readonly != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 418, __pyx_L1_error) - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - } - - /* "View.MemoryView":420 - * raise TypeError("Cannot assign to read-only memoryview") - * - * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * if have_slices: - */ - __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(__pyx_t_2 != Py_None)) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 420, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":423 - * - * if have_slices: - * obj = self.is_slice(value) # <<<<<<<<<<<<<< - * if obj: - * self.setitem_slice_assignment(self[index], obj) - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_obj = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":425 - * obj = self.is_slice(value) - * if obj: - * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< - * else: - * self.setitem_slice_assign_scalar(self[index], value) - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":427 - * self.setitem_slice_assignment(self[index], obj) - * else: - * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< - * else: - * self.setitem_indexed(index, value) - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L5:; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":429 - * self.setitem_slice_assign_scalar(self[index], value) - * else: - * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< - * - * cdef is_slice(self, obj): - */ - /*else*/ { - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L4:; - - /* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; 
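- /* Illustrative note (editor's sketch, not Cython output): taken together, - * the branches above dispatch "mv[index] = value" three ways (mv and - * other_view are assumed example names): - * - *     mv[1, :] = other_view   # slice target, buffer-like value -> setitem_slice_assignment - *     mv[1, :] = 0            # slice target, scalar value     -> setitem_slice_assign_scalar - *     mv[1, 2] = 0            # fully indexed target           -> setitem_indexed - */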
- __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_slice", 0); - __Pyx_INCREF(__pyx_v_obj); - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - /*try:*/ { - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":435 - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) # <<<<<<<<<<<<<< - * except TypeError: - * return None - */ - __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); - __pyx_t_6 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L9_try_end; - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":436 - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - * except TypeError: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); - if (__pyx_t_9) { - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":437 - * self.dtype_is_object) - * except TypeError: - * return None # <<<<<<<<<<<<<< - * - * return obj - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - __pyx_L6_except_error:; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L1_error; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L0; - __pyx_L9_try_end:; - } - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - } - - /* "View.MemoryView":439 - * return None - * - * return obj # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assignment(self, dst, src): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - /* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { - __Pyx_memviewslice __pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_src_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - __Pyx_memviewslice *__pyx_t_2; - PyObject *__pyx_t_3 
= NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":446 - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< - * src.ndim, dst.ndim, self.dtype_is_object) - * - */ - if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) - __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) - - /* "View.MemoryView":447 - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { - int __pyx_v_array[0x80]; - void *__pyx_v_tmp; - void *__pyx_v_item; - __Pyx_memviewslice *__pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_tmp_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - char const *__pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); - - /* "View.MemoryView":451 - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - * cdef int array[128] - * cdef void *tmp = NULL # <<<<<<<<<<<<<< - * cdef void *item - * - */ - __pyx_v_tmp = NULL; - - /* "View.MemoryView":456 - * cdef __Pyx_memviewslice *dst_slice - * cdef __Pyx_memviewslice tmp_slice - * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< - * - * if <size_t>self.view.itemsize > sizeof(array): - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) - __pyx_v_dst_slice = __pyx_t_1; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":459 - * - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< - * if tmp == NULL: - * raise MemoryError - */ - __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":461 - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * item = tmp - * else: - */ - PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - } - - /* "View.MemoryView":462 - * if tmp == NULL: - * raise MemoryError - * item = tmp # <<<<<<<<<<<<<< - * else: - * item = <void *> array - */ - __pyx_v_item = __pyx_v_tmp; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":464 - * item = tmp - * else: - * item = <void *> array # <<<<<<<<<<<<<< - * - * try: - */ - /*else*/ { - __pyx_v_item = ((void *)__pyx_v_array); - } - __pyx_L3:; - - /* "View.MemoryView":466 - * item = <void *> array - * - * try: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * (<PyObject **> item)[0] = <PyObject *> value - */ - 
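/* Illustrative note (editor's sketch, not Cython output): the code above - * picks scratch storage for one packed item: the 128-int stack array when - * the itemsize fits, else a PyMem_Malloc block released by the finally - * clause below. The same pattern in bare C (assumed names): - * - *     char stack_buf[512]; void *heap_buf = NULL; void *item = stack_buf; - *     if (itemsize > sizeof(stack_buf)) { - *         if (!(heap_buf = PyMem_Malloc(itemsize))) return PyErr_NoMemory(); - *         item = heap_buf; - *     } - *     ...use item...; PyMem_Free(heap_buf);  // PyMem_Free(NULL) is a no-op - */ - 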
/*try:*/ { - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = <PyObject *> value - * else: - */ - __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":468 - * try: - * if self.dtype_is_object: - * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< - * else: - * self.assign_item_from_object(<char *> item, value) - */ - (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = <PyObject *> value - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":470 - * (<PyObject **> item)[0] = <PyObject *> value - * else: - * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":475 - * - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - * item, self.dtype_is_object) - */ - __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - } - - /* "View.MemoryView":476 - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< - * item, self.dtype_is_object) - * finally: - */ - __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); - } - - /* "View.MemoryView":479 - * item, self.dtype_is_object) - * finally: - * PyMem_Free(tmp) # <<<<<<<<<<<<<< - * - * cdef setitem_indexed(self, index, value): - */ - /*finally:*/ { - /*normal exit:*/{ - PyMem_Free(__pyx_v_tmp); - goto __pyx_L7; - } - __pyx_L6_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; - { - PyMem_Free(__pyx_v_tmp); - } - if
(PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_XGIVEREF(__pyx_t_12); - __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); - } - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; - goto __pyx_L1_error; - } - __pyx_L7:; - } - - /* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - char *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_indexed", 0); - - /* "View.MemoryView":482 - * - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< - * self.assign_item_from_object(itemp, value) - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_1; - - /* "View.MemoryView":483 - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to 
convert the type""" - */ - -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_v_struct = NULL; - PyObject *__pyx_v_bytesitem = 0; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - size_t __pyx_t_10; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":488 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef bytes bytesitem - * - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":491 - * cdef bytes bytesitem - * - * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< - * try: - * result = struct.unpack(self.view.format, bytesitem) - */ - __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - /*try:*/ { - - /* "View.MemoryView":493 - * bytesitem = itemp[:self.view.itemsize] - * try: - * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< - * except struct.error: - * raise ValueError("Unable to convert item to object") - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_8 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_9); - if (__pyx_t_7) { - __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; - } - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); - __Pyx_INCREF(__pyx_v_bytesitem); - __Pyx_GIVEREF(__pyx_v_bytesitem); - PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); - __pyx_t_6 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - } - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - /*else:*/ { - __pyx_t_10 = strlen(__pyx_v_self->view.format); - __pyx_t_11 = ((__pyx_t_10 == 1) != 0); - if (__pyx_t_11) { - - /* "View.MemoryView":498 - * else: - * if len(self.view.format) == 1: - * return result[0] # <<<<<<<<<<<<<< - * return result - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L6_except_return; - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - } - - /* "View.MemoryView":499 - * if len(self.view.format) == 1: - * return result[0] - * return result # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_result); - __pyx_r = __pyx_v_result; - goto __pyx_L6_except_return; - } - __pyx_L3_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "View.MemoryView":494 - * try: - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: # <<<<<<<<<<<<<< - * raise ValueError("Unable to convert item to object") - * else: - */ - __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); - __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; - if (__pyx_t_8) { - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_9); - 
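For orientation amid the generated goto/refcount machinery: setitem_indexed above resolves the element pointer and delegates, while convert_item_to_object unpacks one element's raw bytes, exactly as the mirrored Cython comments describe. A minimal pure-Python sketch of the latter (the function name and arguments are illustrative, not part of the generated API):

import struct

def convert_item_to_object(item_bytes, view_format):
    # Unpack one element's raw bytes using the buffer's struct format string.
    try:
        result = struct.unpack(view_format, item_bytes)
    except struct.error:
        raise ValueError("Unable to convert item to object")
    # struct.unpack always returns a tuple; a single-character format
    # describes one field, so unwrap it to a scalar.
    if len(view_format) == 1:
        return result[0]
    return result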
__Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_1); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_Raise(__pyx_t_6, 0, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __PYX_ERR(1, 495, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L1_error; - __pyx_L6_except_return:; - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L0; - } - - /* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesitem); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_v_struct = NULL; - char __pyx_v_c; - PyObject *__pyx_v_bytesvalue = 0; - Py_ssize_t __pyx_v_i; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - char *__pyx_t_11; - char *__pyx_t_12; - char *__pyx_t_13; - char *__pyx_t_14; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":504 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef char c - * cdef bytes bytesvalue - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * 
bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - __pyx_t_2 = PyTuple_Check(__pyx_v_value); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "View.MemoryView":510 - * - * if isinstance(value, tuple): - * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< - * else: - * bytesvalue = struct.pack(self.view.format, value) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":512 - * bytesvalue = struct.pack(self.view.format, *value) - * else: - * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< - * - * for i, c in enumerate(bytesvalue): - */ - /*else*/ { - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_7 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - 
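assign_item_from_object is the inverse path: it packs a Python value into raw bytes, which the generated loop below then copies into itemp byte by byte. A sketch under the same illustrative naming:

import struct

def pack_item(view_format, value):
    # A tuple fills several struct fields at once; anything else packs
    # as a single scalar field.
    if isinstance(value, tuple):
        return struct.pack(view_format, *value)
    return struct.pack(view_format, value)

# pack_item('i', 7) -> b'\x07\x00\x00\x00' on a little-endian build;
# the generated code then writes each of these bytes into itemp[i].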
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - { - __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - if (__pyx_t_5) { - __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; - } - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); - __Pyx_INCREF(__pyx_v_value); - __Pyx_GIVEREF(__pyx_v_value); - PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); - __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = 0; - if (unlikely(__pyx_v_bytesvalue == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); - __PYX_ERR(1, 514, __pyx_L1_error) - } - __Pyx_INCREF(__pyx_v_bytesvalue); - __pyx_t_10 = __pyx_v_bytesvalue; - __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); - __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); - for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { - __pyx_t_11 = __pyx_t_14; - __pyx_v_c = (__pyx_t_11[0]); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - __pyx_v_i = __pyx_t_9; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = (__pyx_t_9 + 1); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; - } - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesvalue); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - -/* Python wrapper */ -static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - char *__pyx_t_5; - void *__pyx_t_6; - int __pyx_t_7; - Py_ssize_t __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_self->view.readonly != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 520, __pyx_L1_error) - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - } - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":523 - * - * if flags & PyBUF_ND: - * info.shape = self.view.shape # <<<<<<<<<<<<<< - * else: - * info.shape = NULL - */ - __pyx_t_4 = __pyx_v_self->view.shape; - __pyx_v_info->shape = __pyx_t_4; - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":525 - * info.shape = self.view.shape - * else: - * 
info.shape = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_STRIDES: - */ - /*else*/ { - __pyx_v_info->shape = NULL; - } - __pyx_L6:; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":528 - * - * if flags & PyBUF_STRIDES: - * info.strides = self.view.strides # <<<<<<<<<<<<<< - * else: - * info.strides = NULL - */ - __pyx_t_4 = __pyx_v_self->view.strides; - __pyx_v_info->strides = __pyx_t_4; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - goto __pyx_L7; - } - - /* "View.MemoryView":530 - * info.strides = self.view.strides - * else: - * info.strides = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_INDIRECT: - */ - /*else*/ { - __pyx_v_info->strides = NULL; - } - __pyx_L7:; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":533 - * - * if flags & PyBUF_INDIRECT: - * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< - * else: - * info.suboffsets = NULL - */ - __pyx_t_4 = __pyx_v_self->view.suboffsets; - __pyx_v_info->suboffsets = __pyx_t_4; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":535 - * info.suboffsets = self.view.suboffsets - * else: - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - /*else*/ { - __pyx_v_info->suboffsets = NULL; - } - __pyx_L8:; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":538 - * - * if flags & PyBUF_FORMAT: - * info.format = self.view.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_5 = __pyx_v_self->view.format; - __pyx_v_info->format = __pyx_t_5; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":540 - * info.format = self.view.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.buf = self.view.buf - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L9:; - - /* "View.MemoryView":542 - * info.format = NULL - * - * info.buf = self.view.buf # <<<<<<<<<<<<<< - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - */ - __pyx_t_6 = __pyx_v_self->view.buf; - __pyx_v_info->buf = __pyx_t_6; - - /* "View.MemoryView":543 - * - * info.buf = self.view.buf - * info.ndim = self.view.ndim # <<<<<<<<<<<<<< - * info.itemsize = self.view.itemsize - * info.len = self.view.len - */ - __pyx_t_7 = __pyx_v_self->view.ndim; - __pyx_v_info->ndim = __pyx_t_7; - - /* "View.MemoryView":544 - * info.buf = self.view.buf - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< - * info.len = self.view.len - * info.readonly = self.view.readonly - */ - __pyx_t_8 = __pyx_v_self->view.itemsize; - __pyx_v_info->itemsize = __pyx_t_8; - - /* 
"View.MemoryView":545 - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - * info.len = self.view.len # <<<<<<<<<<<<<< - * info.readonly = self.view.readonly - * info.obj = self - */ - __pyx_t_8 = __pyx_v_self->view.len; - __pyx_v_info->len = __pyx_t_8; - - /* "View.MemoryView":546 - * info.itemsize = self.view.itemsize - * info.len = self.view.len - * info.readonly = self.view.readonly # <<<<<<<<<<<<<< - * info.obj = self - * - */ - __pyx_t_1 = __pyx_v_self->view.readonly; - __pyx_v_info->readonly = __pyx_t_1; - - /* "View.MemoryView":547 - * info.len = self.view.len - * info.readonly = self.view.readonly - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":554 - * @property - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< - * transpose_memslice(&result.from_slice) - * return result - */ - __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); - __pyx_t_1 
= 0; - - /* "View.MemoryView":555 - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) - - /* "View.MemoryView":556 - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - * return result # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":560 - * @property - * def base(self): - * return self.obj # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->obj); - __pyx_r = __pyx_v_self->obj; - goto __pyx_L0; - - /* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_length; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t 
*__pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":564 - * @property - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_length = (__pyx_t_2[0]); - __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_stride; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) - 
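__getbuffer__ above fills shape, strides, suboffsets and format only when the consumer asked for them via PyBUF_ND, PyBUF_STRIDES, PyBUF_INDIRECT and PyBUF_FORMAT, while the shape and strides properties repackage the same C arrays as Python tuples, raising ValueError when the exporter supplied no stride information. CPython's built-in memoryview behaves the same way, which makes the semantics easy to check directly:

buf = bytearray(12)
mv = memoryview(buf).cast('i')   # reinterpret 12 bytes as an array of C ints
print(mv.shape)                  # (3,)
print(mv.strides)                # (4,) on a platform with 4-byte int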
__Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 570, __pyx_L1_error) - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - } - - /* "View.MemoryView":572 - * raise ValueError("Buffer view does not expose strides") - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_v_stride = (__pyx_t_3[0]); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - /* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - Py_ssize_t *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - } - - /* "View.MemoryView":579 - * return (-1,) * self.view.ndim - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); - for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { - __pyx_t_4 = __pyx_t_6; - __pyx_v_suboffset = (__pyx_t_4[0]); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":583 - * @property - * def ndim(self): - * return self.view.ndim # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; 
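The suboffsets property has the one wrinkle spelled out in the comments: a NULL suboffsets pointer means there are no indirect dimensions, and it is reported as -1 per dimension rather than as an empty tuple. As a sketch (view stands in for the underlying Py_buffer struct, not a real attribute path):

def get_suboffsets(view):
    if view.suboffsets is None:          # NULL in the C struct
        return (-1,) * view.ndim
    return tuple(view.suboffsets[:view.ndim])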
- __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":587 - * @property - * def itemsize(self): - * return self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":591 - * @property - * def nbytes(self): - * return self.size * self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - 
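nbytes multiplies the element count by the element width, i.e. the product of the shape times itemsize; a one-line check against the earlier built-in example:

buf = bytearray(12)
mv = memoryview(buf).cast('i')
assert mv.nbytes == len(mv) * mv.itemsize == 12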
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - __pyx_t_1 = (__pyx_v_self->_size == Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":596 - * def size(self): - * if self._size is None: - * result = 1 # <<<<<<<<<<<<<< - * - * for length in self.view.shape[:self.view.ndim]: - */ - __Pyx_INCREF(__pyx_int_1); - __pyx_v_result = __pyx_int_1; - - /* "View.MemoryView":598 - * result = 1 - * - * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< - * result *= length - * - */ - __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); - __pyx_t_6 = 0; - - /* "View.MemoryView":599 - * - * for length in self.view.shape[:self.view.ndim]: - * result *= length # <<<<<<<<<<<<<< - * - * self._size = result - */ - __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); - __pyx_t_6 = 0; - } - - /* "View.MemoryView":601 - * result *= length - * - * self._size = result # <<<<<<<<<<<<<< - * - * return self._size - */ - __Pyx_INCREF(__pyx_v_result); - __Pyx_GIVEREF(__pyx_v_result); - __Pyx_GOTREF(__pyx_v_self->_size); - __Pyx_DECREF(__pyx_v_self->_size); - __pyx_v_self->_size = __pyx_v_result; - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - } - - /* "View.MemoryView":603 - * self._size = result - * - * return self._size # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->_size); - __pyx_r = __pyx_v_self->_size; - goto __pyx_L0; - - /* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":607 - * def __len__(self): - * if self.view.ndim >= 1: - * return self.view.shape[0] # <<<<<<<<<<<<<< - * - * return 0 - */ - __pyx_r = (__pyx_v_self->view.shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - } - - /* "View.MemoryView":609 - * return self.view.shape[0] - * - * return 0 # <<<<<<<<<<<<<< - * - * def __repr__(self): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__, - * id(self)) - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - 
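The size property computes the element count lazily and caches it (self._size starts as None and is filled on first access), while __len__ reports only the first dimension and falls back to 0 for a 0-dim view. Compactly, with illustrative helpers:

from functools import reduce
import operator

def element_count(shape):
    # Product of all extents; the real code caches this in self._size.
    return reduce(operator.mul, shape, 1)

def length(shape):
    return shape[0] if shape else 0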
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":613 - * def __repr__(self): - * return "" % (self.base.__class__.__name__, - * id(self)) # <<<<<<<<<<<<<< - * - * def __str__(self): - */ - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__, - * id(self)) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ 
- __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__str__", 0); - - /* "View.MemoryView":616 - * - * def __str__(self): - * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_c_contig", 0); - - /* "View.MemoryView":622 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - */ - __pyx_t_1 = 
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":623 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< - * - * def is_f_contig(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_f_contig", 0); - - /* "View.MemoryView":628 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":629 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< - * - * def copy(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - 
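/* [editor's sketch] `is_c_contig` and `is_f_contig` above both reduce to
 * __pyx_memviewslice_is_contig, which walks the strides of the slice. A hedged
 * pure-Python sketch of that check (the function name and the exact treatment of
 * extent-1 dimensions are assumptions, for illustration only):
 *
 *     def slice_is_contig(shape, strides, itemsize, order):
 *         # 'C': last axis varies fastest; 'F': first axis varies fastest.
 *         axes = reversed(range(len(shape))) if order == 'C' else range(len(shape))
 *         expected = itemsize
 *         for i in axes:
 *             if shape[i] > 1 and strides[i] != expected:
 *                 return False
 *             expected *= shape[i]
 *         return True
 *
 *     print(slice_is_contig((2, 3), (24, 8), 8, 'C'))   # True
 *     print(slice_is_contig((2, 3), (8, 16), 8, 'F'))   # True
 */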
- /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_mslice; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy", 0); - - /* "View.MemoryView":633 - * def copy(self): - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &mslice) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); - - /* "View.MemoryView":635 - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - * - * slice_copy(self, &mslice) # <<<<<<<<<<<<<< - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); - - /* "View.MemoryView":636 - * - * slice_copy(self, &mslice) - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_C_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":641 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< - * - * def copy_fortran(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":643 - * return 
memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_fortran", 0); - - /* "View.MemoryView":645 - * def copy_fortran(self): - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &src) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); - - /* "View.MemoryView":647 - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - * - * slice_copy(self, &src) # <<<<<<<<<<<<<< - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); - - /* "View.MemoryView":648 - * - * slice_copy(self, &src) - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_F_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) - __pyx_v_dst = __pyx_t_1; - - /* "View.MemoryView":653 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":643 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - 
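/* [editor's note] Both __reduce_cython__ and __setstate_cython__ unconditionally raise,
 * so instances of this extension type cannot be pickled: their __cinit__ takes runtime
 * arguments that the default reduce protocol could not reconstruct. A small Python
 * analogy of the observable behaviour (the class name is hypothetical, illustration only):
 *
 *     import pickle
 *
 *     class _NonTrivialCinit:
 *         def __reduce__(self):
 *             raise TypeError("no default __reduce__ due to non-trivial __cinit__")
 *
 *     try:
 *         pickle.dumps(_NonTrivialCinit())
 *     except TypeError as exc:
 *         print(exc)   # no default __reduce__ due to non-trivial __cinit__
 */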
__Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - -static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { - struct __pyx_memoryview_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); - - /* "View.MemoryView":658 - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< - * result.typeinfo = typeinfo - * return result - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_o); - __Pyx_GIVEREF(__pyx_v_o); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":659 - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_v_result->typeinfo = __pyx_v_typeinfo; - - /* "View.MemoryView":660 - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_check') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("memoryview_check", 0); - - /* "View.MemoryView":664 - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): - * return isinstance(o, memoryview) # <<<<<<<<<<<<<< - * - * cdef tuple _unellipsify(object index, int ndim): - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - -static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { - PyObject *__pyx_v_tup = NULL; - PyObject *__pyx_v_result = NULL; - int __pyx_v_have_slices; - int __pyx_v_seen_ellipsis; - CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; - PyObject *__pyx_v_item = NULL; - Py_ssize_t __pyx_v_nslices; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - Py_ssize_t __pyx_t_5; - PyObject *(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - Py_ssize_t __pyx_t_8; - int __pyx_t_9; - int __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_unellipsify", 0); - - /* "View.MemoryView":671 - * full slices. - * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - __pyx_t_1 = PyTuple_Check(__pyx_v_index); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":672 - * """ - * if not isinstance(index, tuple): - * tup = (index,) # <<<<<<<<<<<<<< - * else: - * tup = index - */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_index); - __Pyx_GIVEREF(__pyx_v_index); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); - __pyx_v_tup = __pyx_t_3; - __pyx_t_3 = 0; - - /* "View.MemoryView":671 - * full slices. 
- * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":674 - * tup = (index,) - * else: - * tup = index # <<<<<<<<<<<<<< - * - * result = [] - */ - /*else*/ { - __Pyx_INCREF(__pyx_v_index); - __pyx_v_tup = __pyx_v_index; - } - __pyx_L3:; - - /* "View.MemoryView":676 - * tup = index - * - * result = [] # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_result = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":677 - * - * result = [] - * have_slices = False # <<<<<<<<<<<<<< - * seen_ellipsis = False - * for idx, item in enumerate(tup): - */ - __pyx_v_have_slices = 0; - - /* "View.MemoryView":678 - * result = [] - * have_slices = False - * seen_ellipsis = False # <<<<<<<<<<<<<< - * for idx, item in enumerate(tup): - * if item is Ellipsis: - */ - __pyx_v_seen_ellipsis = 0; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_3 = __pyx_int_0; - if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { - __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } - } else { - __pyx_t_7 = __pyx_t_6(__pyx_t_4); - if (unlikely(!__pyx_t_7)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 679, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_INCREF(__pyx_t_3); - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); - __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = __pyx_t_7; - __pyx_t_7 = 0; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":683 - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True # <<<<<<<<<<<<<< - * else: - * result.append(slice(None)) - */ - __pyx_v_seen_ellipsis = 1; - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - goto __pyx_L7; - } - - /* "View.MemoryView":685 - * seen_ellipsis = True - * else: - * result.append(slice(None)) # <<<<<<<<<<<<<< - * have_slices = True - * else: - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":686 - * else: - * result.append(slice(None)) - * have_slices = True # <<<<<<<<<<<<<< - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - goto __pyx_L6; - } - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - /*else*/ { - __pyx_t_2 = PySlice_Check(__pyx_v_item); - __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); - __pyx_t_1 = __pyx_t_10; - __pyx_L9_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":689 - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< - * - * have_slices = have_slices or isinstance(item, slice) - */ - __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_Raise(__pyx_t_11, 0, 0, 0); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __PYX_ERR(1, 689, __pyx_L1_error) - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - } - - /* "View.MemoryView":691 - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< - * result.append(item) - * - */ - __pyx_t_10 = (__pyx_v_have_slices != 0); - if (!__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = PySlice_Check(__pyx_v_item); - __pyx_t_2 = (__pyx_t_10 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_have_slices = __pyx_t_1; - - /* "View.MemoryView":692 - * - * have_slices = have_slices or isinstance(item, slice) - * result.append(item) # <<<<<<<<<<<<<< - * - * nslices = ndim - len(result) - */ - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) - } - __pyx_L6:; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":694 - * result.append(item) - * - * nslices = ndim - len(result) # <<<<<<<<<<<<<< - * if nslices: - * result.extend([slice(None)] * nslices) - */ - __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) - __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - __pyx_t_1 = (__pyx_v_nslices != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":696 - * nslices = ndim - len(result) - * if nslices: - * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< - * - * return have_slices or nslices, tuple(result) - */ - __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - } - - /* "View.MemoryView":698 - * result.extend([slice(None)] * nslices) - * - * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - */ - __Pyx_XDECREF(__pyx_r); - if (!__pyx_v_have_slices) { - } else { - __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L14_bool_binop_done; - } - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_L14_bool_binop_done:; - __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_r = ((PyObject*)__pyx_t_11); - __pyx_t_11 = 0; - goto __pyx_L0; - - /* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tup); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - -static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); - - /* "View.MemoryView":701 - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") - */ - 
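/* [editor's sketch] The _unellipsify helper that ends above normalizes an index
 * expression before slicing: every Ellipsis becomes the right number of full slices,
 * and missing trailing dimensions are padded with slice(None). A pure-Python
 * restatement of the quoted Cython source (hasattr(item, '__index__') stands in for
 * PyIndex_Check; otherwise this follows the source comments line for line):
 *
 *     def _unellipsify(index, ndim):
 *         tup = index if isinstance(index, tuple) else (index,)
 *         result, have_slices, seen_ellipsis = [], False, False
 *         for item in tup:
 *             if item is Ellipsis:
 *                 if not seen_ellipsis:
 *                     result.extend([slice(None)] * (ndim - len(tup) + 1))
 *                     seen_ellipsis = True
 *                 else:
 *                     result.append(slice(None))
 *                 have_slices = True
 *             else:
 *                 if not isinstance(item, slice) and not hasattr(item, '__index__'):
 *                     raise TypeError("Cannot index with type '%s'" % type(item))
 *                 have_slices = have_slices or isinstance(item, slice)
 *                 result.append(item)
 *         nslices = ndim - len(result)
 *         if nslices:
 *             result.extend([slice(None)] * nslices)
 *         return have_slices or nslices, tuple(result)
 *
 *     print(_unellipsify((1, Ellipsis), 3))
 *     # (True, (1, slice(None, None, None), slice(None, None, None)))
 */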
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); - for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { - __pyx_t_1 = __pyx_t_3; - __pyx_v_suboffset = (__pyx_t_1[0]); - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(1, 703, __pyx_L1_error) - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - } - } - - /* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { - int __pyx_v_new_ndim; - int __pyx_v_suboffset_dim; - int __pyx_v_dim; - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - __Pyx_memviewslice *__pyx_v_p_src; - struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; - __Pyx_memviewslice *__pyx_v_p_dst; - int *__pyx_v_p_suboffset_dim; - Py_ssize_t __pyx_v_start; - Py_ssize_t __pyx_v_stop; - Py_ssize_t __pyx_v_step; - int __pyx_v_have_start; - int __pyx_v_have_stop; - int __pyx_v_have_step; - PyObject *__pyx_v_index = NULL; - struct __pyx_memoryview_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - struct __pyx_memoryview_obj *__pyx_t_4; - char *__pyx_t_5; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject *(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - Py_ssize_t __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memview_slice", 0); - - /* "View.MemoryView":711 - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): - * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< - * cdef bint negative_step - * cdef __Pyx_memviewslice src, dst - */ - __pyx_v_new_ndim = 0; - __pyx_v_suboffset_dim = -1; - - /* 
"View.MemoryView":718 - * - * - * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< - * - * cdef _memoryviewslice memviewsliceobj - */ - (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); - - /* "View.MemoryView":722 - * cdef _memoryviewslice memviewsliceobj - * - * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { - PyErr_SetNone(PyExc_AssertionError); - __PYX_ERR(1, 722, __pyx_L1_error) - } - } - #endif - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":725 - * - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview # <<<<<<<<<<<<<< - * p_src = &memviewsliceobj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":726 - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, &src) - */ - __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - goto __pyx_L3; - } - - /* "View.MemoryView":728 - * p_src = &memviewsliceobj.from_slice - * else: - * slice_copy(memview, &src) # <<<<<<<<<<<<<< - * p_src = &src - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); - - /* "View.MemoryView":729 - * else: - * slice_copy(memview, &src) - * p_src = &src # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_p_src = (&__pyx_v_src); - } - __pyx_L3:; - - /* "View.MemoryView":735 - * - * - * dst.memview = p_src.memview # <<<<<<<<<<<<<< - * dst.data = p_src.data - * - */ - __pyx_t_4 = __pyx_v_p_src->memview; - __pyx_v_dst.memview = __pyx_t_4; - - /* "View.MemoryView":736 - * - * dst.memview = p_src.memview - * dst.data = p_src.data # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_v_p_src->data; - __pyx_v_dst.data = __pyx_t_5; - - /* "View.MemoryView":741 - * - * - * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< - * cdef int *p_suboffset_dim = &suboffset_dim - * cdef Py_ssize_t start, stop, step - */ - __pyx_v_p_dst = (&__pyx_v_dst); - - /* "View.MemoryView":742 - * - * cdef __Pyx_memviewslice *p_dst = &dst - * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< - * cdef Py_ssize_t start, stop, step - * cdef bint have_start, have_stop, have_step - */ - __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - __pyx_t_6 = 0; - if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { - 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_8)) { - if (likely(PyList_CheckExact(__pyx_t_3))) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } else { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } - } else { - __pyx_t_9 = __pyx_t_8(__pyx_t_3); - if (unlikely(!__pyx_t_9)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 746, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_9); - } - __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); - __pyx_t_9 = 0; - __pyx_v_dim = __pyx_t_6; - __pyx_t_6 = (__pyx_t_6 + 1); - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":751 - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< - * 0, 0, 0, # have_{start,stop,step} - * False) - */ - __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) - - /* "View.MemoryView":748 - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - goto __pyx_L6; - } - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - __pyx_t_2 = (__pyx_v_index == Py_None); - __pyx_t_1 = 
(__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":755 - * False) - * elif index is None: - * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - */ - (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; - - /* "View.MemoryView":756 - * elif index is None: - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 - */ - (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; - - /* "View.MemoryView":757 - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< - * new_ndim += 1 - * else: - */ - (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; - - /* "View.MemoryView":758 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 # <<<<<<<<<<<<<< - * else: - * start = index.start or 0 - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - goto __pyx_L6; - } - - /* "View.MemoryView":760 - * new_ndim += 1 - * else: - * start = index.start or 0 # <<<<<<<<<<<<<< - * stop = index.stop or 0 - * step = index.step or 0 - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L7_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L7_bool_binop_done:; - __pyx_v_start = __pyx_t_10; - - /* "View.MemoryView":761 - * else: - * start = index.start or 0 - * stop = index.stop or 0 # <<<<<<<<<<<<<< - * step = index.step or 0 - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L9_bool_binop_done:; - __pyx_v_stop = __pyx_t_10; - - /* "View.MemoryView":762 - * start = index.start or 0 - * stop = index.stop or 0 - * step = index.step or 0 # <<<<<<<<<<<<<< - * - * have_start = index.start is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - 
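/* [editor's note] The blocks around this point lower the Cython source
 * `start = index.start or 0` (and its stop/step twins) together with the
 * have_start/have_stop/have_step flags: the `or 0` keeps a C Py_ssize_t usable when a
 * slice component is None, while the flags preserve the None-vs-0 distinction that
 * slice_memviewslice needs. A one-function Python sketch (the name is illustrative):
 *
 *     def unpack_slice(index):
 *         start = index.start or 0
 *         stop = index.stop or 0
 *         step = index.step or 0
 *         return (start, stop, step,
 *                 index.start is not None,
 *                 index.stop is not None,
 *                 index.step is not None)
 *
 *     print(unpack_slice(slice(2, None, -1)))   # (2, 0, -1, True, False, True)
 */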
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L11_bool_binop_done:; - __pyx_v_step = __pyx_t_10; - - /* "View.MemoryView":764 - * step = index.step or 0 - * - * have_start = index.start is not None # <<<<<<<<<<<<<< - * have_stop = index.stop is not None - * have_step = index.step is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_start = __pyx_t_1; - - /* "View.MemoryView":765 - * - * have_start = index.start is not None - * have_stop = index.stop is not None # <<<<<<<<<<<<<< - * have_step = index.step is not None - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_stop = __pyx_t_1; - - /* "View.MemoryView":766 - * have_start = index.start is not None - * have_stop = index.stop is not None - * have_step = index.step is not None # <<<<<<<<<<<<<< - * - * slice_memviewslice( - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_step = __pyx_t_1; - - /* "View.MemoryView":768 - * have_step = index.step is not None - * - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) - - /* "View.MemoryView":774 - * have_start, have_stop, have_step, - * True) - * new_ndim += 1 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - } - __pyx_L6:; - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":778 - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< - * memviewsliceobj.to_dtype_func, - * memview.dtype_is_object) - */ - if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } - - /* "View.MemoryView":779 - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * else: - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - } - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - /*else*/ { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":783 - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { - Py_ssize_t __pyx_v_new_shape; - int __pyx_v_negative_step; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - __pyx_t_1 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":830 - * - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - } - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - __pyx_t_1 = (0 <= __pyx_v_start); - if (__pyx_t_1) { - __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); - } - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":832 - * start += shape - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< - * else: - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - } - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":835 - * else: - * - * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< - * - * if have_step and step == 0: - */ - /*else*/ { - __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step < 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L6_bool_binop_done:; - __pyx_v_negative_step = __pyx_t_2; - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) - * - */ - __pyx_t_1 = (__pyx_v_have_step != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step == 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L9_bool_binop_done:; - if (__pyx_t_2) { - - /* "View.MemoryView":838 - * - * if have_step and step == 0: - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) - * - */ - } - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - __pyx_t_2 = (__pyx_v_have_start != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":843 - * if have_start: - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if start < 0: - * start = 0 - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":845 - * start += shape - * if start < 0: - * start = 0 # <<<<<<<<<<<<<< - * elif start >= shape: - * if negative_step: - */ - __pyx_v_start = 0; - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - } - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - goto __pyx_L12; - } - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":848 - * elif start >= shape: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = shape - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L14; - } - - /* "View.MemoryView":850 - * start = shape - 1 - * else: - * start = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - /*else*/ { - __pyx_v_start = __pyx_v_shape; - } - __pyx_L14:; - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - } - __pyx_L12:; - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - goto __pyx_L11; - } - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":853 - * else: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = 0 - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L15; - } - - /* "View.MemoryView":855 - * start = shape - 1 - * else: - * start = 0 # <<<<<<<<<<<<<< - * - * if have_stop: - */ - /*else*/ { - __pyx_v_start = 0; - } - __pyx_L15:; - } - __pyx_L11:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - __pyx_t_2 = (__pyx_v_have_stop != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # 
<<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":859 - * if have_stop: - * if stop < 0: - * stop += shape # <<<<<<<<<<<<<< - * if stop < 0: - * stop = 0 - */ - __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":861 - * stop += shape - * if stop < 0: - * stop = 0 # <<<<<<<<<<<<<< - * elif stop > shape: - * stop = shape - */ - __pyx_v_stop = 0; - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - } - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # <<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - goto __pyx_L17; - } - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":863 - * stop = 0 - * elif stop > shape: - * stop = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - __pyx_v_stop = __pyx_v_shape; - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - } - __pyx_L17:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - goto __pyx_L16; - } - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":866 - * else: - * if negative_step: - * stop = -1 # <<<<<<<<<<<<<< - * else: - * stop = shape - */ - __pyx_v_stop = -1L; - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - goto __pyx_L19; - } - - /* "View.MemoryView":868 - * stop = -1 - * else: - * stop = shape # <<<<<<<<<<<<<< - * - * if not have_step: - */ - /*else*/ { - __pyx_v_stop = __pyx_v_shape; - } - __pyx_L19:; - } - __pyx_L16:; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":871 - * - * if not have_step: - * step = 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_step = 1; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - } - - /* "View.MemoryView":875 - * - * with cython.cdivision(True): - * new_shape = (stop - start) // step # <<<<<<<<<<<<<< - * - * if (stop - start) - step * new_shape: - */ - __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * new_shape += 1 - * - */ - __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":878 - * - * if (stop - start) - step * new_shape: - * new_shape += 1 # <<<<<<<<<<<<<< - * - * if new_shape < 0: - */ - __pyx_v_new_shape = (__pyx_v_new_shape + 1); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * 
new_shape += 1 - * - */ - } - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":881 - * - * if new_shape < 0: - * new_shape = 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_new_shape = 0; - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - } - - /* "View.MemoryView":884 - * - * - * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset - */ - (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); - - /* "View.MemoryView":885 - * - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< - * dst.suboffsets[new_ndim] = suboffset - * - */ - (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; - - /* "View.MemoryView":886 - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; - } - __pyx_L3:; - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":890 - * - * if suboffset_dim[0] < 0: - * dst.data += start * stride # <<<<<<<<<<<<<< - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride - */ - __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - goto __pyx_L23; - } - - /* "View.MemoryView":892 - * dst.data += start * stride - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< - * - * if suboffset >= 0: - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_suboffset_dim[0]); - (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); - } - __pyx_L23:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset - */ - __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":897 - * if not is_slice: - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - */ - __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - */ - goto __pyx_L26; - } - - /* "View.MemoryView":899 - * dst.data = (<char **> dst.data)[0] + suboffset - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< - * 
"must be indexed and not sliced", dim) - * else: - */ - /*else*/ { - - /* "View.MemoryView":900 - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< - * else: - * suboffset_dim[0] = new_ndim - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) - } - __pyx_L26:; - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = (<char **> dst.data)[0] + suboffset - */ - goto __pyx_L25; - } - - /* "View.MemoryView":902 - * "must be indexed and not sliced", dim) - * else: - * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< - * - * return 0 - */ - /*else*/ { - (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; - } - __pyx_L25:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - } - - /* "View.MemoryView":904 - * suboffset_dim[0] = new_ndim - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - -static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_suboffset; - Py_ssize_t __pyx_v_itemsize; - char *__pyx_v_resultp; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pybuffer_index", 0); - - /* "View.MemoryView":912 - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< - * cdef Py_ssize_t itemsize = view.itemsize - * cdef char *resultp - */ - __pyx_v_suboffset = -1L; - - /* "View.MemoryView":913 - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< - * cdef char *resultp - * - */ - __pyx_t_1 = __pyx_v_view->itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":917 - * - * if view.ndim == 
0: - * shape = view.len / itemsize # <<<<<<<<<<<<<< - * stride = itemsize - * else: - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); - - /* "View.MemoryView":918 - * if view.ndim == 0: - * shape = view.len / itemsize - * stride = itemsize # <<<<<<<<<<<<<< - * else: - * shape = view.shape[dim] - */ - __pyx_v_stride = __pyx_v_itemsize; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - goto __pyx_L3; - } - - /* "View.MemoryView":920 - * stride = itemsize - * else: - * shape = view.shape[dim] # <<<<<<<<<<<<<< - * stride = view.strides[dim] - * if view.suboffsets != NULL: - */ - /*else*/ { - __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); - - /* "View.MemoryView":921 - * else: - * shape = view.shape[dim] - * stride = view.strides[dim] # <<<<<<<<<<<<<< - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] - */ - __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":923 - * stride = view.strides[dim] - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< - * - * if index < 0: - */ - __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - } - } - __pyx_L3:; - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":926 - * - * if index < 0: - * index += view.shape[dim] # <<<<<<<<<<<<<< - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - */ - __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":928 - * index += view.shape[dim] - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * if index >= shape: - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 928, __pyx_L1_error) - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - } - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":931 - * - * if index >= shape: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * resultp = bufp + index * stride - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 931, __pyx_L1_error) - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":933 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * resultp = bufp + index * stride # <<<<<<<<<<<<<< - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset - */ - __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":935 - * resultp = bufp + index * stride - * if suboffset >= 0: - * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< - * - * return resultp - */ - __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = (<char **> resultp)[0] + suboffset - * - */ - } - - /* "View.MemoryView":937 - * resultp = (<char **> resultp)[0] + suboffset - * - * return resultp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_resultp; - goto __pyx_L0; - - /* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - 
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - -static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { - int __pyx_v_ndim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - long __pyx_t_3; - long __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":944 - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: - * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< - * - * cdef Py_ssize_t *shape = memslice.shape - */ - __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; - __pyx_v_ndim = __pyx_t_1; - - /* "View.MemoryView":946 - * cdef int ndim = memslice.memview.view.ndim - * - * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< - * cdef Py_ssize_t *strides = memslice.strides - * - */ - __pyx_t_2 = __pyx_v_memslice->shape; - __pyx_v_shape = __pyx_t_2; - - /* "View.MemoryView":947 - * - * cdef Py_ssize_t *shape = memslice.shape - * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_v_memslice->strides; - __pyx_v_strides = __pyx_t_2; - - /* "View.MemoryView":951 - * - * cdef int i, j - * for i in range(ndim / 2): # <<<<<<<<<<<<<< - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - */ - __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":952 - * cdef int i, j - * for i in range(ndim / 2): - * j = ndim - 1 - i # <<<<<<<<<<<<<< - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] - */ - __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); - - /* "View.MemoryView":953 - * for i in range(ndim / 2): - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< - * shape[i], shape[j] = shape[j], shape[i] - * - */ - __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); - __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); - (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; - (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; - - /* "View.MemoryView":954 - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - */ - __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); - __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); - (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; - (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); - if (!__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); - __pyx_t_7 = __pyx_t_8; - __pyx_L6_bool_binop_done:; - if (__pyx_t_7) { - - /* "View.MemoryView":957 - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< - * - * return 1 - */ - __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - } - } - - /* "View.MemoryView":959 - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - * return 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 1; - goto __pyx_L0; - - /* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - -/* Python wrapper */ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":977 - * - * def __dealloc__(self): - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); - - /* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":981 - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) # <<<<<<<<<<<<<< - * else: - * return memoryview.convert_item_to_object(self, itemp) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - } - - /* "View.MemoryView":983 - * return self.to_object_func(itemp) - * else: - * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":987 - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< - * else: - * memoryview.assign_item_from_object(self, itemp, value) - */ - __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":989 - * self.to_dtype_func(itemp, value) - * else: - * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< - * - * @property - */ - /*else*/ { - __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":993 - * @property - * def base(self): - * return self.from_object # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->from_object); - __pyx_r = __pyx_v_self->from_object; - goto __pyx_L0; - - /* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code 
*/ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - __Pyx_TypeInfo *__pyx_t_4; - Py_buffer __pyx_t_5; - Py_ssize_t *__pyx_t_6; - Py_ssize_t *__pyx_t_7; - Py_ssize_t *__pyx_t_8; - Py_ssize_t __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_fromslice", 0); - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1008 - * - * if <PyObject *> memviewslice.memview == Py_None: - * return None # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - } - - /* "View.MemoryView":1013 - * - * - * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< - * - * result.from_slice = memviewslice - */ - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1015 - * result = _memoryviewslice(None, 0, dtype_is_object) - * - * result.from_slice = memviewslice # <<<<<<<<<<<<<< - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - */ - __pyx_v_result->from_slice = __pyx_v_memviewslice; - - /* "View.MemoryView":1016 - * - * result.from_slice = memviewslice - * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< - * - * result.from_object = (<memoryview> memviewslice.memview).base - */ - __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); - - /* "View.MemoryView":1018 - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< - * result.typeinfo = 
memviewslice.memview.typeinfo - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(__pyx_v_result->from_object); - __Pyx_DECREF(__pyx_v_result->from_object); - __pyx_v_result->from_object = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":1019 - * - * result.from_object = (<memoryview> memviewslice.memview).base - * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< - * - * result.view = memviewslice.memview.view - */ - __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; - __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; - - /* "View.MemoryView":1021 - * result.typeinfo = memviewslice.memview.typeinfo - * - * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim - */ - __pyx_t_5 = __pyx_v_memviewslice.memview->view; - __pyx_v_result->__pyx_base.view = __pyx_t_5; - - /* "View.MemoryView":1022 - * - * result.view = memviewslice.memview.view - * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - */ - __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); - - /* "View.MemoryView":1023 - * result.view = memviewslice.memview.view - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; - - /* "View.MemoryView":1024 - * result.view.buf = <void *> memviewslice.data - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; - - /* "View.MemoryView":1025 - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1028 - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: - * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< - * else: - * result.flags = PyBUF_RECORDS_RO - */ - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1030 - * result.flags = PyBUF_RECORDS - * else: - * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape - */ - /*else*/ { - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; - } - __pyx_L4:; - - /* "View.MemoryView":1032 - * result.flags = PyBUF_RECORDS_RO - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< - * result.view.strides = <Py_ssize_t *> result.from_slice.strides - * - */ - __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); - - /* "View.MemoryView":1033 - * - * result.view.shape = <Py_ssize_t *> result.from_slice.shape - * result.view.strides = 
<Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); - - /* "View.MemoryView":1036 - * - * - * result.view.suboffsets = NULL # <<<<<<<<<<<<<< - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - */ - __pyx_v_result->__pyx_base.view.suboffsets = NULL; - - /* "View.MemoryView":1037 - * - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - */ - __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_v_suboffset = (__pyx_t_6[0]); - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break - */ - __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1039 - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); - - /* "View.MemoryView":1040 - * if suboffset >= 0: - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break # <<<<<<<<<<<<<< - * - * result.view.len = result.view.itemsize - */ - goto __pyx_L6_break; - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets - * break - */ - } - } - __pyx_L6_break:; - - /* "View.MemoryView":1042 - * break - * - * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< - * for length in result.view.shape[:ndim]: - * result.view.len *= length - */ - __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - - /* "View.MemoryView":1043 - * - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< - * result.view.len *= length - * - */ - __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1044 - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: - * result.view.len *= length # <<<<<<<<<<<<<< - * - * result.to_object_func = to_object_func - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; - } - - /* "View.MemoryView":1046 - * result.view.len *= length - * - * result.to_object_func = to_object_func # <<<<<<<<<<<<<< - * result.to_dtype_func = to_dtype_func - * - */ - __pyx_v_result->to_object_func = __pyx_v_to_object_func; - - /* "View.MemoryView":1047 - * - * result.to_object_func = to_object_func - * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; - - /* "View.MemoryView":1049 - * result.to_dtype_func = to_dtype_func - * - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { - struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; - __Pyx_memviewslice *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_slice_from_memview", 0); - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1056 - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): - * obj = memview # <<<<<<<<<<<<<< - * return &obj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":1057 - * if isinstance(memview, _memoryviewslice): - * obj = memview - * return &obj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, mslice) - */ - __pyx_r = (&__pyx_v_obj->from_slice); - goto __pyx_L0; - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - } - - /* "View.MemoryView":1059 - * return 
&obj.from_slice - * else: - * slice_copy(memview, mslice) # <<<<<<<<<<<<<< - * return mslice - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); - - /* "View.MemoryView":1060 - * else: - * slice_copy(memview, mslice) - * return mslice # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_slice_copy') - */ - __pyx_r = __pyx_v_mslice; - goto __pyx_L0; - } - - /* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_obj); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { - int __pyx_v_dim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - Py_ssize_t *__pyx_v_suboffsets; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - __Pyx_RefNannySetupContext("slice_copy", 0); - - /* "View.MemoryView":1067 - * cdef (Py_ssize_t*) shape, strides, suboffsets - * - * shape = memview.view.shape # <<<<<<<<<<<<<< - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets - */ - __pyx_t_1 = __pyx_v_memview->view.shape; - __pyx_v_shape = __pyx_t_1; - - /* "View.MemoryView":1068 - * - * shape = memview.view.shape - * strides = memview.view.strides # <<<<<<<<<<<<<< - * suboffsets = memview.view.suboffsets - * - */ - __pyx_t_1 = __pyx_v_memview->view.strides; - __pyx_v_strides = __pyx_t_1; - - /* "View.MemoryView":1069 - * shape = memview.view.shape - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< - * - * dst.memview = <__pyx_memoryview *> memview - */ - __pyx_t_1 = __pyx_v_memview->view.suboffsets; - __pyx_v_suboffsets = __pyx_t_1; - - /* "View.MemoryView":1071 - * suboffsets = memview.view.suboffsets - * - * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< - * dst.data = <char *> memview.view.buf - * - */ - __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); - - /* "View.MemoryView":1072 - * - * dst.memview = <__pyx_memoryview *> memview - * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< - * - * for dim in range(memview.view.ndim): - */ - __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); - - /* "View.MemoryView":1074 - * dst.data = <char *> memview.view.buf - * - * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - */ - __pyx_t_2 = __pyx_v_memview->view.ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_dim = __pyx_t_4; - - /* "View.MemoryView":1075 - * - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - */ - (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); - - /* "View.MemoryView":1076 - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - * - */ - (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); - - /* "View.MemoryView":1077 - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object') - */ - if ((__pyx_v_suboffsets != 0)) { - __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); - } else { - __pyx_t_5 = -1L; - } - (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; - } - - /* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { - __Pyx_memviewslice __pyx_v_memviewslice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy", 0); - - /* "View.MemoryView":1083 - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< - * return memoryview_copy_from_slice(memview, &memviewslice) - * - */ - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); - - /* "View.MemoryView":1084 - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) - * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object_from_slice') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview object and slice. 
- */ - -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { - PyObject *(*__pyx_v_to_object_func)(char *); - int (*__pyx_v_to_dtype_func)(char *, PyObject *); - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *(*__pyx_t_3)(char *); - int (*__pyx_t_4)(char *, PyObject *); - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1095 - * - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - */ - __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; - __pyx_v_to_object_func = __pyx_t_3; - - /* "View.MemoryView":1096 - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< - * else: - * to_object_func = NULL - */ - __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; - __pyx_v_to_dtype_func = __pyx_t_4; - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1098 - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - * to_object_func = NULL # <<<<<<<<<<<<<< - * to_dtype_func = NULL - * - */ - /*else*/ { - __pyx_v_to_object_func = NULL; - - /* "View.MemoryView":1099 - * else: - * to_object_func = NULL - * to_dtype_func = NULL # <<<<<<<<<<<<<< - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - */ - __pyx_v_to_dtype_func = NULL; - } - __pyx_L3:; - - /* "View.MemoryView":1101 - * to_dtype_func = NULL - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< - * to_object_func, to_dtype_func, - * memview.dtype_is_object) - */ - __Pyx_XDECREF(__pyx_r); - - /* "View.MemoryView":1103 - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - * to_object_func, to_dtype_func, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview 
object and slice. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - -static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { - Py_ssize_t __pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - __pyx_t_1 = ((__pyx_v_arg < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1111 - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: - * return -arg # <<<<<<<<<<<<<< - * else: - * return arg - */ - __pyx_r = (-__pyx_v_arg); - goto __pyx_L0; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - } - - /* "View.MemoryView":1113 - * return -arg - * else: - * return arg # <<<<<<<<<<<<<< - * - * @cname('__pyx_get_best_slice_order') - */ - /*else*/ { - __pyx_r = __pyx_v_arg; - goto __pyx_L0; - } - - /* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. 
- */ - -static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { - int __pyx_v_i; - Py_ssize_t __pyx_v_c_stride; - Py_ssize_t __pyx_v_f_stride; - char __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1121 - * """ - * cdef int i - * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< - * cdef Py_ssize_t f_stride = 0 - * - */ - __pyx_v_c_stride = 0; - - /* "View.MemoryView":1122 - * cdef int i - * cdef Py_ssize_t c_stride = 0 - * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_f_stride = 0; - - /* "View.MemoryView":1124 - * cdef Py_ssize_t f_stride = 0 - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1126 - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1127 - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - goto __pyx_L4_break; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L4_break:; - - /* "View.MemoryView":1129 - * break - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - */ - __pyx_t_1 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_1; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1131 - * for i in range(ndim): - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1132 - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - */ - goto __pyx_L7_break; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L7_break:; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1135 - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - * return 'C' # <<<<<<<<<<<<<< - * else: - * return 'F' - */ - __pyx_r = 'C'; - goto __pyx_L0; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - } - - /* "View.MemoryView":1137 - * return 'C' - * else: - 
* return 'F' # <<<<<<<<<<<<<< - * - * @cython.cdivision(True) - */ - /*else*/ { - __pyx_r = 'F'; - goto __pyx_L0; - } - - /* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - -static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; - Py_ssize_t __pyx_v_dst_extent; - Py_ssize_t __pyx_v_src_stride; - Py_ssize_t __pyx_v_dst_stride; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - Py_ssize_t __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - - /* "View.MemoryView":1147 - * - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - */ - __pyx_v_src_extent = (__pyx_v_src_shape[0]); - - /* "View.MemoryView":1148 - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] - */ - __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); - - /* "View.MemoryView":1149 - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - */ - __pyx_v_src_stride = (__pyx_v_src_strides[0]); - - /* "View.MemoryView":1150 - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - - /* "View.MemoryView":1154 - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - */ - __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); - if (__pyx_t_2) { - __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); - } - __pyx_t_3 = (__pyx_t_2 != 
0); - __pyx_t_1 = __pyx_t_3; - __pyx_L5_bool_binop_done:; - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - if (__pyx_t_1) { - - /* "View.MemoryView":1155 - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1157 - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1158 - * else: - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< - * src_data += src_stride - * dst_data += dst_stride - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); - - /* "View.MemoryView":1159 - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * else: - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1160 - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L4:; - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1162 - * dst_data += dst_stride - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * _copy_strided_to_strided(src_data, src_strides + 1, - * dst_data, dst_strides + 1, - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1163 - * else: - * for i in range(dst_extent): - * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< - * dst_data, dst_strides + 1, - * src_shape + 1, dst_shape + 1, - */ - _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); - - /* "View.MemoryView":1167 - * src_shape + 1, dst_shape + 1, - * ndim - 1, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1168 - * ndim - 1, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L3:; - - 
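/* Reader's note (added commentary, not part of the generated Cython output): the one-dimensional base case above takes one of two paths. When both strides are positive and equal to the itemsize the run is contiguous, so a single memcpy of itemsize * dst_extent bytes suffices; otherwise each element is copied individually while src_data and dst_data advance by their own strides, which also handles negative strides from reversed slices. For ndim > 1 the function recurses over the leading dimension with the shape and stride arrays advanced by one entry. */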
/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - - /* function exit code */ -} - -/* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - -static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - - /* "View.MemoryView":1173 - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< - * src.shape, dst.shape, ndim, itemsize) - * - */ - _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_size; - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - - /* "View.MemoryView":1179 - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< - * - * for shape in src.shape[:ndim]: - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_size = __pyx_t_1; - - /* "View.MemoryView":1181 - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - * - * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< - * size *= shape - * - */ - __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); - for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_shape = (__pyx_t_2[0]); - - /* "View.MemoryView":1182 - * - * for shape in src.shape[:ndim]: - * size *= shape # <<<<<<<<<<<<<< - * - * return size - */ - __pyx_v_size = (__pyx_v_size * __pyx_v_shape); - } - - /* "View.MemoryView":1184 - * size *= shape - * - * return size # <<<<<<<<<<<<<< - * - * @cname('__pyx_fill_contig_strides_array') - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - /* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { - int __pyx_v_idx; - Py_ssize_t __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - __pyx_t_1 = ((__pyx_v_order == 'F') != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1197 - * - * if order == 'F': - * for idx in range(ndim): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - __pyx_t_2 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_idx = __pyx_t_4; - - /* "View.MemoryView":1198 - * if order == 'F': - * for idx in range(ndim): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * else: - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1199 - * for idx in range(ndim): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * else: - * for idx in range(ndim - 1, -1, -1): - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1201 - * stride *= shape[idx] - * else: - * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - /*else*/ { - for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_idx = __pyx_t_2; - - /* "View.MemoryView":1202 - * else: - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1203 - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * - * return stride - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - } - __pyx_L3:; - - /* "View.MemoryView":1205 - * stride *= shape[idx] - * - * return stride # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_data_to_temp') - */ - __pyx_r = __pyx_v_stride; - goto __pyx_L0; - - /* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { - int __pyx_v_i; - void *__pyx_v_result; - size_t __pyx_v_itemsize; - size_t __pyx_v_size; - void *__pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - struct __pyx_memoryview_obj *__pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1219 - * cdef void *result - * - * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef size_t size = slice_get_size(src, ndim) - * - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1220 - * - * cdef size_t itemsize = src.memview.view.itemsize - * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< - * - * result = malloc(size) - */ - __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); - - /* "View.MemoryView":1222 - * cdef size_t size = slice_get_size(src, ndim) - * - * result = malloc(size) # <<<<<<<<<<<<<< - * if not result: - * _err(MemoryError, NULL) - */ - __pyx_v_result = malloc(__pyx_v_size); - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1224 - * result = malloc(size) - * if not result: - * _err(MemoryError, NULL) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - } - - /* "View.MemoryView":1227 - * - * - * tmpslice.data = <char *> result # <<<<<<<<<<<<<< - * tmpslice.memview = src.memview - * for i in range(ndim): - */ - __pyx_v_tmpslice->data = ((char *)__pyx_v_result); - - /* "View.MemoryView":1228 - * - * tmpslice.data = <char *> result - * tmpslice.memview = src.memview # <<<<<<<<<<<<<< - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - */ - __pyx_t_4 = __pyx_v_src->memview; - __pyx_v_tmpslice->memview = __pyx_t_4; - - /* "View.MemoryView":1229 - * tmpslice.data = <char *> result - * tmpslice.memview = src.memview - * for i in range(ndim): # <<<<<<<<<<<<<< - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1230 - * tmpslice.memview = src.memview - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< - * tmpslice.suboffsets[i] = -1 - * - */ - (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); - - /* "View.MemoryView":1231 - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, - */ - (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1233 - * tmpslice.suboffsets[i] = -1 - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< - * ndim, order) - * - */ - (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); - - /* "View.MemoryView":1237 - * - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1239 - * for i 
in range(ndim): - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< - * - * if slice_is_contig(src[0], order, ndim): - */ - (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - } - } - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1242 - * - * if slice_is_contig(src[0], order, ndim): - * memcpy(result, src.data, size) # <<<<<<<<<<<<<< - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - */ - (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":1244 - * memcpy(result, src.data, size) - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< - * - * return result - */ - /*else*/ { - copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); - } - __pyx_L9:; - - /* "View.MemoryView":1246 - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = NULL; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - -static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_extents", 0); - - /* "View.MemoryView":1254 - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - * (i, extent1, extent2)) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err_dim') - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":1253 - * cdef int _err_extents(int i, Py_ssize_t extent1, - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< - * (i, extent1, extent2)) - * - */ - __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(1, 1253, __pyx_L1_error) - - /* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - -static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_dim", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1258 - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: - * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err') - */ - __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(__pyx_v_error); - __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } - } - __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 1258, __pyx_L1_error) - - /* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - -static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":1263 - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: - * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< - * else: - * raise error - */ - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_error); - __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - } - } - __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 1263, __pyx_L1_error) - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - } - - /* "View.MemoryView":1265 - * raise error(msg.decode('ascii')) - * else: - * raise error # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_contents') - */ - /*else*/ { - __Pyx_Raise(__pyx_v_error, 0, 0, 0); - __PYX_ERR(1, 1265, __pyx_L1_error) - } - - /* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { - void *__pyx_v_tmpdata; - size_t __pyx_v_itemsize; - int __pyx_v_i; - char __pyx_v_order; - int __pyx_v_broadcasting; - int __pyx_v_direct_copy; - __Pyx_memviewslice __pyx_v_tmp; - int __pyx_v_ndim; - int __pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - void *__pyx_t_7; - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1276 - * Check for overlapping memory and verify the shapes. 
- * """ - * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - */ - __pyx_v_tmpdata = NULL; - - /* "View.MemoryView":1277 - * """ - * cdef void *tmpdata = NULL - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - */ - __pyx_t_1 = __pyx_v_src.memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1279 - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< - * cdef bint broadcasting = False - * cdef bint direct_copy = False - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); - - /* "View.MemoryView":1280 - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False # <<<<<<<<<<<<<< - * cdef bint direct_copy = False - * cdef __Pyx_memviewslice tmp - */ - __pyx_v_broadcasting = 0; - - /* "View.MemoryView":1281 - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False - * cdef bint direct_copy = False # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice tmp - * - */ - __pyx_v_direct_copy = 0; - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1285 - * - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1287 - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< - * - * cdef int ndim = max(src_ndim, dst_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - } - __pyx_L3:; - - /* "View.MemoryView":1289 - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - __pyx_t_3 = __pyx_v_dst_ndim; - __pyx_t_4 = __pyx_v_src_ndim; - if (((__pyx_t_3 > __pyx_t_4) != 0)) { - __pyx_t_5 = __pyx_t_3; - } else { - __pyx_t_5 = __pyx_t_4; - } - __pyx_v_ndim = __pyx_t_5; - - /* "View.MemoryView":1291 - * cdef int ndim = max(src_ndim, dst_ndim) - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - */ - __pyx_t_5 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_5; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - 
__pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1294 - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - * broadcasting = True # <<<<<<<<<<<<<< - * src.strides[i] = 0 - * else: - */ - __pyx_v_broadcasting = 1; - - /* "View.MemoryView":1295 - * if src.shape[i] == 1: - * broadcasting = True - * src.strides[i] = 0 # <<<<<<<<<<<<<< - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) - */ - (__pyx_v_src.strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - goto __pyx_L7; - } - - /* "View.MemoryView":1297 - * src.strides[i] = 0 - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< - * - * if src.suboffsets[i] >= 0: - */ - /*else*/ { - __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - } - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1300 - * - * if src.suboffsets[i] >= 0: - * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< - * - * if slices_overlap(&src, &dst, ndim, itemsize): - */ - __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - } - } - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1305 - * - * if not slice_is_contig(src, order, ndim): - * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - */ - __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - } - - /* "View.MemoryView":1307 - * order = get_best_order(&dst, ndim) - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< - * src = tmp - * - */ - __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) - __pyx_v_tmpdata = __pyx_t_7; - - /* "View.MemoryView":1308 - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - * src = tmp # <<<<<<<<<<<<<< - * - * if not broadcasting: - */ - __pyx_v_src = __pyx_v_tmp; - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1314 - * - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - goto __pyx_L12; - } - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1316 - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< - * - * if direct_copy: - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - } - __pyx_L12:; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_2 = (__pyx_v_direct_copy != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1320 - * if direct_copy: - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - 
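/* Reader's note (added commentary, not part of the generated Cython output): this is the direct-copy path, reached when src and dst are both contiguous in the same ('C' or 'F') order. The whole payload moves with a single memcpy of slice_get_size(&src, ndim) bytes; the refcount_copying call above releases the object references held by dst before its memory is overwritten, and the matching call after the memcpy re-acquires them so that dtype_is_object buffers stay consistent. */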
/* "View.MemoryView":1321 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - */ - (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); - - /* "View.MemoryView":1322 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * free(tmpdata) - * return 0 - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1323 - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1324 - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * if order == 'F' == get_best_order(&dst, ndim): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (__pyx_v_order == 'F'); - if (__pyx_t_2) { - __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); - } - __pyx_t_8 = (__pyx_t_2 != 0); - if (__pyx_t_8) { - - /* "View.MemoryView":1329 - * - * - * transpose_memslice(&src) # <<<<<<<<<<<<<< - * transpose_memslice(&dst) - * - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) - - /* "View.MemoryView":1330 - * - * transpose_memslice(&src) - * transpose_memslice(&dst) # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1332 - * transpose_memslice(&dst) - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1333 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * - */ - copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1334 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * free(tmpdata) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1336 - * refcount_copying(&dst, dtype_is_object, ndim, 
True) - * - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1337 - * - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_broadcast_leading') - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { - int __pyx_v_i; - int __pyx_v_offset; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "View.MemoryView":1344 - * int ndim_other) nogil: - * cdef int i - * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); - - /* "View.MemoryView":1346 - * cdef int offset = ndim_other - ndim - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1347 - * - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - */ - (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); - - /* "View.MemoryView":1348 - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - */ - (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1349 - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< - * - * for i in range(offset): - */ - (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); - } - - /* "View.MemoryView":1351 - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - * for i in range(offset): # <<<<<<<<<<<<<< - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - */ - __pyx_t_1 = __pyx_v_offset; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1352 - * - * for i in range(offset): - * mslice.shape[i] = 1 # <<<<<<<<<<<<<< - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 - */ - (__pyx_v_mslice->shape[__pyx_v_i]) = 1; - - /* "View.MemoryView":1353 - * for i in range(offset): - * mslice.shape[i] = 1 
- * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< - * mslice.suboffsets[i] = -1 - * - */ - (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); - - /* "View.MemoryView":1354 - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { - int __pyx_t_1; - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - __pyx_t_1 = (__pyx_v_dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1367 - * - * if dtype_is_object: - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< - * dst.strides, ndim, inc) - * - */ - __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - } - - /* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - - /* function exit code */ -} - -/* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - __Pyx_RefNannyDeclarations - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); - - /* "View.MemoryView":1374 - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif -} - -/* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * 
Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - -static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); - - /* "View.MemoryView":1381 - * cdef Py_ssize_t i - * - * for i in range(shape[0]): # <<<<<<<<<<<<<< - * if ndim == 1: - * if inc: - */ - __pyx_t_1 = (__pyx_v_shape[0]); - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - __pyx_t_4 = (__pyx_v_inc != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1384 - * if ndim == 1: - * if inc: - * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * Py_DECREF(( data)[0]) - */ - Py_INCREF((((PyObject **)__pyx_v_data)[0])); - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":1386 - * Py_INCREF(( data)[0]) - * else: - * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - */ - /*else*/ { - Py_DECREF((((PyObject **)__pyx_v_data)[0])); - } - __pyx_L6:; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - goto __pyx_L5; - } - - /* "View.MemoryView":1388 - * Py_DECREF(( data)[0]) - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, inc) - * - */ - /*else*/ { - - /* "View.MemoryView":1389 - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - * ndim - 1, inc) # <<<<<<<<<<<<<< - * - * data += strides[0] - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); - } - __pyx_L5:; - - /* "View.MemoryView":1391 - * ndim - 1, inc) - * - * data += strides[0] # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - - /* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { - - /* "View.MemoryView":1400 - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, - * itemsize, item) - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1401 - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1403 - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - -static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_extent; - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - - /* "View.MemoryView":1411 - * size_t itemsize, void *item) nogil: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t extent = shape[0] - * - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1412 - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] - * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_extent = (__pyx_v_shape[0]); - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1415 - * - * if ndim == 1: - * for i in range(extent): # <<<<<<<<<<<<<< - * memcpy(data, item, itemsize) - * data += stride - */ - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1416 - * if ndim == 1: - * for i in range(extent): - * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< - * data += stride - * else: - */ - (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); - - /* "View.MemoryView":1417 - * for i in range(extent): - * memcpy(data, item, itemsize) - * data += stride # <<<<<<<<<<<<<< - * else: - * for i in range(extent): - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1419 - * data += stride - * else: - * for i in range(extent): # <<<<<<<<<<<<<< - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, 
itemsize, item) - */ - /*else*/ { - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1420 - * else: - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, itemsize, item) - * data += stride - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1422 - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, itemsize, item) - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - - /* function exit code */ -} - -/* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v___pyx_type = 0; - long __pyx_v___pyx_checksum; - PyObject *__pyx_v___pyx_state = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) - } - } else if 
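- /* No keyword dict was passed: anything other than exactly three
-    positional arguments jumps to the arity error below; otherwise the
-    three values are read straight out of the args tuple. */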
(PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - } - __pyx_v___pyx_type = values[0]; - __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_v___pyx_state = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_v___pyx_PickleError = 0; - PyObject *__pyx_v___pyx_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); - if (__pyx_t_1) { - - /* "(tree fragment)":5 - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_PickleError); - __Pyx_GIVEREF(__pyx_n_s_PickleError); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_2); - __pyx_v___pyx_PickleError = __pyx_t_2; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":6 - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - */ - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_v___pyx_PickleError); - __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 6, __pyx_L1_error) - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - } - - /* "(tree fragment)":7 - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_4) ? 
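- /* If the looked-up callable was a bound method it has been unpacked
-    above into the underlying function (__pyx_t_2) plus its self
-    (__pyx_t_4), so the call below can pass both positionally and skip
-    building a fresh bound-method invocation. */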
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v___pyx_result = __pyx_t_3; - __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - __pyx_t_1 = (__pyx_v___pyx_state != Py_None); - __pyx_t_6 = (__pyx_t_1 != 0); - if (__pyx_t_6) { - - /* "(tree fragment)":9 - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) - __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - } - - /* "(tree fragment)":10 - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result # <<<<<<<<<<<<<< - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v___pyx_result); - __pyx_r = __pyx_v___pyx_result; - goto __pyx_L0; - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v___pyx_PickleError); - __Pyx_XDECREF(__pyx_v___pyx_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyObject 
*__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); - - /* "(tree fragment)":12 - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 12, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v___pyx_result->name); - __Pyx_DECREF(__pyx_v___pyx_result->name); - __pyx_v___pyx_result->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 13, __pyx_L1_error) - } - __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_4 = ((__pyx_t_3 > 1) != 0); - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_5 = (__pyx_t_4 != 0); - __pyx_t_2 = __pyx_t_5; - __pyx_L4_bool_binop_done:; - if (__pyx_t_2) { - - /* "(tree fragment)":14 - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< - */ - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 14, __pyx_L1_error) - } - __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - } - } - __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - } - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_array __pyx_vtable_array; - -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_array_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_array_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_array; - p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); - p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); - if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_array(PyObject *o) { - struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_array___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->mode); - Py_CLEAR(p->_format); - (*Py_TYPE(o)->tp_free)(o); -} -static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_array___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { - PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); - if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - v = __pyx_array___getattr__(o, n); - } - return v; -} - 
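-
-/* Editorial sketch (not part of the generated module): the slot wrappers
- * above follow a common CPython extension pattern -- the type implements
- * indexing once through tp_as_mapping->mp_subscript, and sq_item is a thin
- * adapter that boxes the C index and reuses that implementation, so obj[i]
- * behaves identically through the sequence and mapping protocols.  A
- * minimal hand-written form of that adapter (names are illustrative, and
- * the block is compiled out) would be:
- */
-#if 0
-static PyObject *
-sq_item_via_mp_subscript(PyObject *o, Py_ssize_t i)
-{
-    PyObject *index = PyLong_FromSsize_t(i);   /* box the C index */
-    if (!index)
-        return NULL;
-    PyObject *r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
-    Py_DECREF(index);
-    return r;                                  /* NULL propagates the error */
-}
-#endif
-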
-static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); -} - -static PyMethodDef __pyx_methods_array[] = { - {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_array[] = { - {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_array = { - __pyx_array___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_array, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_array = { - __pyx_array___len__, /*mp_length*/ - __pyx_array___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_array = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.array", /*tp_name*/ - sizeof(struct __pyx_array_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_array, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - __pyx_tp_getattro_array, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ - 0, /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_array, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_array, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_array, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_MemviewEnum_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_MemviewEnum_obj *)o); - p->name = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_Enum(PyObject *o) { - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - Py_CLEAR(p->name); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_Enum(PyObject *o) { - PyObject* tmp; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_Enum[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_MemviewEnum = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.Enum", /*tp_name*/ - sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_Enum, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_MemviewEnum___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_Enum, /*tp_traverse*/ - __pyx_tp_clear_Enum, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_Enum, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_MemviewEnum___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_Enum, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; - -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryview_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct 
__pyx_memoryview_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_memoryview; - p->obj = Py_None; Py_INCREF(Py_None); - p->_size = Py_None; Py_INCREF(Py_None); - p->_array_interface = Py_None; Py_INCREF(Py_None); - p->view.obj = NULL; - if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_memoryview(PyObject *o) { - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryview___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->obj); - Py_CLEAR(p->_size); - Py_CLEAR(p->_array_interface); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - if (p->obj) { - e = (*v)(p->obj, a); if (e) return e; - } - if (p->_size) { - e = (*v)(p->_size, a); if (e) return e; - } - if (p->_array_interface) { - e = (*v)(p->_array_interface, a); if (e) return e; - } - if (p->view.obj) { - e = (*v)(p->view.obj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_memoryview(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - tmp = ((PyObject*)p->obj); - p->obj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_size); - p->_size = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_array_interface); - p->_array_interface = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - Py_CLEAR(p->view.obj); - return 0; -} -static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_memoryview___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); -} - -static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); -} - -static PyMethodDef __pyx_methods_memoryview[] = { - {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, - {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, - {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, - {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_memoryview[] = { - {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, - {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, - {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, - {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, - {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, - {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, - {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, - {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, - {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_memoryview = { - __pyx_memoryview___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_memoryview, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_memoryview = { - __pyx_memoryview___len__, /*mp_length*/ - __pyx_memoryview___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_memoryview = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.memoryview", /*tp_name*/ - sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_memoryview___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - __pyx_memoryview___str__, 
/*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_memoryview, /*tp_traverse*/ - __pyx_tp_clear_memoryview, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_memoryview, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_memoryview, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_memoryview, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; - -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryviewslice_obj *p; - PyObject *o = __pyx_tp_new_memoryview(t, a, k); - if (unlikely(!o)) return 0; - p = ((struct __pyx_memoryviewslice_obj *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; - p->from_object = Py_None; Py_INCREF(Py_None); - p->from_slice.memview = NULL; - return o; -} - -static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryviewslice___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->from_object); - PyObject_GC_Track(o); - __pyx_tp_dealloc_memoryview(o); -} - -static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; - if (p->from_object) { - e = (*v)(p->from_object, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear__memoryviewslice(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - __pyx_tp_clear_memoryview(o); - tmp = ((PyObject*)p->from_object); - p->from_object = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - __PYX_XDEC_MEMVIEW(&p->from_slice, 1); - return 0; -} - -static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); -} - -static PyMethodDef __pyx_methods__memoryviewslice[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { - {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_memoryviewslice = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core._memoryviewslice", /*tp_name*/ - sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___repr__, /*tp_repr*/ - #else - 0, /*tp_repr*/ - #endif - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___str__, /*tp_str*/ - #else - 0, /*tp_str*/ - #endif - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - "Internal class for passing memoryview slices to Python", /*tp_doc*/ - __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ - __pyx_tp_clear__memoryviewslice, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods__memoryviewslice, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets__memoryviewslice, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new__memoryviewslice, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_core}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "core", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, - {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, - {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, - {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, - {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, - {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, - {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, - {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, - {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, - {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, - {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, - {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, - {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, - {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, - {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, - {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, - {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, - {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, - {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, - {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, - {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, - {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, - {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, - {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, - {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, - {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, - {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, - {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, - {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, - {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, - {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, - {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, - {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, - {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, - {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, - {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, - {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, - {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, - {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, - {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, - {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, - {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, - {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, - {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, - {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) - __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) - __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) - __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) - __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) - __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) - __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "View.MemoryView":176 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__7); - __Pyx_GIVEREF(__pyx_tuple__7); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__11); - __Pyx_GIVEREF(__pyx_tuple__11); - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_INCREF(__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__18); - __Pyx_GIVEREF(__pyx_tuple__18); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< - */ - __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__20); - __Pyx_GIVEREF(__pyx_tuple__20); - - /* "View.MemoryView":287 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - - /* "View.MemoryView":288 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__22); - __Pyx_GIVEREF(__pyx_tuple__22); - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__23); - __Pyx_GIVEREF(__pyx_tuple__23); - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__24); - __Pyx_GIVEREF(__pyx_tuple__24); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__25); - __Pyx_GIVEREF(__pyx_tuple__25); - __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - /* InitThreads.init */ - #ifdef WITH_THREAD -PyEval_InitThreads(); -#endif - -if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) - - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = 
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - generic = Py_None; Py_INCREF(Py_None); - strided = Py_None; Py_INCREF(Py_None); - indirect = Py_None; Py_INCREF(Py_None); - contiguous = Py_None; Py_INCREF(Py_None); - indirect_contiguous = Py_None; Py_INCREF(Py_None); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __pyx_vtabptr_array = &__pyx_vtable_array; - __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; - if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_array.tp_print = 0; - #endif - if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - __pyx_array_type = &__pyx_type___pyx_array; - if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_MemviewEnum.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; - __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; - __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; - __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; - __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; - 
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; - __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; - __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; - __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; - if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryview.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - __pyx_memoryview_type = &__pyx_type___pyx_memoryview; - __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; - __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; - __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; - __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; - __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; - if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryviewslice.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef 
CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initcore(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_core(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - static PyThread_type_lock __pyx_t_2[8]; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_monotonic_align__core) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "monotonic_align.core")) { - if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - __pyx_k_ = (-1e9); - - /* "monotonic_align/core.pyx":1 - * cimport cython # <<<<<<<<<<<<<< - * from cython.parallel import prange - * - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":209 - * info.obj = self - * - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * def __dealloc__(array self): - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(generic); - __Pyx_DECREF_SET(generic, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":287 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(strided); - __Pyx_DECREF_SET(strided, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":288 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect); - __Pyx_DECREF_SET(indirect, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(contiguous); - __Pyx_DECREF_SET(contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect_contiguous); - __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":316 - * - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ - * PyThread_allocate_lock(), - */ - __pyx_memoryview_thread_locks_used = 0; - - /* "View.MemoryView":317 - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< - * PyThread_allocate_lock(), - * PyThread_allocate_lock(), - */ - __pyx_t_2[0] = PyThread_allocate_lock(); - __pyx_t_2[1] = PyThread_allocate_lock(); - __pyx_t_2[2] = PyThread_allocate_lock(); - __pyx_t_2[3] = PyThread_allocate_lock(); - __pyx_t_2[4] = PyThread_allocate_lock(); - __pyx_t_2[5] = PyThread_allocate_lock(); - __pyx_t_2[6] = PyThread_allocate_lock(); - __pyx_t_2[7] = PyThread_allocate_lock(); - memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); - - /* "View.MemoryView":549 - * info.obj = self - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryview_type); - - /* "View.MemoryView":995 - * return self.from_object - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* MemviewSliceInit */ -static int -__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference) -{ - __Pyx_RefNannyDeclarations - int i, retval=-1; - Py_buffer *buf = &memview->view; - __Pyx_RefNannySetupContext("init_memviewslice", 0); - if (unlikely(memviewslice->memview || memviewslice->data)) { - PyErr_SetString(PyExc_ValueError, - "memviewslice is already initialized!"); - goto fail; - } - if (buf->strides) { - for (i = 0; i < ndim; i++) { - memviewslice->strides[i] = buf->strides[i]; - } - } else { - Py_ssize_t stride = 
buf->itemsize; - for (i = ndim - 1; i >= 0; i--) { - memviewslice->strides[i] = stride; - stride *= buf->shape[i]; - } - } - for (i = 0; i < ndim; i++) { - memviewslice->shape[i] = buf->shape[i]; - if (buf->suboffsets) { - memviewslice->suboffsets[i] = buf->suboffsets[i]; - } else { - memviewslice->suboffsets[i] = -1; - } - } - memviewslice->memview = memview; - memviewslice->data = (char *)buf->buf; - if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { - Py_INCREF(memview); - } - retval = 0; - goto no_fail; -fail: - memviewslice->memview = 0; - memviewslice->data = 0; - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} -#ifndef Py_NO_RETURN -#define Py_NO_RETURN -#endif -static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { - va_list vargs; - char msg[200]; -#ifdef HAVE_STDARG_PROTOTYPES - va_start(vargs, fmt); -#else - va_start(vargs); -#endif - vsnprintf(msg, 200, fmt, vargs); - va_end(vargs); - Py_FatalError(msg); -} -static CYTHON_INLINE int -__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)++; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE int -__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)--; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE void -__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) -{ - int first_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) - return; - if (unlikely(__pyx_get_slice_count(memview) < 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - first_time = __pyx_add_acquisition_count(memview) == 0; - if (unlikely(first_time)) { - if (have_gil) { - Py_INCREF((PyObject *) memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_INCREF((PyObject *) memview); - PyGILState_Release(_gilstate); - } - } -} -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, - int have_gil, int lineno) { - int last_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - memslice->memview = NULL; - return; - } - if (unlikely(__pyx_get_slice_count(memview) <= 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - last_time = __pyx_sub_acquisition_count(memview) == 1; - memslice->data = NULL; - if (unlikely(last_time)) { - if (have_gil) { - Py_CLEAR(memslice->memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_CLEAR(memslice->memview); - PyGILState_Release(_gilstate); - } - } else { - memslice->memview = NULL; - } -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* None */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = func->ob_type->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; 
- } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* PyCFunctionFastCall */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCall2Args */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallOneArg */ -#if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); 
- return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* None */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* decode_c_string */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { - Py_ssize_t length; - if (unlikely((start < 0) | (stop < 0))) { - size_t slen = strlen(cstring); - if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { - PyErr_SetString(PyExc_OverflowError, - "c-string too long to convert to Python"); - return NULL; - } - length = (Py_ssize_t) slen; - if (start < 0) { - start += length; - if (start < 0) - start = 0; - } - if (stop < 0) - stop += length; - } - if (unlikely(stop <= start)) - return __Pyx_NewRef(__pyx_empty_unicode); - length = stop - start; - cstring += start; - if (decode_func) { - return decode_func(cstring, length, errors); - } else { - return PyUnicode_Decode(cstring, length, encoding, errors); - } -} - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; icurexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* GetAttr3 */ -static PyObject *__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r = __Pyx_GetAttr(o, n); - return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* ExtTypeTest */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = 
exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = 
a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - PyFPE_START_PROTECT("add", return NULL) - 
result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* None */ -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (unlikely(!r)) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* PyObject_GenericGetAttrNoDict */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, attr_name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(attr_name)); -#endif - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* PyObject_GenericGetAttr */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - -/* SetVTable */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* SetupReduce */ -static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { - int ret; - PyObject *name_attr; - name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); - if (likely(name_attr)) { - ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); - } else { - ret = -1; - } - if (unlikely(ret < 0)) { - PyErr_Clear(); - ret = 0; - } - Py_XDECREF(name_attr); - return ret; -} -static int __Pyx_setup_reduce(PyObject* type_obj) { - int ret = 0; - PyObject *object_reduce = NULL; - PyObject *object_reduce_ex = NULL; - PyObject *reduce = NULL; - PyObject *reduce_ex = NULL; - PyObject *reduce_cython = NULL; - PyObject *setstate = NULL; - PyObject *setstate_cython = NULL; -#if CYTHON_USE_PYTYPE_LOOKUP - if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#else - if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#endif -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#else - object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#endif - reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; - if (reduce_ex == object_reduce_ex) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#else - object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#endif - reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; - if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { - reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); - if (likely(reduce_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (reduce == object_reduce || PyErr_Occurred()) { - goto __PYX_BAD; - } - setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); - if (!setstate) PyErr_Clear(); - if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { - setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); - if (likely(setstate_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (!setstate || PyErr_Occurred()) { - goto __PYX_BAD; - } - } - PyType_Modified((PyTypeObject*)type_obj); - } - } - goto __PYX_GOOD; -__PYX_BAD: - if (!PyErr_Occurred()) - PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); - ret = -1; -__PYX_GOOD: 
-#if !CYTHON_USE_PYTYPE_LOOKUP - Py_XDECREF(object_reduce); - Py_XDECREF(object_reduce_ex); -#endif - Py_XDECREF(reduce); - Py_XDECREF(reduce_ex); - Py_XDECREF(reduce_cython); - Py_XDECREF(setstate); - Py_XDECREF(setstate_cython); - return ret; -} - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - 
return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - -/* MemviewSliceIsContig */ -static int -__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) -{ - int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; - if (order == 'F') { - step = 1; - start = 0; - } else { - step = -1; - start = ndim - 1; - } - for (i = 0; i < ndim; i++) { - index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) - return 0; - itemsize *= mvs.shape[index]; - } - return 1; -} - -/* OverlappingSlices */ -static void -__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, - void **out_start, void **out_end, - int ndim, size_t itemsize) -{ - char *start, *end; - int i; - start = end = slice->data; - for (i = 0; i < ndim; i++) { - Py_ssize_t stride = slice->strides[i]; - Py_ssize_t extent = slice->shape[i]; - if (extent == 0) { - *out_start = *out_end = start; - return; - } else { - if (stride > 0) - end += stride * (extent - 1); - else - start += stride * (extent - 1); - } - } - *out_start = start; - *out_end = end + itemsize; -} -static int -__pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize) -{ - void *start1, *end1, *start2, *end2; - __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); - __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); - return (start1 < end2) && (start2 < end1); -} - -/* Capsule */ -static CYTHON_INLINE PyObject * -__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) -{ - PyObject *cobj; -#if PY_VERSION_HEX >= 0x02070000 - cobj = PyCapsule_New(p, sig, NULL); -#else - cobj = PyCObject_FromVoidPtr(p, NULL); -#endif - return cobj; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - 
ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably the same as above, - but we don't have any guarantees. - */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if 
(field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case '<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if 
(struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* TypeInfoCompare */ - static int -__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) -{ - int i; - if (!a || !b) - return 0; - if (a == b) - return 1; - if (a->size != b->size || a->typegroup != b->typegroup || - a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { - if (a->typegroup == 'H' || b->typegroup == 'H') { - return a->size == b->size; - } else { - return 0; - } - } - if (a->ndim) { - for (i = 0; i < a->ndim; i++) - if (a->arraysize[i] != b->arraysize[i]) - return 0; - } - if (a->typegroup == 'S') { - if (a->flags != b->flags) - return 0; - if (a->fields || b->fields) { - if (!(a->fields && b->fields)) - return 0; - for (i = 0; a->fields[i].type && b->fields[i].type; i++) { - __Pyx_StructField *field_a = a->fields + i; - __Pyx_StructField *field_b = b->fields + i; - if (field_a->offset != field_b->offset || - !__pyx_typeinfo_cmp(field_a->type, field_b->type)) - return 0; - } - return !a->fields[i].type && !b->fields[i].type; - } - } - return 1; -} - -/* MemviewSliceValidateAndInit */ - static int -__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) -{ - if (buf->shape[dim] <= 1) - return 1; - if (buf->strides) { - if (spec & __Pyx_MEMVIEW_CONTIG) { - if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { - if (unlikely(buf->strides[dim] != sizeof(void *))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly contiguous " - "in dimension %d.", dim); - goto fail; - } - } else if (unlikely(buf->strides[dim] != buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_FOLLOW) { - Py_ssize_t stride = buf->strides[dim]; - if (stride < 0) - stride = -stride; - if (unlikely(stride < buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and 
memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - } else { - if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not contiguous in " - "dimension %d", dim); - goto fail; - } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not indirect in " - "dimension %d", dim); - goto fail; - } else if (unlikely(buf->suboffsets)) { - PyErr_SetString(PyExc_ValueError, - "Buffer exposes suboffsets but no strides"); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) -{ - if (spec & __Pyx_MEMVIEW_DIRECT) { - if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { - PyErr_Format(PyExc_ValueError, - "Buffer not compatible with direct access " - "in dimension %d.", dim); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_PTR) { - if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly accessible " - "in dimension %d.", dim); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) -{ - int i; - if (c_or_f_flag & __Pyx_IS_F_CONTIG) { - Py_ssize_t stride = 1; - for (i = 0; i < ndim; i++) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not fortran contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { - Py_ssize_t stride = 1; - for (i = ndim - 1; i >- 1; i--) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not C contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } - return 1; -fail: - return 0; -} -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj) -{ - struct __pyx_memoryview_obj *memview, *new_memview; - __Pyx_RefNannyDeclarations - Py_buffer *buf; - int i, spec = 0, retval = -1; - __Pyx_BufFmt_Context ctx; - int from_memoryview = __pyx_memoryview_check(original_obj); - __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); - if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) - original_obj)->typeinfo)) { - memview = (struct __pyx_memoryview_obj *) original_obj; - new_memview = NULL; - } else { - memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - original_obj, buf_flags, 0, dtype); - new_memview = memview; - if (unlikely(!memview)) - goto fail; - } - buf = &memview->view; - if (unlikely(buf->ndim != ndim)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - ndim, buf->ndim); - goto fail; - } - if (new_memview) { - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; - } - if (unlikely((unsigned) buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " - "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", - buf->itemsize, - (buf->itemsize > 1) ? "s" : "", - dtype->name, - dtype->size, - (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->len > 0) { - for (i = 0; i < ndim; i++) { - spec = axes_specs[i]; - if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) - goto fail; - if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) - goto fail; - } - if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) - goto fail; - } - if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, - new_memview != NULL) == -1)) { - goto fail; - } - retval = 0; - goto no_fail; -fail: - Py_XDECREF(new_memview); - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_float, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* MemviewSliceCopyTemplate */ - static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object) -{ - __Pyx_RefNannyDeclarations - int i; - __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; - struct __pyx_memoryview_obj *from_memview = from_mvs->memview; - Py_buffer *buf = &from_memview->view; - PyObject *shape_tuple = NULL; - PyObject *temp_int = NULL; - struct __pyx_array_obj *array_obj = NULL; - struct __pyx_memoryview_obj *memview_obj = NULL; - __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); - for (i = 0; i < ndim; i++) { - if (unlikely(from_mvs->suboffsets[i] >= 0)) { - PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " - "indirect dimensions (axis %d)", i); - goto fail; - } - } - shape_tuple = PyTuple_New(ndim); - if (unlikely(!shape_tuple)) { - goto fail; - } - __Pyx_GOTREF(shape_tuple); - for(i = 0; i < ndim; i++) { - temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); - if(unlikely(!temp_int)) { - goto fail; - } else { - PyTuple_SET_ITEM(shape_tuple, i, temp_int); - temp_int = NULL; - } - } - 
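- /* Shape tuple complete: next, allocate a contiguous array with the same shape and element format, wrap it in a memoryview, and copy the slice contents into it. */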
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); - if (unlikely(!array_obj)) { - goto fail; - } - __Pyx_GOTREF(array_obj); - memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - (PyObject *) array_obj, contig_flag, - dtype_is_object, - from_mvs->memview->typeinfo); - if (unlikely(!memview_obj)) - goto fail; - if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) - goto fail; - if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, - dtype_is_object) < 0)) - goto fail; - goto no_fail; -fail: - __Pyx_XDECREF(new_mvs.memview); - new_mvs.memview = NULL; - new_mvs.data = NULL; -no_fail: - __Pyx_XDECREF(shape_tuple); - __Pyx_XDECREF(temp_int); - __Pyx_XDECREF(array_obj); - __Pyx_RefNannyFinishContext(); - return new_mvs; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - 
} else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - 
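- /* Generic fallback: coerce x to a Python int object, then decode that object's bytes into a native int with _PyLong_AsByteArray, honouring the platform's endianness. */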
int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { 
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { - const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(char) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (char) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { - return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { - return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { - return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - if (unlikely(result == 1)) - goto 
raise_neg_overflow; - } -#endif - if (sizeof(char) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) - case -2: - if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - } -#endif - if (sizeof(char) <= sizeof(long)) { - 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - char val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (char) -1; - } - } else { - char val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (char) -1; - val = __Pyx_PyInt_As_char(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to char"); - return (char) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to char"); - return (char) -1; -} - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - 
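- /* Non-ASCII byte found: PyUnicode_AsASCIIString() is called only for its side effect of setting UnicodeEncodeError before the NULL return below. */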
PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/model/Attention_module.py b/spaces/merle/PROTEIN_GENERATOR/utils/model/Attention_module.py deleted file mode 100644 index 44310ebc94cc40f7cca9eda31463e172188a0084..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/utils/model/Attention_module.py +++ /dev/null @@ -1,411 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import math -from opt_einsum import contract as einsum -from util_module import init_lecun_normal -from icecream import ic - -class FeedForwardLayer(nn.Module): - def __init__(self, d_model, r_ff, p_drop=0.1): - super(FeedForwardLayer, self).__init__() - self.norm = nn.LayerNorm(d_model) - self.linear1 = nn.Linear(d_model, d_model*r_ff) - self.dropout = nn.Dropout(p_drop) - self.linear2 = nn.Linear(d_model*r_ff, d_model) - - self.reset_parameter() - - def reset_parameter(self): - # initialize linear layer right before ReLu: He initializer (kaiming normal) - nn.init.kaiming_normal_(self.linear1.weight, nonlinearity='relu') - nn.init.zeros_(self.linear1.bias) - - # initialize linear layer right before residual connection: zero initialize - nn.init.zeros_(self.linear2.weight) - nn.init.zeros_(self.linear2.bias) - - def forward(self, src): - src = self.norm(src) - src = self.linear2(self.dropout(F.relu_(self.linear1(src)))) - return src - -class Attention(nn.Module): - # calculate multi-head attention - def __init__(self, d_query, d_key, n_head, d_hidden, d_out, p_drop=0.1): - super(Attention, self).__init__() - self.h = n_head - self.dim = d_hidden - # - self.to_q = nn.Linear(d_query, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_key, n_head*d_hidden, bias=False) - self.to_v = nn.Linear(d_key, n_head*d_hidden, bias=False) - # - self.to_out = nn.Linear(n_head*d_hidden, d_out) - self.scaling = 1/math.sqrt(d_hidden) - # - # initialize all parameters properly - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, query, key, value): - B, Q = query.shape[:2] - B, K = key.shape[:2] - # - query = self.to_q(query).reshape(B, Q, self.h, self.dim) - key = self.to_k(key).reshape(B, K, self.h, self.dim) - value = self.to_v(value).reshape(B, K, self.h, self.dim) - # - query = query * self.scaling - attn = einsum('bqhd,bkhd->bhqk', query, key) - attn = F.softmax(attn, dim=-1) - # - out = einsum('bhqk,bkhd->bqhd', attn, value) - out = out.reshape(B, Q, self.h*self.dim) - # - out = self.to_out(out) - - return out - -class AttentionWithBias(nn.Module): - def __init__(self, d_in=256, d_bias=128, n_head=8, d_hidden=32): - super(AttentionWithBias, self).__init__() - self.norm_in = nn.LayerNorm(d_in) - self.norm_bias = nn.LayerNorm(d_bias) - # - self.to_q = nn.Linear(d_in, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_in, n_head*d_hidden, bias=False) - self.to_v = nn.Linear(d_in, n_head*d_hidden, bias=False) - self.to_b = nn.Linear(d_bias, n_head, bias=False) - 
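- # to_g below is a gating projection: reset_parameter() zeroes its weights and sets its biases to one, so the sigmoid gates start mostly open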
self.to_g = nn.Linear(d_in, n_head*d_hidden) - self.to_out = nn.Linear(n_head*d_hidden, d_in) - - self.scaling = 1/math.sqrt(d_hidden) - self.h = n_head - self.dim = d_hidden - - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - - # bias: normal distribution - self.to_b = init_lecun_normal(self.to_b) - - # gating: zero weights, one biases (mostly open gate at the begining) - nn.init.zeros_(self.to_g.weight) - nn.init.ones_(self.to_g.bias) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, x, bias): - B, L = x.shape[:2] - # - x = self.norm_in(x) - bias = self.norm_bias(bias) - # - query = self.to_q(x).reshape(B, L, self.h, self.dim) - key = self.to_k(x).reshape(B, L, self.h, self.dim) - value = self.to_v(x).reshape(B, L, self.h, self.dim) - bias = self.to_b(bias) # (B, L, L, h) - gate = torch.sigmoid(self.to_g(x)) - # - key = key * self.scaling - attn = einsum('bqhd,bkhd->bqkh', query, key) - attn = attn + bias - attn = F.softmax(attn, dim=-2) - # - out = einsum('bqkh,bkhd->bqhd', attn, value).reshape(B, L, -1) - out = gate * out - # - out = self.to_out(out) - return out - -# MSA Attention (row/column) from AlphaFold architecture -class SequenceWeight(nn.Module): - def __init__(self, d_msa, n_head, d_hidden, p_drop=0.1): - super(SequenceWeight, self).__init__() - self.h = n_head - self.dim = d_hidden - self.scale = 1.0 / math.sqrt(self.dim) - - self.to_query = nn.Linear(d_msa, n_head*d_hidden) - self.to_key = nn.Linear(d_msa, n_head*d_hidden) - self.dropout = nn.Dropout(p_drop) - - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_query.weight) - nn.init.xavier_uniform_(self.to_key.weight) - - def forward(self, msa): - B, N, L = msa.shape[:3] - - tar_seq = msa[:,0] - - q = self.to_query(tar_seq).view(B, 1, L, self.h, self.dim) - k = self.to_key(msa).view(B, N, L, self.h, self.dim) - - q = q * self.scale - attn = einsum('bqihd,bkihd->bkihq', q, k) - attn = F.softmax(attn, dim=1) - return self.dropout(attn) - -class MSARowAttentionWithBias(nn.Module): - def __init__(self, d_msa=256, d_pair=128, n_head=8, d_hidden=32): - super(MSARowAttentionWithBias, self).__init__() - self.norm_msa = nn.LayerNorm(d_msa) - self.norm_pair = nn.LayerNorm(d_pair) - # - self.seq_weight = SequenceWeight(d_msa, n_head, d_hidden, p_drop=0.1) - self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_v = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_b = nn.Linear(d_pair, n_head, bias=False) - self.to_g = nn.Linear(d_msa, n_head*d_hidden) - self.to_out = nn.Linear(n_head*d_hidden, d_msa) - - self.scaling = 1/math.sqrt(d_hidden) - self.h = n_head - self.dim = d_hidden - - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - - # bias: normal distribution - self.to_b = init_lecun_normal(self.to_b) - - # gating: zero weights, one biases (mostly open gate at the 
begining) - nn.init.zeros_(self.to_g.weight) - nn.init.ones_(self.to_g.bias) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, msa, pair): # TODO: make this as tied-attention - B, N, L = msa.shape[:3] - # - msa = self.norm_msa(msa) - pair = self.norm_pair(pair) - # - seq_weight = self.seq_weight(msa) # (B, N, L, h, 1) - query = self.to_q(msa).reshape(B, N, L, self.h, self.dim) - key = self.to_k(msa).reshape(B, N, L, self.h, self.dim) - value = self.to_v(msa).reshape(B, N, L, self.h, self.dim) - bias = self.to_b(pair) # (B, L, L, h) - gate = torch.sigmoid(self.to_g(msa)) - # - query = query * seq_weight.expand(-1, -1, -1, -1, self.dim) - key = key * self.scaling - attn = einsum('bsqhd,bskhd->bqkh', query, key) - attn = attn + bias - attn = F.softmax(attn, dim=-2) - # - out = einsum('bqkh,bskhd->bsqhd', attn, value).reshape(B, N, L, -1) - out = gate * out - # - out = self.to_out(out) - return out - -class MSAColAttention(nn.Module): - def __init__(self, d_msa=256, n_head=8, d_hidden=32): - super(MSAColAttention, self).__init__() - self.norm_msa = nn.LayerNorm(d_msa) - # - self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_v = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_g = nn.Linear(d_msa, n_head*d_hidden) - self.to_out = nn.Linear(n_head*d_hidden, d_msa) - - self.scaling = 1/math.sqrt(d_hidden) - self.h = n_head - self.dim = d_hidden - - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - - # gating: zero weights, one biases (mostly open gate at the begining) - nn.init.zeros_(self.to_g.weight) - nn.init.ones_(self.to_g.bias) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, msa): - B, N, L = msa.shape[:3] - # - msa = self.norm_msa(msa) - # - query = self.to_q(msa).reshape(B, N, L, self.h, self.dim) - key = self.to_k(msa).reshape(B, N, L, self.h, self.dim) - value = self.to_v(msa).reshape(B, N, L, self.h, self.dim) - gate = torch.sigmoid(self.to_g(msa)) - # - query = query * self.scaling - attn = einsum('bqihd,bkihd->bihqk', query, key) - attn = F.softmax(attn, dim=-1) - # - out = einsum('bihqk,bkihd->bqihd', attn, value).reshape(B, N, L, -1) - out = gate * out - # - out = self.to_out(out) - return out - -class MSAColGlobalAttention(nn.Module): - def __init__(self, d_msa=64, n_head=8, d_hidden=8): - super(MSAColGlobalAttention, self).__init__() - self.norm_msa = nn.LayerNorm(d_msa) - # - self.to_q = nn.Linear(d_msa, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_msa, d_hidden, bias=False) - self.to_v = nn.Linear(d_msa, d_hidden, bias=False) - self.to_g = nn.Linear(d_msa, n_head*d_hidden) - self.to_out = nn.Linear(n_head*d_hidden, d_msa) - - self.scaling = 1/math.sqrt(d_hidden) - self.h = n_head - self.dim = d_hidden - - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - 
nn.init.xavier_uniform_(self.to_v.weight) - - # gating: zero weights, one biases (mostly open gate at the begining) - nn.init.zeros_(self.to_g.weight) - nn.init.ones_(self.to_g.bias) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, msa): - B, N, L = msa.shape[:3] - # - msa = self.norm_msa(msa) - # - query = self.to_q(msa).reshape(B, N, L, self.h, self.dim) - query = query.mean(dim=1) # (B, L, h, dim) - key = self.to_k(msa) # (B, N, L, dim) - value = self.to_v(msa) # (B, N, L, dim) - gate = torch.sigmoid(self.to_g(msa)) # (B, N, L, h*dim) - # - query = query * self.scaling - attn = einsum('bihd,bkid->bihk', query, key) # (B, L, h, N) - attn = F.softmax(attn, dim=-1) - # - out = einsum('bihk,bkid->bihd', attn, value).reshape(B, 1, L, -1) # (B, 1, L, h*dim) - out = gate * out # (B, N, L, h*dim) - # - out = self.to_out(out) - return out - -# Instead of triangle attention, use Tied axail attention with bias from coordinates..? -class BiasedAxialAttention(nn.Module): - def __init__(self, d_pair, d_bias, n_head, d_hidden, p_drop=0.1, is_row=True): - super(BiasedAxialAttention, self).__init__() - # - self.is_row = is_row - self.norm_pair = nn.LayerNorm(d_pair) - self.norm_bias = nn.LayerNorm(d_bias) - - self.to_q = nn.Linear(d_pair, n_head*d_hidden, bias=False) - self.to_k = nn.Linear(d_pair, n_head*d_hidden, bias=False) - self.to_v = nn.Linear(d_pair, n_head*d_hidden, bias=False) - self.to_b = nn.Linear(d_bias, n_head, bias=False) - self.to_g = nn.Linear(d_pair, n_head*d_hidden) - self.to_out = nn.Linear(n_head*d_hidden, d_pair) - - self.scaling = 1/math.sqrt(d_hidden) - self.h = n_head - self.dim = d_hidden - - # initialize all parameters properly - self.reset_parameter() - - def reset_parameter(self): - # query/key/value projection: Glorot uniform / Xavier uniform - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - - # bias: normal distribution - self.to_b = init_lecun_normal(self.to_b) - - # gating: zero weights, one biases (mostly open gate at the begining) - nn.init.zeros_(self.to_g.weight) - nn.init.ones_(self.to_g.bias) - - # to_out: right before residual connection: zero initialize -- to make it sure residual operation is same to the Identity at the begining - nn.init.zeros_(self.to_out.weight) - nn.init.zeros_(self.to_out.bias) - - def forward(self, pair, bias, same_chain = None): - # pair: (B, L, L, d_pair) - B, L = pair.shape[:2] - - if self.is_row: - pair = pair.permute(0,2,1,3) - bias = bias.permute(0,2,1,3) - - pair = self.norm_pair(pair) - bias = self.norm_bias(bias) - - query = self.to_q(pair).reshape(B, L, L, self.h, self.dim) - key = self.to_k(pair).reshape(B, L, L, self.h, self.dim) - value = self.to_v(pair).reshape(B, L, L, self.h, self.dim) - bias = self.to_b(bias) # (B, L, L, h) - gate = torch.sigmoid(self.to_g(pair)) # (B, L, L, h*dim) - - query = query * self.scaling - key = key / math.sqrt(L) # normalize for tied attention - attn = einsum('bnihk,bnjhk->bijh', query, key) # tied attention - attn = attn + bias # apply bias - attn = F.softmax(attn, dim=-2) # (B, L, L, h) - - if same_chain is not None: - ic(same_chain) - ic(attn) - ic(attn[~same_chain]) - attn[~same_chain] *= 1.1 - - out = einsum('bijh,bkjhd->bikhd', attn, value).reshape(B, L, L, -1) - out = gate * out - - out = self.to_out(out) - if 
self.is_row: - out = out.permute(0,2,1,3) - return out - diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/model/layers/._attention.py b/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/model/layers/._attention.py deleted file mode 100644 index e104c6317b4c2bd3aed2efb7c5db8556cd8d985e..0000000000000000000000000000000000000000 Binary files a/spaces/merle/PROTEIN_GENERATOR/utils/model/se3_transformer/model/layers/._attention.py and /dev/null differ diff --git a/spaces/merve/data-leak/source/_posts/2019-10-03-fairness.html b/spaces/merve/data-leak/source/_posts/2019-10-03-fairness.html deleted file mode 100644 index e87b79e7fec2d286610661ddae8970bb7c9fe1dc..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/_posts/2019-10-03-fairness.html +++ /dev/null @@ -1,219 +0,0 @@ - ---- -permalink: /measuring-fairness/ -template: post.html - -title: Measuring Fairness -summary: There are multiple ways to measure accuracy. No matter how we build our model, accuracy across these measures will vary when applied to different groups of people. -summaryalt: There are multiple ways to assess machine learning models, such as their overall accuracy. Another important perspective to consider is the fairness of the model with respect to different groups of people or different contexts of use. -shareimg: https://pair.withgoogle.com/explorables/images/measuring-fairness.png -date: 2021-05-01 ----

    Measuring Fairness


How do you make sure a model works equally well for different groups of people? It turns out that in many situations, this is harder than you might think.

    The problem is that there are different ways to measure the accuracy of a model, and often it's mathematically impossible for them all to be equal across groups. - -

    We'll illustrate how this happens by creating a (fake) medical model to screen these people for a disease. -

    - - -
    -

    Ground Truth

    - -

    About half of these people actually have the disease; half of them don't. -

    - - -
    -

    Model Predictions

    - -

    In a perfect world, only sick people would test positive for the disease and only healthy people would test negative. -

    - - -
    -

    Model Mistakes

    - -

    But models and tests aren't perfect. - -

    The model might make a mistake and mark a sick person as healthy. - -

    Or the opposite: marking a healthy person as sick. -

    - - -

    Never Miss the Disease...

    - -

    If there's a simple follow-up test, we could have the model aggressively call close cases so it rarely misses the disease. - -

    We can quantify this by measuring the percentage of sick people who test positive. - -

    -
    - - -
    -

    ...Or Avoid Overcalling?

    - -

    On the other hand, if there isn't a secondary test, or the treatment uses a drug with a limited supply, we might care more about the percentage of people with positive tests who are actually sick. - -

    - -

    These issues and trade-offs in model optimization aren't new, but they're brought into focus when we have the ability to fine-tune exactly how aggressively disease is diagnosed. - -
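    To make the trade-off concrete, here is a minimal Python sketch — not code from this piece; the model scores and labels below are made up — showing how the two percentages move in opposite directions as the diagnosis threshold slides:

    def recall_precision(scores, labels, threshold):
        # labels: 1 = sick, 0 = well; predict "sick" when score >= threshold
        tp = sum(1 for s, y in zip(scores, labels) if s >= threshold and y == 1)
        fp = sum(1 for s, y in zip(scores, labels) if s >= threshold and y == 0)
        fn = sum(1 for s, y in zip(scores, labels) if s < threshold and y == 1)
        recall = tp / (tp + fn) if tp + fn else 0.0     # share of sick people caught
        precision = tp / (tp + fp) if tp + fp else 0.0  # share of positives that are sick
        return recall, precision

    # Synthetic scores from an imaginary model, purely for illustration.
    scores = [0.1, 0.3, 0.4, 0.55, 0.6, 0.7, 0.8, 0.9]
    labels = [0, 0, 1, 0, 1, 1, 0, 1]
    for t in (0.3, 0.5, 0.7):
        r, p = recall_precision(scores, labels, t)
        print(f"threshold={t:.2f}  recall={r:.2f}  precision={p:.2f}")

    Lowering the threshold (an aggressive model) pushes recall toward 100% while precision falls; raising it does the reverse.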

    - - Try adjusting how aggressive the model is in diagnosing the disease -
    - - -
    -

    Subgroup Analysis

    - -

    Things get even more complicated when we check if the model treats different groups fairly.¹ - -

    However we trade off these metrics, we'd probably like them to be roughly even across different groups of people. - -

    If we're trying to evenly allocate resources, having the model miss more cases in children than adults would be bad!² -

    - - -
    -

    Base Rates

    - -

    If you look carefully, you'll see that the disease is more prevalent in children. That is, the "base rate" of the disease is different across groups. - -

    The fact that the base rates are different makes the situation surprisingly tricky. For one thing, even though the test catches the same percentage of sick adults and sick children, an adult who tests positive is less likely to have the disease than a child who tests positive. -
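    A back-of-the-envelope calculation makes the effect visible. The sketch below assumes a made-up test with 90% sensitivity and 90% specificity, applied to two invented base rates; none of these numbers come from the essay:

    def precision_from_rates(prevalence, sensitivity, specificity):
        tp = prevalence * sensitivity              # sick people who test positive
        fp = (1 - prevalence) * (1 - specificity)  # well people who test positive
        return tp / (tp + fp)

    sens, spec = 0.9, 0.9  # identical test accuracy for both groups (assumed)
    print("children:", round(precision_from_rates(0.5, sens, spec), 2))  # 0.9
    print("adults:  ", round(precision_from_rates(0.2, sens, spec), 2))  # ~0.69

    With the exact same test, a positive child is sick 90% of the time but a positive adult only about 69% of the time, purely because the base rates differ.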

    - - -
    -

    Imbalanced Metrics

    - -

    Why is there a disparity in diagnoses between children and adults? A higher proportion of adults are well, so mistakes in the test mark more well adults "positive" than well children (and similarly for mistaken negatives). - -


    -
    - -

    To fix this, we could have the model take age into account. - -

    -
    -
    - -
    -

    Try adjusting the slider to make the model diagnose adults less aggressively than children.
    - -
    -

    This allows us to align one metric. But now adults who have the disease are less likely to be diagnosed with it! - -

    -
    -
    - -

    No matter how you move the sliders, you won't be able to make both metrics fair at once. It turns out this is inevitable any time the base rates are different, and the test isn't perfect. - -

    There are multiple ways to define fairness mathematically. It usually isn't possible to satisfy all of them.³ -
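    The sketch below illustrates the bind. It assumes each group's threshold can be tuned independently along one shared accuracy trade-off curve; the curve and the base rates are invented for illustration:

    def precision(prevalence, sensitivity, specificity):
        tp = prevalence * sensitivity
        fp = (1 - prevalence) * (1 - specificity)
        return tp / (tp + fp)

    def toy_roc(sens):
        # Toy trade-off: being more sensitive costs some specificity.
        return 1 - 0.3 * sens

    for sens in (0.7, 0.8, 0.9):
        p_child = precision(0.5, sens, toy_roc(sens))
        p_adult = precision(0.2, sens, toy_roc(sens))
        print(f"recall={sens:.1f}  precision: children={p_child:.2f}, adults={p_adult:.2f}")

    Matching recall in both groups leaves precision unequal at every operating point, and matching precision instead would split recall; only a perfect test (or equal base rates) escapes this.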

    -
    - - -
    -
    -
    -
    - -

    Conclusion

    - -

    Thankfully, the notion of fairness you choose to satisfy will depend on the context of your model: while it may not be possible to satisfy every definition of fairness, you can focus on the ones that make sense for your use case. - -

    Even if fairness along every dimension isn't possible, we shouldn't stop checking for bias. The Hidden Bias explorable outlines different ways human bias can feed into an ML model. - -

    More Reading

    - -

    In some contexts, setting different thresholds for different populations might not be acceptable. Can you make AI fairer than a judge? explores an algorithm that can send people to jail. - -

    There are lots of different metrics you might use to determine if an algorithm is fair. Attacking discrimination with smarter machine learning shows how several of them work. Using Fairness Indicators in conjunction with the What-If Tool and other fairness tools, you can test your own model against commonly used fairness metrics. - -

    Machine learning practitioners use words like “recall” to describe the percentage of sick people who test positive. Check out the PAIR Guidebook Glossary to learn how to talk to the people building the models. - -

    Appendix

    - -

    ¹ This essay uses very academic, mathematical standards for fairness that don't encompass everything we might include in the colloquial meaning of fairness. There's a gap between the technical descriptions of algorithms here and the social context that they're deployed in. - -

    ² Sometimes we might care more about different error modes in different populations. If treatment is riskier for children, we'd probably want the model to be less aggressive in diagnosing. - -

    ³ The above example assumes the model sorts and scores people based on how likely it is that they are sick. With complete control over the model's exact rate of under- and over-diagnosing in both groups, it's actually possible to align both of the metrics we've discussed so far. Try tweaking the model below to get both of them to line up. - -

    Adding a third metric, the percentage of well people who test negative, makes perfect fairness impossible. Can you see why all three metrics won't align unless the base rate of the disease is the same in both populations? - -
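    One way to see it, using the standard formula for the share of positives that are sick (the notation here is assumed, not taken from the essay): write p for the base rate, TPR for the share of sick people caught, and TNR for the share of well people cleared. Then

    precision = (TPR · p) / (TPR · p + (1 − TNR) · (1 − p))

    which rearranges to (1 − precision) / precision = ((1 − TNR) / TPR) · ((1 − p) / p).

    If TPR, TNR, and precision all match across the two groups, then both sides and the first factor on the right are fixed, so (1 − p)/p — and hence the base rate p itself — must match too, unless the test is perfect (TNR = 1).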

    - -
    Drag — to adjust model accuracy and | to adjust the occurrence of disease
    -
    - -

    Credits

    - -

    Adam Pearce // May 2020 - -

    Thanks to Carey Radebaugh, Dan Nanas, David Weinberger, Emily Denton, Emily Reif, Fernanda Viégas, Hal Abelson, James Wexler, Kristen Olson, Lucas Dixon, Mahima Pushkarna, Martin Wattenberg, Michael Terry, Rebecca Salois, Timnit Gebru, Tulsee Doshi, Yannick Assogba, Yoni Halpern, Zan Armstrong, and my other colleagues at Google for their help with this piece. - -

    Silhouettes from ProPublica's Wee People. - -

    More Explorables

    - -

    - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/merve/fill-in-the-blank/public/measuring-fairness/init.js b/spaces/merve/fill-in-the-blank/public/measuring-fairness/init.js deleted file mode 100644 index 5a8df63793d90464eb148443787eb91e2b34180b..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/measuring-fairness/init.js +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -nCols = 12 - -window.colors = { - well: d3.color('#669399') + '', - sick: d3.color('#EE2A2A') + '', - - // well: d3.color('green') + '', - // sick: d3.color('purple'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#865327') + '', - // sick: d3.color('#012394'), - - // well: d3.color('#012394') + '', - // sick: d3.color('#FBC20F') + '', - - // well: d3.color('#012394') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#012394') + '', - - // well: d3.color('orange') + '', - // sick: d3.color('#012394') + '', - - -} - -window.colors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.2), -} - -window.lcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.35) -} -window.llcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(1) -} -window.dcolors = { - well: d3.interpolate(colors.well, '#000')(.65), - sick: d3.interpolate(colors.sick, '#000')(.65) -} - -// window.colors = { -// well: d3.color('#BEF5FF') + '', -// sick: d3.color('#FCC5C3') + '', -// } - -// window.colors = { -// well: d3.color('#669399') + '', -// sick: d3.color('#EE2A2A') + '', -// } - -// window.lcolors = { -// well: d3.interpolate(colors.well, '#fff')(.3), -// sick: d3.interpolate(colors.sick, '#fff')(.3) -// } -// window.llcolors = { -// well: d3.interpolate(colors.well, '#fff')(.2), -// sick: d3.interpolate(colors.sick, '#fff')(.2) -// } - -// window.lcolors = { -// well: '#CFFCF6', -// sick: '#FFBD96' -// } - -// copy(logColors()) -function logColors(){ - return ` - body{ - --colors-well: ${d3.rgb(colors.well)}; - --colors-sick: ${d3.rgb(colors.sick)}; - --lcolors-well: ${d3.rgb(lcolors.well)}; - --lcolors-sick: ${d3.rgb(lcolors.sick)}; - --dcolors-well: ${d3.rgb(dcolors.well)}; - --dcolors-sick: ${d3.rgb(dcolors.sick)}; - } - ` -} - - - -window.init = function(){ - console.clear() - - graphSel = d3.select('#graph').html('').append('div') - totalWidth = graphSel.node().offsetWidth - totalWidth = 400 - - c = d3.conventions({ - sel: 
graphSel.st({marginTop: 40}), - margin: {top: 20}, - totalWidth, - totalHeight: totalWidth, - }) - - students = makeStudents() - sel = makeSel() - mini = makeMini() - slider = makeSlider() - slides = makeSlides() - gs = makeGS() - - function sizeGraphSel(){ - var scale = (totalWidth + 35)/(innerWidth - 10) // off by one, s is 35 - scale = d3.clamp(1, scale, 2) - - graphSel.st({ - transform: `scale(${1/scale})`, - transformOrigin: '0px 0px', - - }) - } - sizeGraphSel() - d3.select(window).on('resize', sizeGraphSel) - -} -init() - - - - - -!(function(){ - var footnums = '¹²³' - - d3.selectAll('.footstart').each(function(d, i){ - d3.select(this) - .at({ - href: '#footend-' + i, - }) - .text(footnums[i]) - .parent().at({id: 'footstart-' + i}) - }) - - d3.selectAll('.footend').each(function(d, i){ - d3.select(this) - .at({ - href: '#footstart-' + i, - id: 'footend-' + i, - }) - .text(footnums[i]) - }) - - - d3.selectAll('#sections wee, #graph .weepeople').attr('aria-hidden', true) - -})() - - - - - - - - - - - - - - - - - diff --git a/spaces/mikeee/radiobee-dev/radiobee/amend_avec.py b/spaces/mikeee/radiobee-dev/radiobee/amend_avec.py deleted file mode 100644 index 8cc6a88af0de6152c2aadebafa48e5744e018b00..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-dev/radiobee/amend_avec.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Amend avec from align_block.""" -# pylint: disable=unused-variable, unused-import - -from typing import List, Tuple, Union - -from itertools import zip_longest -from logzero import logger # noqa - -# from radiobee.gen_aset import gen_aset - - -# fmt: off -def amend_avec( - avec: List[Tuple[int, int]], - len0: int = 0, - len1: int = 0, -) -> List[Tuple[Union[int, str], Union[int, str]]]: - # fmt: on - """Amend avec from align_block. - - Rewritten based on gen_aset. 
- - Args: - avec: list generated by align_block - len0: max first possibe entries - len1: max second entries - Return - nicely amended to include all entries - Raise - None - """ - # empty avec [] - if not avec: - return [*zip_longest(range(len0), range(len1), fillvalue="")] - # empty [[]] - if len(avec) == 1: - if not avec[0]: - return [*zip_longest(range(len0), range(len1), fillvalue="")] - - buff = [] - pos0, pos1 = -1, -1 - for elm in avec: - # elm0, elm1, elm2 = elm - elm0, elm1, *elm2 = elm - elm0 = int(elm0) - elm1 = int(elm1) - interval = max(elm0 - pos0 - 1, elm1 - pos1 - 1) - _ = zip_longest(range(pos0 + 1, elm0), range(pos1 + 1, elm1), [""] * interval, fillvalue="") - buff.extend(_) - buff.append(elm) - pos0, pos1 = elm0, elm1 - - # last batch if any - # elm0, elm1 = tgt_len, src_len - elm0, elm1 = len0, len1 - interval = max(elm0 - pos0 - 1, elm1 - pos1 - 1) - _ = zip_longest(range(pos0 + 1, elm0), range(pos1 + 1, elm1), [""] * interval, fillvalue="") - buff.extend(_) - - return buff diff --git a/spaces/mingyuan/MotionDiffuse/utils/skeleton.py b/spaces/mingyuan/MotionDiffuse/utils/skeleton.py deleted file mode 100644 index 6de56af0c29ae7cccbd7178f912459413f87c646..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/MotionDiffuse/utils/skeleton.py +++ /dev/null @@ -1,199 +0,0 @@ -from utils.quaternion import * -import scipy.ndimage.filters as filters - -class Skeleton(object): - def __init__(self, offset, kinematic_tree, device): - self.device = device - self._raw_offset_np = offset.numpy() - self._raw_offset = offset.clone().detach().to(device).float() - self._kinematic_tree = kinematic_tree - self._offset = None - self._parents = [0] * len(self._raw_offset) - self._parents[0] = -1 - for chain in self._kinematic_tree: - for j in range(1, len(chain)): - self._parents[chain[j]] = chain[j-1] - - def njoints(self): - return len(self._raw_offset) - - def offset(self): - return self._offset - - def set_offset(self, offsets): - self._offset = offsets.clone().detach().to(self.device).float() - - def kinematic_tree(self): - return self._kinematic_tree - - def parents(self): - return self._parents - - # joints (batch_size, joints_num, 3) - def get_offsets_joints_batch(self, joints): - assert len(joints.shape) == 3 - _offsets = self._raw_offset.expand(joints.shape[0], -1, -1).clone() - for i in range(1, self._raw_offset.shape[0]): - _offsets[:, i] = torch.norm(joints[:, i] - joints[:, self._parents[i]], p=2, dim=1)[:, None] * _offsets[:, i] - - self._offset = _offsets.detach() - return _offsets - - # joints (joints_num, 3) - def get_offsets_joints(self, joints): - assert len(joints.shape) == 2 - _offsets = self._raw_offset.clone() - for i in range(1, self._raw_offset.shape[0]): - # print(joints.shape) - _offsets[i] = torch.norm(joints[i] - joints[self._parents[i]], p=2, dim=0) * _offsets[i] - - self._offset = _offsets.detach() - return _offsets - - # face_joint_idx should follow the order of right hip, left hip, right shoulder, left shoulder - # joints (batch_size, joints_num, 3) - def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False): - assert len(face_joint_idx) == 4 - '''Get Forward Direction''' - l_hip, r_hip, sdr_r, sdr_l = face_joint_idx - across1 = joints[:, r_hip] - joints[:, l_hip] - across2 = joints[:, sdr_r] - joints[:, sdr_l] - across = across1 + across2 - across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis] - # print(across1.shape, across2.shape) - - # forward (batch_size, 3) - forward = np.cross(np.array([[0, 1, 0]]), across, 
axis=-1) - if smooth_forward: - forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest') - # forward (batch_size, 3) - forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis] - - '''Get Root Rotation''' - target = np.array([[0,0,1]]).repeat(len(forward), axis=0) - root_quat = qbetween_np(forward, target) - - '''Inverse Kinematics''' - # quat_params (batch_size, joints_num, 4) - # print(joints.shape[:-1]) - quat_params = np.zeros(joints.shape[:-1] + (4,)) - # print(quat_params.shape) - root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]]) - quat_params[:, 0] = root_quat - # quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]]) - for chain in self._kinematic_tree: - R = root_quat - for j in range(len(chain) - 1): - # (batch, 3) - u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0) - # print(u.shape) - # (batch, 3) - v = joints[:, chain[j+1]] - joints[:, chain[j]] - v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis] - # print(u.shape, v.shape) - rot_u_v = qbetween_np(u, v) - - R_loc = qmul_np(qinv_np(R), rot_u_v) - - quat_params[:,chain[j + 1], :] = R_loc - R = qmul_np(R, R_loc) - - return quat_params - - # Be sure root joint is at the beginning of kinematic chains - def forward_kinematics(self, quat_params, root_pos, skel_joints=None, do_root_R=True): - # quat_params (batch_size, joints_num, 4) - # joints (batch_size, joints_num, 3) - # root_pos (batch_size, 3) - if skel_joints is not None: - offsets = self.get_offsets_joints_batch(skel_joints) - if len(self._offset.shape) == 2: - offsets = self._offset.expand(quat_params.shape[0], -1, -1) - joints = torch.zeros(quat_params.shape[:-1] + (3,)).to(self.device) - joints[:, 0] = root_pos - for chain in self._kinematic_tree: - if do_root_R: - R = quat_params[:, 0] - else: - R = torch.tensor([[1.0, 0.0, 0.0, 0.0]]).expand(len(quat_params), -1).detach().to(self.device) - for i in range(1, len(chain)): - R = qmul(R, quat_params[:, chain[i]]) - offset_vec = offsets[:, chain[i]] - joints[:, chain[i]] = qrot(R, offset_vec) + joints[:, chain[i-1]] - return joints - - # Be sure root joint is at the beginning of kinematic chains - def forward_kinematics_np(self, quat_params, root_pos, skel_joints=None, do_root_R=True): - # quat_params (batch_size, joints_num, 4) - # joints (batch_size, joints_num, 3) - # root_pos (batch_size, 3) - if skel_joints is not None: - skel_joints = torch.from_numpy(skel_joints) - offsets = self.get_offsets_joints_batch(skel_joints) - if len(self._offset.shape) == 2: - offsets = self._offset.expand(quat_params.shape[0], -1, -1) - offsets = offsets.numpy() - joints = np.zeros(quat_params.shape[:-1] + (3,)) - joints[:, 0] = root_pos - for chain in self._kinematic_tree: - if do_root_R: - R = quat_params[:, 0] - else: - R = np.array([[1.0, 0.0, 0.0, 0.0]]).repeat(len(quat_params), axis=0) - for i in range(1, len(chain)): - R = qmul_np(R, quat_params[:, chain[i]]) - offset_vec = offsets[:, chain[i]] - joints[:, chain[i]] = qrot_np(R, offset_vec) + joints[:, chain[i - 1]] - return joints - - def forward_kinematics_cont6d_np(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): - # cont6d_params (batch_size, joints_num, 6) - # joints (batch_size, joints_num, 3) - # root_pos (batch_size, 3) - if skel_joints is not None: - skel_joints = torch.from_numpy(skel_joints) - offsets = self.get_offsets_joints_batch(skel_joints) - if len(self._offset.shape) == 2: - offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) - offsets = offsets.numpy() - joints = 
np.zeros(cont6d_params.shape[:-1] + (3,)) - joints[:, 0] = root_pos - for chain in self._kinematic_tree: - if do_root_R: - matR = cont6d_to_matrix_np(cont6d_params[:, 0]) - else: - matR = np.eye(3)[np.newaxis, :].repeat(len(cont6d_params), axis=0) - for i in range(1, len(chain)): - matR = np.matmul(matR, cont6d_to_matrix_np(cont6d_params[:, chain[i]])) - offset_vec = offsets[:, chain[i]][..., np.newaxis] - # print(matR.shape, offset_vec.shape) - joints[:, chain[i]] = np.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] - return joints - - def forward_kinematics_cont6d(self, cont6d_params, root_pos, skel_joints=None, do_root_R=True): - # cont6d_params (batch_size, joints_num, 6) - # joints (batch_size, joints_num, 3) - # root_pos (batch_size, 3) - if skel_joints is not None: - # skel_joints = torch.from_numpy(skel_joints) - offsets = self.get_offsets_joints_batch(skel_joints) - if len(self._offset.shape) == 2: - offsets = self._offset.expand(cont6d_params.shape[0], -1, -1) - joints = torch.zeros(cont6d_params.shape[:-1] + (3,)).to(cont6d_params.device) - joints[..., 0, :] = root_pos - for chain in self._kinematic_tree: - if do_root_R: - matR = cont6d_to_matrix(cont6d_params[:, 0]) - else: - matR = torch.eye(3).expand((len(cont6d_params), -1, -1)).detach().to(cont6d_params.device) - for i in range(1, len(chain)): - matR = torch.matmul(matR, cont6d_to_matrix(cont6d_params[:, chain[i]])) - offset_vec = offsets[:, chain[i]].unsqueeze(-1) - # print(matR.shape, offset_vec.shape) - joints[:, chain[i]] = torch.matmul(matR, offset_vec).squeeze(-1) + joints[:, chain[i-1]] - return joints - - - - - diff --git a/spaces/ml-energy/leaderboard/spitfight/colosseum/common.py b/spaces/ml-energy/leaderboard/spitfight/colosseum/common.py deleted file mode 100644 index 329d66d49a581d7bb9c20116b5caadf747d1dee0..0000000000000000000000000000000000000000 --- a/spaces/ml-energy/leaderboard/spitfight/colosseum/common.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import annotations - -from typing import Literal - -from pydantic import BaseModel - -COLOSSEUM_MODELS_ROUTE = "/models" -COLOSSEUM_PROMPT_ROUTE = "/prompt" -COLOSSEUM_RESP_VOTE_ROUTE = "/response_vote" -COLOSSEUM_ENERGY_VOTE_ROUTE = "/energy_vote" -COLOSSEUM_HEALTH_ROUTE = "/health" - - -class ModelsResponse(BaseModel): - available_models: list[str] - - -class PromptRequest(BaseModel): - request_id: str - prompt: str - model_index: Literal[0, 1] - model_preference: str - - -class ResponseVoteRequest(BaseModel): - request_id: str - victory_index: Literal[0, 1] - - -class ResponseVoteResponse(BaseModel): - model_names: list[str] - energy_consumptions: list[float] - - -class EnergyVoteRequest(BaseModel): - request_id: str - is_worth: bool - - -class EnergyVoteResponse(BaseModel): - model_names: list[str] diff --git a/spaces/mmecheri/Rakuten_Streamlit/app.py b/spaces/mmecheri/Rakuten_Streamlit/app.py deleted file mode 100644 index b21f548c577b73ffde751afe109349ac85234318..0000000000000000000000000000000000000000 --- a/spaces/mmecheri/Rakuten_Streamlit/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import streamlit as st -from multiapp import MultiApp -import home, demo, dataset ,models, conclusion - - - -def main(): - - st.set_page_config('rakupy', layout="wide") - - apps = MultiApp('Navigation','Menu') - # Add all Applications - apps.add_app("Description du projet", home.app) - apps.add_app("Jeu de données", dataset.app) - apps.add_app("Modélisation", models.app) - apps.add_app("Démo", demo.app) - apps.add_app("Conclusion", conclusion.app) - - # The 
main app - apps.run() - -if __name__ == '__main__': - main() diff --git a/spaces/mnauf/detect-bees/utils/__init__.py b/spaces/mnauf/detect-bees/utils/__init__.py deleted file mode 100644 index 22a4caad4f17b58c36491cb3a5076df986032f9e..0000000000000000000000000000000000000000 --- a/spaces/mnauf/detect-bees/utils/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -utils/initialization -""" - -import contextlib -import platform -import threading - - -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - - -class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): - self.msg = msg - - def __enter__(self): - pass - - def __exit__(self, exc_type, value, traceback): - if value: - print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) - return True - - -def threaded(func): - # Multi-threads a target function and returns thread. Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def notebook_init(verbose=True): - # Check system software and hardware - print('Checking setup...') - - import os - import shutil - - from utils.general import check_font, check_requirements, is_colab - from utils.torch_utils import select_device # imports - - check_requirements(('psutil', 'IPython')) - check_font() - - import psutil - from IPython import display # to display images and clear console output - - if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory - - # System info - if verbose: - gb = 1 << 30 # bytes to GiB (1024 ** 3) - ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") - display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' - else: - s = '' - - select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) - return display diff --git a/spaces/monster-labs/Controlnet-QRCode-Monster-V1/app.py b/spaces/monster-labs/Controlnet-QRCode-Monster-V1/app.py deleted file mode 100644 index a4bc3219c3906fc89984e09dbb5e3bd6574dcd9d..0000000000000000000000000000000000000000 --- a/spaces/monster-labs/Controlnet-QRCode-Monster-V1/app.py +++ /dev/null @@ -1,299 +0,0 @@ -import torch -import gradio as gr -from PIL import Image -import qrcode -from pathlib import Path -from multiprocessing import cpu_count -import requests -import io -import os -from PIL import Image - -from diffusers import ( - StableDiffusionControlNetPipeline, - ControlNetModel, - DDIMScheduler, - DPMSolverMultistepScheduler, - DEISMultistepScheduler, - HeunDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, -) - -controlnet = ControlNetModel.from_pretrained( - "monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16 -) - -pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", - controlnet=controlnet, - safety_checker=None, - torch_dtype=torch.float16, -).to("cuda") -pipe.enable_xformers_memory_efficient_attention() - -SAMPLER_MAP = { - "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"), - "DPM++ 
Karras": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True), - "Heun": lambda config: HeunDiscreteScheduler.from_config(config), - "Euler a": lambda config: EulerAncestralDiscreteScheduler.from_config(config), - "Euler": lambda config: EulerDiscreteScheduler.from_config(config), - "DDIM": lambda config: DDIMScheduler.from_config(config), - "DEIS": lambda config: DEISMultistepScheduler.from_config(config), -} - - -def create_code(content: str): - qr = qrcode.QRCode( - version=1, - error_correction=qrcode.constants.ERROR_CORRECT_H, - box_size=16, - border=0, - ) - qr.add_data(content) - qr.make(fit=True) - img = qr.make_image(fill_color="black", back_color="white") - - # find smallest image size multiple of 256 that can fit qr - offset_min = 8 * 16 - w, h = img.size - w = (w + 255 + offset_min) // 256 * 256 - h = (h + 255 + offset_min) // 256 * 256 - if w > 1024: - raise gr.Error("QR code is too large, please use a shorter content") - bg = Image.new('L', (w, h), 128) - - # align on 16px grid - coords = ((w - img.size[0]) // 2 // 16 * 16, - (h - img.size[1]) // 2 // 16 * 16) - bg.paste(img, coords) - return bg - - -def inference( - qr_code_content: str, - prompt: str, - negative_prompt: str, - guidance_scale: float = 10.0, - controlnet_conditioning_scale: float = 2.0, - seed: int = -1, - sampler="Euler a", -): - if prompt is None or prompt == "": - raise gr.Error("Prompt is required") - - if qr_code_content is None or qr_code_content == "": - raise gr.Error("QR Code Content is required") - - pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config) - - generator = torch.manual_seed(seed) if seed != -1 else torch.Generator() - - print("Generating QR Code from content") - qrcode_image = create_code(qr_code_content) - - # hack due to gradio examples - init_image = qrcode_image - - out = pipe( - prompt=prompt, - negative_prompt=negative_prompt, - image=qrcode_image, - width=qrcode_image.width, - height=qrcode_image.height, - guidance_scale=float(guidance_scale), - controlnet_conditioning_scale=float(controlnet_conditioning_scale), - generator=generator, - num_inference_steps=40, - ) - return out.images[0] - - -css = """ -#result_image { - display: flex; - place-content: center; - align-items: center; -} -#result_image > img { - height: auto; - max-width: 100%; - width: revert; -} -""" - -with gr.Blocks(css=css) as blocks: - gr.Markdown( - """ -# QR Code Monster v1.0 -## QR Code AI Art Generator - -Model used: https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster - -Try our more powerful v2 here: https://qrcodemonster.art! - - -Duplicate Space for no queue on your own hardware.

    -""" - ) - - with gr.Row(): - with gr.Column(): - qr_code_content = gr.Textbox( - label="QR Code Content or URL", - info="The text you want to encode into the QR code", - value="", - ) - - prompt = gr.Textbox( - label="Prompt", - info="Prompt that guides the generation towards", - ) - negative_prompt = gr.Textbox( - label="Negative Prompt", - value="ugly, disfigured, low quality, blurry, nsfw", - info="Prompt that guides the generation away from", - ) - - with gr.Accordion( - label="Params: The generated QR Code functionality is largely influenced by the parameters detailed below", - open=True, - ): - controlnet_conditioning_scale = gr.Slider( - minimum=0.5, - maximum=2.5, - step=0.01, - value=1.5, - label="Controlnet Conditioning Scale", - info="""Controls the readability/creativity of the QR code. - High values: The generated QR code will be more readable. - Low values: The generated QR code will be more creative. - """ - ) - guidance_scale = gr.Slider( - minimum=0.0, - maximum=25.0, - step=0.25, - value=7, - label="Guidance Scale", - info="Controls the amount of guidance the text prompt guides the image generation" - ) - sampler = gr.Dropdown(choices=list( - SAMPLER_MAP.keys()), value="Euler a", label="Sampler") - seed = gr.Number( - minimum=-1, - maximum=9999999999, - step=1, - value=2313123, - label="Seed", - randomize=True, - info="Seed for the random number generator. Set to -1 for a random seed" - ) - with gr.Row(): - run_btn = gr.Button("Run") - with gr.Column(): - result_image = gr.Image(label="Result Image", elem_id="result_image") - run_btn.click( - inference, - inputs=[ - qr_code_content, - prompt, - negative_prompt, - guidance_scale, - controlnet_conditioning_scale, - seed, - sampler, - ], - outputs=[result_image], - ) - - gr.Examples( - examples=[ - [ - "test", - "Baroque rococo architecture, architectural photography, post apocalyptic New York, hyperrealism, [roots], hyperrealistic, octane render, cinematic, hyper detailed, 8K", - "", - 7, - 1.6, - 2592353769, - "Euler a", - ], - [ - "https://qrcodemonster.art", - "a centered render of an ancient tree covered in bio - organic micro organisms growing in a mystical setting, cinematic, beautifully lit, by tomasz alen kopera and peter mohrbacher and craig mullins, 3d, trending on artstation, octane render, 8k", - "", - 7, - 1.57, - 259235398, - "Euler a", - ], - [ - "test", - "3 cups of coffee with coffee beans around", - "", - 7, - 1.95, - 1889601353, - "Euler a", - ], - [ - "https://huggingface.co", - "A top view picture of a sandy beach with a sand castle, beautiful lighting, 8k, highly detailed", - "sky", - 7, - 1.15, - 46200, - "Euler a", - ], - [ - "test", - "A top view picture of a sandy beach, organic shapes, beautiful lighting, bumps and shadows, 8k, highly detailed", - "sky, water, squares", - 7, - 1.25, - 46220, - "Euler a", - ], - ], - fn=inference, - inputs=[ - qr_code_content, - prompt, - negative_prompt, - guidance_scale, - controlnet_conditioning_scale, - seed, - sampler, - ], - outputs=[result_image], - cache_examples=True, - ) - gr.Markdown( - """ -## Notes - -* The generated QR codes may not always be easily readable and may require adjusting the parameters. -* The prompt affects the quality of the generated QR code. -* The scan may work better if the phone is held further away from the screen, or if the page is zoomed out. 
- -## Parameters - -- **Input Text:** The text you want to encode into the QR code -- **Prompt:** Input a prompt to guide the QR code generation process, allowing you to control the appearance and style of the generated QR codes. Some are easier than others to generate readable QR codes. -- **Controlnet Control Scale:** Raise the control scale value to increase the readability of the QR codes or lower it to make the QR codes more creative and distinctive. - -The generated QR codes might not always be easily readable. It might take a few tries with different parameters to find the right balance. This often depends on the prompt, which can be more or less suitable for QR code generation. - - -## How to Use - -1. Input your text: Pass the text you'd like to encode into the QR code as input. Bigger text means bigger codes, which are less likely to give good results (will ressemble qr codes too much). -2. Set your prompt: Choose a prompt to guide the generation process (use all the SD tricks you like: styles, adjectives...). -3. Adjust the Controlnet Control Scale: The higher the control scale, the more readable the QR code will be, while a lower control scale leads to a more creative QR code. -4. Generate multiple codes: Since not all generated codes may be readable, you'll need to create a few codes with the same parameters to determine if any adjustments are needed. -5. Test the generated QR codes: Scan the generated QR codes to make sure they are readable and meet your requirements. -""" - ) - -blocks.queue(concurrency_count=1, max_size=20, api_open=False) -blocks.launch(share=bool(os.environ.get("SHARE", False)), show_api=False) diff --git a/spaces/mrm8488/santacoder-swift-completion/app.py b/spaces/mrm8488/santacoder-swift-completion/app.py deleted file mode 100644 index ac53c87332a6c7ace4d363c6d19b5a269ebc8064..0000000000000000000000000000000000000000 --- a/spaces/mrm8488/santacoder-swift-completion/app.py +++ /dev/null @@ -1,57 +0,0 @@ -import gradio as gr -from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline - -title = "SantaCoder 🎅 Swift 🍏 Completion" -description = "This is a subspace to make code generation with [SantaCoder fine-tuned on The Stack Swift](https://huggingface.co/mrm8488/santacoder-finetuned-the-stack-swift)" -EXAMPLE_0 = "import SwiftUI\n\nstruct ContentView: View {\n var body: some View {" -EXAMPLE_1 = "// Make a naviagtion list with the days of the week\nNavigationView {" - - -CKPT = "mrm8488/santacoder-finetuned-the-stack-swift" - -examples = [[EXAMPLE_0, 9, 0.6, 42], [EXAMPLE_1, 114, 0.6, 42]] -tokenizer = AutoTokenizer.from_pretrained(CKPT) -model = AutoModelForCausalLM.from_pretrained(CKPT, trust_remote_code=True) - - -def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42): - set_seed(seed) - pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) - generated_text = pipe(gen_prompt, do_sample=True, top_p=0.95, temperature=temperature, max_new_tokens=max_tokens)[0]['generated_text'] - return generated_text - - -iface = gr.Interface( - fn=code_generation, - inputs=[ - gr.Textbox(lines=10, label="Input code"), - gr.inputs.Slider( - minimum=8, - maximum=256, - step=1, - default=8, - label="Number of tokens to generate", - ), - gr.inputs.Slider( - minimum=0, - maximum=2, - step=0.1, - default=0.6, - label="Temperature", - ), - gr.inputs.Slider( - minimum=0, - maximum=1000, - step=1, - default=42, - label="Random seed to use for the generation" - ) - ], - outputs=gr.Textbox(label="Predicted code", lines=10), - 
examples=examples, - layout="horizontal", - theme="peach", - description=description, - title=title -) -iface.launch() \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/laser/laser_src/laser_task.py b/spaces/mshukor/UnIVAL/fairseq/examples/laser/laser_src/laser_task.py deleted file mode 100644 index e4152fde6861488acc3595fa25c456bf60f134b9..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/laser/laser_src/laser_task.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from collections import OrderedDict, defaultdict -import json -import os -import logging -from argparse import ArgumentError - -from fairseq import options, models -from fairseq.data import ( - data_utils, - Dictionary, - LanguagePairDataset, - IndexedDataset, - FairseqDataset, -) -from .multitask_data_utils import ( - MultitaskDatasetWrapper, - MultidatasetEpochBatchIterator, -) - - -from fairseq.tasks import LegacyFairseqTask, register_task - -logger = logging.getLogger(__name__) - - -@register_task("laser") -class LaserTask(LegacyFairseqTask): - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument( - "configfile", metavar="PATH", help="dataset configuration file in json" - ) - parser.add_argument( - "--weighting-alpha", - type=float, - default=None, - help="alpha for automatic weighting", - ) - parser.add_argument( - "--raw-text", action="store_true", help="load raw text dataset" - ) - parser.add_argument( - "--left-pad-source", - default="True", - type=str, - metavar="BOOL", - help="pad the source on the left (default: True)", - ) - parser.add_argument( - "--left-pad-target", - default="False", - type=str, - metavar="BOOL", - help="pad the target on the left (default: False)", - ) - try: - parser.add_argument( - "--max-source-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - except ArgumentError: - # this might have already been defined. Once we transition this to hydra it should be fine to add it here. 
- pass - - def __init__(self, args, config, src_dictionary, tgt_dictionary, num_tasks): - super().__init__(args) - self.config = config - self.src_dictionary = src_dictionary - self.tgt_dictionary = tgt_dictionary - self.num_tasks = num_tasks - - @classmethod - def setup_task(cls, args, **kwargs): - with open(args.configfile, "r") as f: - config = json.load(f) - num_tasks = max(dataset["id"] for dataset in config["train"]) + 1 - - args.left_pad_source = options.eval_bool(args.left_pad_source) - args.left_pad_target = options.eval_bool(args.left_pad_target) - - src_dictionary = Dictionary.load(config["src_vocab"]) - tgt_dictionary = Dictionary.load(config["tgt_vocab"]) - - logger.info( - "| src Dictionary {} : {} types".format( - config["src_vocab"], len(src_dictionary) - ) - ) - logger.info( - "| tgt Dictionary {} : {} types".format( - config["tgt_vocab"], len(tgt_dictionary) - ) - ) - - return cls(args, config, src_dictionary, tgt_dictionary, num_tasks) - - # Experimental overriding for backtranslation - def build_model(self, args): - model = models.build_model(args, self) - return model - - def dataset(self, split): - if split not in self.datasets: - raise KeyError("Dataset not loaded: " + split) - return self.datasets[split] - - def load_dataset(self, split, epoch=1, **kwargs): - """Load a dataset split.""" - - def indexed_dataset(path, dictionary): - if self.args.raw_text: - raise Exception("Unable to handle raw text.") - dataset = IndexedDataset(path, fix_lua_indexing=True) - - return dataset - - pair_datasets = OrderedDict() - - if split == "valid": - self.datasets[split] = pair_datasets - return - - if split not in self.config: - raise FileNotFoundError( - "Dataset not found in config file: {}".format(split) - ) - - size_by_corpus = defaultdict(int) - size_sum = 0 - size_sum_with_subsampling = 0 - init_pair_datasets = {} - - for dataset_config in self.config[split]: - src_path = os.path.dirname(dataset_config["src"]) - corpus_name = src_path.split("/")[-2] - language_pair_name = src_path.split("/")[-1] - pair_datasets_key = corpus_name + "-" + language_pair_name - - logger.info(f"loading... {pair_datasets_key}") - if "src" in dataset_config: - src_dataset = indexed_dataset( - dataset_config["src"], self.src_dictionary - ) - else: - src_dataset = None - - if "tgt" in dataset_config: - tgt_dataset = indexed_dataset( - dataset_config["tgt"], self.tgt_dictionary - ) - else: - tgt_dataset = None - - dataset = LanguagePairDataset( - src_dataset, - src_dataset.sizes, - self.src_dictionary, - tgt_dataset, - tgt_dataset.sizes, - self.tgt_dictionary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - ) - - if pair_datasets_key in init_pair_datasets: - logger.warning( - f"Ignoring already added {pair_datasets_key}. " - f"Consider using `sample` key in order to upsample." 
- ) - else: - init_pair_datasets[pair_datasets_key] = { - "dataset": dataset, - "sample": dataset_config.get("sample", None), - "id": dataset_config.get("id", None), - "len": len(dataset), - } - - length_sum = 0 - weighted_freqs_sum = 0 - freq_per_dataset = {} - vmax = 0 - vmin = 1 - weighted_freq_per_dataset = {} - - if self.args.weighting_alpha: - for key in init_pair_datasets: - if init_pair_datasets[key]["sample"] is None: - length_sum += len(init_pair_datasets[key]["dataset"]) - - for key in init_pair_datasets: - if init_pair_datasets[key]["sample"] is None: - val = float(init_pair_datasets[key]["len"]) / length_sum - freq_per_dataset[key] = val - weighted_freqs_sum += val ** self.args.weighting_alpha - - for key in freq_per_dataset: - val = ( - freq_per_dataset[key] ** self.args.weighting_alpha - / weighted_freqs_sum - ) - vmin = min(vmin, val) - vmax = max(vmax, val) - weighted_freq_per_dataset[key] = val - - for pair_datasets_key in init_pair_datasets: - dataset_config = init_pair_datasets[pair_datasets_key] - dataset = dataset_config["dataset"] - sample = dataset_config["sample"] - if sample is None: - sample = 1.0 - - if pair_datasets_key in weighted_freq_per_dataset: - w = vmax / weighted_freq_per_dataset[pair_datasets_key] - sample = w - - sample = round(sample) - - initial_sample = sample - initial_pair_datasets_key = pair_datasets_key - - while sample >= 1.0: - assert ( - pair_datasets_key not in pair_datasets - ), f"{pair_datasets_key} already in" - size_sum_with_subsampling += len(dataset) - pair_datasets[pair_datasets_key] = MultitaskDatasetWrapper( - dataset, dataset_config.get("id", 0), 1.0, name=pair_datasets_key - ) - size_sum += len(dataset) - sample -= 1.0 - pair_datasets_key += "-up" - - assert sample < 1e-6, f"sample remains > 0 {pair_datasets_key}" - - logger.info( - f"added pair {initial_pair_datasets_key} length {len(dataset)} new_length = {len(dataset)*initial_sample}" - ) - size_by_corpus[corpus_name] += len(dataset) - - self.datasets[split] = pair_datasets - logger.info( - f"Datasets number = {len(self.datasets[split])} size = {size_sum} size_sum_with_subsampling = {size_sum_with_subsampling}" - ) - - @property - def source_dictionary(self): - return self.src_dictionary - - @property - def target_dictionary(self): - return self.tgt_dictionary - - def get_batch_iterator( - self, - dataset, - max_tokens=None, - max_sentences=None, - max_positions=None, - ignore_invalid_inputs=False, - required_batch_size_multiple=1, - seed=1, - num_shards=1, - shard_id=0, - num_workers=0, - epoch=1, - data_buffer_size=0, - disable_iterator_cache=False, - ): - - assert isinstance(dataset, OrderedDict) - assert len(dataset) - assert isinstance(dataset[next(iter(dataset))], FairseqDataset) - - # initialize the dataset with the correct starting epoch - for _, dt in dataset.items(): - dt.set_epoch(epoch) - - indices = OrderedDict() - batch_sampler = OrderedDict() - - with data_utils.numpy_seed(seed + epoch): - for key, dt in dataset.items(): - logger.info(f"\t ordered_indices {key}") - indices[key] = dt.ordered_indices() - - # filter examples that are too large - if max_positions is not None: - for key, dt in dataset.items(): - logger.info(f"\t filter_by_size {key}") - indices[key], ignored = dt.filter_indices_by_size( - indices[key], max_positions - ) - - for key, dt in dataset.items(): - logger.info(f"\t batch_by_size {key}") - batch_sampler[key] = data_utils.batch_by_size( - indices[key], - dt.num_tokens, - max_tokens=max_tokens, - max_sentences=max_sentences, - 
required_batch_size_multiple=required_batch_size_multiple, - ) - - epoch_iter = MultidatasetEpochBatchIterator( - dataset=dataset, - batch_sampler=batch_sampler, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - ) - - return epoch_iter diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/binarize.py b/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/binarize.py deleted file mode 100644 index ee54c6aabf021ca526743f8f1f67b91889e1e335..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/data_scripts/binarize.py +++ /dev/null @@ -1,200 +0,0 @@ -import shutil -import os, sys -from subprocess import check_call, check_output -import glob -import argparse -import shutil -import pathlib -import itertools - -def call_output(cmd): - print(f"Executing: {cmd}") - ret = check_output(cmd, shell=True) - print(ret) - return ret - -def call(cmd): - print(cmd) - check_call(cmd, shell=True) - - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') - sys.exit(-1) - -SPM_PATH = os.environ.get('SPM_PATH', None) - -if SPM_PATH is None or not SPM_PATH.strip(): - print("Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting...") - sys.exit(-1) - - -SPM_MODEL = f'{WORKDIR_ROOT}/sentence.bpe.model' -SPM_VOCAB = f'{WORKDIR_ROOT}/dict_250k.txt' - -SPM_ENCODE = f'{SPM_PATH}' - -if not os.path.exists(SPM_MODEL): - call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/sentence.bpe.model -O {SPM_MODEL}") - - -if not os.path.exists(SPM_VOCAB): - call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/dict_250k.txt -O {SPM_VOCAB}") - - - -def get_data_size(raw): - cmd = f'wc -l {raw}' - ret = call_output(cmd) - return int(ret.split()[0]) - -def encode_spm(model, direction, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None): - src, tgt = direction.split('-') - - for split in splits: - src_raw, tgt_raw = f'{RAW_DIR}/{split}{prefix}.{direction}.{src}', f'{RAW_DIR}/{split}{prefix}.{direction}.{tgt}' - if os.path.exists(src_raw) and os.path.exists(tgt_raw): - cmd = f"""python {SPM_ENCODE} \ - --model {model}\ - --output_format=piece \ - --inputs {src_raw} {tgt_raw} \ - --outputs {BPE_DIR}/{direction}{prefix}/{split}.bpe.{src} {BPE_DIR}/{direction}{prefix}/{split}.bpe.{tgt} """ - print(cmd) - call(cmd) - - -def binarize_( - bpe_dir, - databin_dir, - direction, spm_vocab=SPM_VOCAB, - splits=['train', 'test', 'valid'], -): - src, tgt = direction.split('-') - - try: - shutil.rmtree(f'{databin_dir}', ignore_errors=True) - os.mkdir(f'{databin_dir}') - except OSError as error: - print(error) - cmds = [ - "fairseq-preprocess", - f"--source-lang {src} --target-lang {tgt}", - f"--destdir {databin_dir}/", - f"--workers 8", - ] - if isinstance(spm_vocab, tuple): - src_vocab, tgt_vocab = spm_vocab - cmds.extend( - [ - f"--srcdict {src_vocab}", - f"--tgtdict {tgt_vocab}", - ] - ) - else: - cmds.extend( - [ - f"--joined-dictionary", - f"--srcdict {spm_vocab}", - ] - ) - input_options = [] - if 'train' in splits and glob.glob(f"{bpe_dir}/train.bpe*"): - input_options.append( - f"--trainpref {bpe_dir}/train.bpe", - ) - if 'valid' in splits and glob.glob(f"{bpe_dir}/valid.bpe*"): - input_options.append(f"--validpref 
{bpe_dir}/valid.bpe") - if 'test' in splits and glob.glob(f"{bpe_dir}/test.bpe*"): - input_options.append(f"--testpref {bpe_dir}/test.bpe") - if len(input_options) > 0: - cmd = " ".join(cmds + input_options) - print(cmd) - call(cmd) - - -def binarize( - databin_dir, - direction, spm_vocab=SPM_VOCAB, prefix='', - splits=['train', 'test', 'valid'], - pairs_per_shard=None, -): - def move_databin_files(from_folder, to_folder): - for bin_file in glob.glob(f"{from_folder}/*.bin") \ - + glob.glob(f"{from_folder}/*.idx") \ - + glob.glob(f"{from_folder}/dict*"): - try: - shutil.move(bin_file, to_folder) - except OSError as error: - print(error) - bpe_databin_dir = f"{BPE_DIR}/{direction}{prefix}_databin" - bpe_dir = f"{BPE_DIR}/{direction}{prefix}" - if pairs_per_shard is None: - binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=splits) - move_databin_files(bpe_databin_dir, databin_dir) - else: - # binarize valid and test which will not be sharded - binarize_( - bpe_dir, bpe_databin_dir, direction, - spm_vocab=spm_vocab, splits=[s for s in splits if s != "train"]) - for shard_bpe_dir in glob.glob(f"{bpe_dir}/shard*"): - path_strs = os.path.split(shard_bpe_dir) - shard_str = path_strs[-1] - shard_folder = f"{bpe_databin_dir}/{shard_str}" - databin_shard_folder = f"{databin_dir}/{shard_str}" - print(f'working from {shard_folder} to {databin_shard_folder}') - os.makedirs(databin_shard_folder, exist_ok=True) - binarize_( - shard_bpe_dir, shard_folder, direction, - spm_vocab=spm_vocab, splits=["train"]) - - for test_data in glob.glob(f"{bpe_databin_dir}/valid.*") + glob.glob(f"{bpe_databin_dir}/test.*"): - filename = os.path.split(test_data)[-1] - try: - os.symlink(test_data, f"{databin_shard_folder}/{filename}") - except OSError as error: - print(error) - move_databin_files(shard_folder, databin_shard_folder) - - -def load_langs(path): - with open(path) as fr: - langs = [l.strip() for l in fr] - return langs - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--data_root", default=f"{WORKDIR_ROOT}/ML50") - parser.add_argument("--raw-folder", default='raw') - parser.add_argument("--bpe-folder", default='bpe') - parser.add_argument("--databin-folder", default='databin') - - args = parser.parse_args() - - DATA_PATH = args.data_root #'/private/home/yuqtang/public_data/ML50' - RAW_DIR = f'{DATA_PATH}/{args.raw_folder}' - BPE_DIR = f'{DATA_PATH}/{args.bpe_folder}' - DATABIN_DIR = f'{DATA_PATH}/{args.databin_folder}' - os.makedirs(BPE_DIR, exist_ok=True) - - raw_files = itertools.chain( - glob.glob(f'{RAW_DIR}/train*'), - glob.glob(f'{RAW_DIR}/valid*'), - glob.glob(f'{RAW_DIR}/test*'), - ) - - directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] - - for direction in directions: - prefix = "" - splits = ['train', 'valid', 'test'] - try: - shutil.rmtree(f'{BPE_DIR}/{direction}{prefix}', ignore_errors=True) - os.mkdir(f'{BPE_DIR}/{direction}{prefix}') - os.makedirs(DATABIN_DIR, exist_ok=True) - except OSError as error: - print(error) - spm_model, spm_vocab = SPM_MODEL, SPM_VOCAB - encode_spm(spm_model, direction=direction, splits=splits) - binarize(DATABIN_DIR, direction, spm_vocab=spm_vocab, splits=splits) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fairseq_encoder.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fairseq_encoder.py deleted file mode 100644 index 08cbde15a46e9b6d58e11c2f6052e7cf2d0cc8b2..0000000000000000000000000000000000000000 --- 
a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fairseq_encoder.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, NamedTuple, Optional - -import torch -import torch.nn as nn -from torch import Tensor - - -EncoderOut = NamedTuple( - "EncoderOut", - [ - ("encoder_out", Tensor), # T x B x C - ("encoder_padding_mask", Optional[Tensor]), # B x T - ("encoder_embedding", Optional[Tensor]), # B x T x C - ("encoder_states", Optional[List[Tensor]]), # List[T x B x C] - ("src_tokens", Optional[Tensor]), # B x T - ("src_lengths", Optional[Tensor]), # B x 1 - ], -) - - -class FairseqEncoder(nn.Module): - """Base class for encoders.""" - - def __init__(self, dictionary): - super().__init__() - self.dictionary = dictionary - - def forward(self, src_tokens, src_lengths=None, **kwargs): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of shape - `(batch)` - """ - raise NotImplementedError - - def forward_torchscript(self, net_input: Dict[str, Tensor]): - """A TorchScript-compatible version of forward. - - Encoders which use additional arguments may want to override - this method for TorchScript compatibility. - """ - if torch.jit.is_scripting(): - return self.forward( - src_tokens=net_input["src_tokens"], - src_lengths=net_input["src_lengths"], - ) - else: - return self.forward_non_torchscript(net_input) - - @torch.jit.unused - def forward_non_torchscript(self, net_input: Dict[str, Tensor]): - encoder_input = { - k: v for k, v in net_input.items() if k != "prev_output_tokens" - } - return self.forward(**encoder_input) - - def reorder_encoder_out(self, encoder_out, new_order): - """ - Reorder encoder output according to `new_order`. - - Args: - encoder_out: output from the ``forward()`` method - new_order (LongTensor): desired order - - Returns: - `encoder_out` rearranged according to `new_order` - """ - raise NotImplementedError - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return 1e6 # an arbitrary large number - - def upgrade_state_dict_named(self, state_dict, name): - """Upgrade old state dicts to work with newer code.""" - return state_dict - - def set_num_updates(self, num_updates): - """State from trainer to pass along to model at every update.""" - - def _apply(m): - if hasattr(m, "set_num_updates") and m != self: - m.set_num_updates(num_updates) - - self.apply(_apply) diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/loss/gan_losses.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/loss/gan_losses.py deleted file mode 100644 index 3b7ecb187745408292200b59f5d72ec7f4c95bb2..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/loss/gan_losses.py +++ /dev/null @@ -1,22 +0,0 @@ - - -import torch -import torch.nn.functional as F - -def exp_d_loss(logits_real, logits_fake): - loss_real = torch.mean(torch.exp(-logits_real)) - loss_fake = torch.mean(torch.exp(logits_fake)) - d_loss = 0.5 * (loss_real + loss_fake) - return d_loss - -def hinge_d_loss(logits_real, logits_fake): - loss_real = torch.mean(F.relu(1. - logits_real)) - loss_fake = torch.mean(F.relu(1. 
+ logits_fake)) - d_loss = 0.5 * (loss_real + loss_fake) - return d_loss - -def vanilla_d_loss(logits_real, logits_fake): - d_loss = 0.5 * ( - torch.mean(F.softplus(-logits_real)) + - torch.mean(F.softplus(logits_fake))) - return d_loss \ No newline at end of file diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/encoder_preprocess.py b/spaces/mygyasir/Real-Time-Voice-Cloning/encoder_preprocess.py deleted file mode 100644 index 11502013c8d75d4652fb0ffdcdc49d55e8fb8bc9..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/encoder_preprocess.py +++ /dev/null @@ -1,70 +0,0 @@ -from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2 -from utils.argutils import print_args -from pathlib import Path -import argparse - -if __name__ == "__main__": - class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): - pass - - parser = argparse.ArgumentParser( - description="Preprocesses audio files from datasets, encodes them as mel spectrograms and " - "writes them to the disk. This will allow you to train the encoder. The " - "datasets required are at least one of VoxCeleb1, VoxCeleb2 and LibriSpeech. " - "Ideally, you should have all three. You should extract them as they are " - "after having downloaded them and put them in a same directory, e.g.:\n" - "-[datasets_root]\n" - " -LibriSpeech\n" - " -train-other-500\n" - " -VoxCeleb1\n" - " -wav\n" - " -vox1_meta.csv\n" - " -VoxCeleb2\n" - " -dev", - formatter_class=MyFormatter - ) - parser.add_argument("datasets_root", type=Path, help=\ - "Path to the directory containing your LibriSpeech/TTS and VoxCeleb datasets.") - parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ - "Path to the output directory that will contain the mel spectrograms. If left out, " - "defaults to /SV2TTS/encoder/") - parser.add_argument("-d", "--datasets", type=str, - default="librispeech_other,voxceleb1,voxceleb2", help=\ - "Comma-separated list of the name of the datasets you want to preprocess. Only the train " - "set of these datasets will be used. Possible names: librispeech_other, voxceleb1, " - "voxceleb2.") - parser.add_argument("-s", "--skip_existing", action="store_true", help=\ - "Whether to skip existing output files with the same name. Useful if this script was " - "interrupted.") - parser.add_argument("--no_trim", action="store_true", help=\ - "Preprocess audio without trimming silences (not recommended).") - args = parser.parse_args() - - # Verify webrtcvad is available - if not args.no_trim: - try: - import webrtcvad - except: - raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " - "noise removal and is recommended. Please install and try again. 
If installation fails, " - "use --no_trim to disable this error message.") - del args.no_trim - - # Process the arguments - args.datasets = args.datasets.split(",") - if not hasattr(args, "out_dir"): - args.out_dir = args.datasets_root.joinpath("SV2TTS", "encoder") - assert args.datasets_root.exists() - args.out_dir.mkdir(exist_ok=True, parents=True) - - # Preprocess the datasets - print_args(args, parser) - preprocess_func = { - "librispeech_other": preprocess_librispeech, - "voxceleb1": preprocess_voxceleb1, - "voxceleb2": preprocess_voxceleb2, - } - args = vars(args) - for dataset in args.pop("datasets"): - print("Preprocessing %s" % dataset) - preprocess_func[dataset](**args) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/hooks/createContext.tsx b/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/hooks/createContext.tsx deleted file mode 100644 index c363be6afed0ea17e0f9fabf6ec67b3cf168be7a..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/src/components/hooks/createContext.tsx +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// All rights reserved. - -// This source code is licensed under the license found in the -// LICENSE file in the root directory of this source tree. - -import { createContext } from "react"; -import { modelInputProps } from "../helpers/Interfaces"; - -interface contextProps { - clicks: [ - clicks: modelInputProps[] | null, - setClicks: (e: modelInputProps[] | null) => void - ]; - image: [ - image: HTMLImageElement | null, - setImage: (e: HTMLImageElement | null) => void - ]; - maskImg: [ - maskImg: HTMLImageElement | null, - setMaskImg: (e: HTMLImageElement | null) => void - ]; -} - -const AppContext = createContext(null); - -export default AppContext; diff --git a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/tailwind.config.js b/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/tailwind.config.js deleted file mode 100644 index e92b38b8fe466d9592f9eaff10de94803b320154..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/demo/tailwind.config.js +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Meta Platforms, Inc. and affiliates. -// All rights reserved. - -// This source code is licensed under the license found in the -// LICENSE file in the root directory of this source tree. - -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: ["./src/**/*.{html,js,tsx}"], - theme: {}, - plugins: [], -}; diff --git a/spaces/nateraw/deepafx-st/deepafx_st/models/encoder.py b/spaces/nateraw/deepafx-st/deepafx_st/models/encoder.py deleted file mode 100644 index 9c7bc6303d250f127492f20cfa15f5978c7da1ba..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/models/encoder.py +++ /dev/null @@ -1,113 +0,0 @@ -import torch - -from deepafx_st.models.mobilenetv2 import MobileNetV2 -from deepafx_st.models.efficient_net import EfficientNet - - -class SpectralEncoder(torch.nn.Module): - def __init__( - self, - num_params, - sample_rate, - encoder_model="mobilenet_v2", - embed_dim=1028, - width_mult=1, - min_level_db=-80, - ): - """Encoder operating on spectrograms. - - Args: - num_params (int): Number of processor parameters to generate. - sample_rate (float): Audio sample rate for computing melspectrogram. - encoder_model (str, optional): Encoder model architecture. 
Default: "mobilenet_v2" - embed_dim (int, optional): Dimentionality of the encoder representations. - width_mult (int, optional): Encoder size. Default: 1 - min_level_db (float, optional): Minimal dB value for the spectrogram. Default: -80 - """ - super().__init__() - self.num_params = num_params - self.sample_rate = sample_rate - self.encoder_model = encoder_model - self.embed_dim = embed_dim - self.width_mult = width_mult - self.min_level_db = min_level_db - - # load model from torch.hub - if encoder_model == "mobilenet_v2": - self.encoder = MobileNetV2(embed_dim=embed_dim, width_mult=width_mult) - elif encoder_model == "efficient_net": - self.encoder = EfficientNet.from_name( - "efficientnet-b2", - in_channels=1, - image_size=(128, 65), - include_top=False, - ) - self.embedding_projection = torch.nn.Conv2d( - in_channels=1408, - out_channels=embed_dim, - kernel_size=(1, 1), - stride=(1, 1), - padding=(0, 0), - bias=True, - ) - - else: - raise ValueError(f"Invalid encoder_model: {encoder_model}.") - - self.window = torch.nn.Parameter(torch.hann_window(4096)) - - def forward(self, x): - """ - Args: - x (Tensor): Input waveform of shape [batch x channels x samples] - - Returns: - e (Tensor): Latent embedding produced by Encoder. [batch x embed_dim] - """ - bs, chs, samp = x.size() - - # compute spectrogram of waveform - X = torch.stft( - x.view(bs, -1), - 4096, - 2048, - window=self.window, - return_complex=True, - ) - X_db = torch.pow(X.abs() + 1e-8, 0.3) - X_db_norm = X_db - - # standardize (0, 1) 0.322970 0.278452 - X_db_norm -= 0.322970 - X_db_norm /= 0.278452 - X_db_norm = X_db_norm.unsqueeze(1).permute(0, 1, 3, 2) - - if self.encoder_model == "mobilenet_v2": - # repeat channels by 3 to fit vision model - X_db_norm = X_db_norm.repeat(1, 3, 1, 1) - - # pass melspectrogram through encoder - e = self.encoder(X_db_norm) - - # apply avg pooling across time for encoder embeddings - e = torch.nn.functional.adaptive_avg_pool2d(e, 1).reshape(e.shape[0], -1) - - # normalize by L2 norm - norm = torch.norm(e, p=2, dim=-1, keepdim=True) - e_norm = e / norm - - elif self.encoder_model == "efficient_net": - - # Efficient Net internal downsamples by 32 on time and freq axis, then average pools the rest - e = self.encoder(X_db_norm) - - # Adding 1x1 conv to project down or up to the requested embedding size - e = self.embedding_projection(e) - e = torch.squeeze(e, dim=3) - e = torch.squeeze(e, dim=2) - - # normalize by L2 norm - norm = torch.norm(e, p=2, dim=-1, keepdim=True) - e_norm = e / norm - - return e_norm diff --git a/spaces/nathanTQ/ChatDev/camel/messages/base.py b/spaces/nathanTQ/ChatDev/camel/messages/base.py deleted file mode 100644 index 16c84e43f506456713866598b26e6bc16088602b..0000000000000000000000000000000000000000 --- a/spaces/nathanTQ/ChatDev/camel/messages/base.py +++ /dev/null @@ -1,302 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union - -from camel.messages import ( - OpenAIAssistantMessage, - OpenAIChatMessage, - OpenAIMessage, - OpenAISystemMessage, - OpenAIUserMessage, -) -from camel.prompts import CodePrompt, TextPrompt -from camel.typing import ModelType, RoleType - - -@dataclass -class BaseMessage: - r"""Base class for message objects used in CAMEL chat system. - - Args: - role_name (str): The name of the user or assistant role. - role_type (RoleType): The type of role, either - :obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`. - meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary - for the message. - role (str): The role of the message in OpenAI chat system, either - :obj:`"system"`, :obj:`"user"`, or :obj:`"assistant"`. - content (str): The content of the message. - """ - role_name: str - role_type: RoleType - meta_dict: Optional[Dict[str, str]] - role: str - content: str - - def __getattribute__(self, name: str) -> Any: - r"""Get attribute override to delegate string methods to the - :obj:`content`. - - Args: - name (str): The name of the attribute. - - Returns: - Any: The attribute value. - """ - delegate_methods = [ - method for method in dir(str) if not method.startswith('_') - ] - if name in delegate_methods: - content = super().__getattribute__('content') - if isinstance(content, str): - content_method = getattr(content, name, None) - if callable(content_method): - - def modify_arg(arg: Any) -> Any: - r"""Modify the argument for delegate method. - - Args: - arg (Any): The argument value. - - Returns: - Any: The modified argument value. - """ - if isinstance(arg, BaseMessage): - return arg.content - elif isinstance(arg, (list, tuple)): - return type(arg)(modify_arg(item) for item in arg) - else: - return arg - - def wrapper(*args: Any, **kwargs: Any) -> Any: - r"""Wrapper function for delegate method. - - Args: - *args (Any): Variable length argument list. - **kwargs (Any): Arbitrary keyword arguments. - - Returns: - Any: The result of the delegate method. - """ - modified_args = [modify_arg(arg) for arg in args] - modified_kwargs = { - k: modify_arg(v) - for k, v in kwargs.items() - } - output = content_method(*modified_args, - **modified_kwargs) - return self._create_new_instance(output) if isinstance( - output, str) else output - - return wrapper - - return super().__getattribute__(name) - - def _create_new_instance(self, content: str) -> "BaseMessage": - r"""Create a new instance of the :obj:`BaseMessage` with updated - content. - - Args: - content (str): The new content value. - - Returns: - BaseMessage: The new instance of :obj:`BaseMessage`. - """ - return self.__class__(role_name=self.role_name, - role_type=self.role_type, - meta_dict=self.meta_dict, role=self.role, - content=content) - - def __add__(self, other: Any) -> Union["BaseMessage", Any]: - r"""Addition operator override for :obj:`BaseMessage`. - - Args: - other (Any): The value to be added with. - - Returns: - Union[BaseMessage, Any]: The result of the addition. 
- """ - if isinstance(other, BaseMessage): - combined_content = self.content.__add__(other.content) - elif isinstance(other, str): - combined_content = self.content.__add__(other) - else: - raise TypeError( - f"Unsupported operand type(s) for +: '{type(self)}' and " - f"'{type(other)}'") - return self._create_new_instance(combined_content) - - def __mul__(self, other: Any) -> Union["BaseMessage", Any]: - r"""Multiplication operator override for :obj:`BaseMessage`. - - Args: - other (Any): The value to be multiplied with. - - Returns: - Union[BaseMessage, Any]: The result of the multiplication. - """ - if isinstance(other, int): - multiplied_content = self.content.__mul__(other) - return self._create_new_instance(multiplied_content) - else: - raise TypeError( - f"Unsupported operand type(s) for *: '{type(self)}' and " - f"'{type(other)}'") - - def __len__(self) -> int: - r"""Length operator override for :obj:`BaseMessage`. - - Returns: - int: The length of the content. - """ - return len(self.content) - - def __contains__(self, item: str) -> bool: - r"""Contains operator override for :obj:`BaseMessage`. - - Args: - item (str): The item to check for containment. - - Returns: - bool: :obj:`True` if the item is contained in the content, - :obj:`False` otherwise. - """ - return item in self.content - - def token_len(self, model: ModelType = ModelType.GPT_3_5_TURBO) -> int: - r"""Calculate the token length of the message for the specified model. - - Args: - model (ModelType, optional): The model type to calculate the token - length. (default: :obj:`ModelType.GPT_3_5_TURBO`) - - Returns: - int: The token length of the message. - """ - from camel.utils import num_tokens_from_messages - return num_tokens_from_messages([self.to_openai_chat_message()], model) - - def extract_text_and_code_prompts( - self) -> Tuple[List[TextPrompt], List[CodePrompt]]: - r"""Extract text and code prompts from the message content. - - Returns: - Tuple[List[TextPrompt], List[CodePrompt]]: A tuple containing a - list of text prompts and a list of code prompts extracted - from the content. - """ - text_prompts: List[TextPrompt] = [] - code_prompts: List[CodePrompt] = [] - - lines = self.content.split("\n") - idx = 0 - start_idx = 0 - while idx < len(lines): - while idx < len(lines) and ( - not lines[idx].lstrip().startswith("```")): - idx += 1 - text = "\n".join(lines[start_idx:idx]).strip() - text_prompts.append(TextPrompt(text)) - - if idx >= len(lines): - break - - code_type = lines[idx].strip()[3:].strip() - idx += 1 - start_idx = idx - while not lines[idx].lstrip().startswith("```"): - idx += 1 - code = "\n".join(lines[start_idx:idx]).strip() - code_prompts.append(CodePrompt(code, code_type=code_type)) - - idx += 1 - start_idx = idx - - return text_prompts, code_prompts - - def to_openai_message(self, role: Optional[str] = None) -> OpenAIMessage: - r"""Converts the message to an :obj:`OpenAIMessage` object. - - Args: - role (Optional[str]): The role of the message in OpenAI chat - system, either :obj:`"system"`, :obj:`"user"`, or - obj:`"assistant"`. (default: :obj:`None`) - - Returns: - OpenAIMessage: The converted :obj:`OpenAIMessage` object. - """ - role = role or self.role - if role not in {"system", "user", "assistant"}: - raise ValueError(f"Unrecognized role: {role}") - return {"role": role, "content": self.content} - - def to_openai_chat_message( - self, - role: Optional[str] = None, - ) -> OpenAIChatMessage: - r"""Converts the message to an :obj:`OpenAIChatMessage` object. 
- - Args: - role (Optional[str]): The role of the message in OpenAI chat - system, either :obj:`"user"`, or :obj:`"assistant"`. - (default: :obj:`None`) - - Returns: - OpenAIChatMessage: The converted :obj:`OpenAIChatMessage` object. - """ - role = role or self.role - if role not in {"user", "assistant"}: - raise ValueError(f"Unrecognized role: {role}") - return {"role": role, "content": self.content} - - def to_openai_system_message(self) -> OpenAISystemMessage: - r"""Converts the message to an :obj:`OpenAISystemMessage` object. - - Returns: - OpenAISystemMessage: The converted :obj:`OpenAISystemMessage` - object. - """ - return {"role": "system", "content": self.content} - - def to_openai_user_message(self) -> OpenAIUserMessage: - r"""Converts the message to an :obj:`OpenAIUserMessage` object. - - Returns: - OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object. - """ - return {"role": "user", "content": self.content} - - def to_openai_assistant_message(self) -> OpenAIAssistantMessage: - r"""Converts the message to an :obj:`OpenAIAssistantMessage` object. - - Returns: - OpenAIAssistantMessage: The converted :obj:`OpenAIAssistantMessage` - object. - """ - return {"role": "assistant", "content": self.content} - - def to_dict(self) -> Dict: - r"""Converts the message to a dictionary. - - Returns: - dict: The converted dictionary. - """ - return { - "role_name": self.role_name, - "role_type": self.role_type.name, - **(self.meta_dict or {}), - "role": self.role, - "content": self.content, - } diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cleanmymac11010activation.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cleanmymac11010activation.md deleted file mode 100644 index f6767bfdea36e60a29ce2763b17094172b5f2b73..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cleanmymac11010activation.md +++ /dev/null @@ -1,37 +0,0 @@ - -

    How to Install and Activate CleanMyMac X on Your Mac

    -

    CleanMyMac X is a powerful and easy-to-use app that helps you clean, optimize, and protect your Mac from junk, malware, and other issues. Whether you want to free up some space, speed up your Mac, or keep it safe from threats, CleanMyMac X can help you do it in a few clicks.

    -

    But before you can enjoy all the benefits of CleanMyMac X, you need to install and activate it on your Mac. Depending on where you got the app from, the process may vary slightly. In this article, we'll show you how to install and activate CleanMyMac X from different sources: the MacPaw Store, the App Store, and Setapp.

    -

    cleanmymac11010activation


    Download Zip >>> https://urlcod.com/2uIctK



    -

    Installing and activating CleanMyMac X from the MacPaw Store

    -

    If you bought CleanMyMac X from the MacPaw Store, or you have a license key that looks like this: id012345678910odr, then you need to follow these steps to install and activate the app:

    -
      -
    1. Download CleanMyMac X from the MacPaw website.
    2. -
    3. Double-click the downloaded CleanMyMac.dmg file and drag the CleanMyMac X icon to your Applications folder.
    4. -
    5. Open CleanMyMac X from your Applications folder or Launchpad.
    6. -
    7. Click the Unlock Full Version button in the sidebar on the left, then click Activate Now.
    8. -
    9. Sign in with your MacPaw Account credentials (email and password) or enter your activation number. If you don't have a MacPaw Account yet, you can create one here.
    10. -
    11. If you entered the data correctly, CleanMyMac X will unlock its full functionality.
    12. -
    -

    If you haven't purchased CleanMyMac X yet, you can start a free 7-day trial by clicking the Start Trial button in the sidebar on the left. You can also buy a plan from the MacPaw Store by clicking the Buy Plan button.

    -

    Installing and activating CleanMyMac X from the App Store

    -

    If you prefer to get CleanMyMac X from the App Store, then you need to follow these steps to install and activate the app:

    -

    -
      -
    1. Open the App Store app on your Mac and search for CleanMyMac X.
    2. -
    3. Click the Get button and then click Open.
    4. -
    5. Open CleanMyMac X from your Applications folder or Launchpad.
    6. -
    7. Click the Unlock Full Version button in the sidebar on the left.
    8. -
    9. Follow the instructions on the screen to start your free 7-day trial or purchase a full version of CleanMyMac X.
    10. -
    -

    Note that the App Store version of CleanMyMac X has some differences in functionality compared to the MacPaw Store version. You can learn more about them here.

    -

    Installing and activating CleanMyMac X from Setapp

    -

    If you have Setapp, a subscription service that gives you access to hundreds of apps for a monthly fee, then you already have CleanMyMac X! All you need to do is find it in the Setapp catalog on your Mac and install it. Here's how:

    -
      -
    1. Open Setapp on your Mac and search for CleanMyMac X.
    2. -
    3. Click the Install button and then click Open.
    4. -
    5. Open CleanMyMac X from your Applications folder or Launchpad.
    6. -
    -

    You don't need any license keys or subscriptions to use CleanMyMac X with Setapp. You can enjoy all its features as long as you have an active Setapp subscription.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Easy Photo Mosaic Maker 4.21 Download [HOT].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Easy Photo Mosaic Maker 4.21 Download [HOT].md deleted file mode 100644 index 8fa210f2d200ae992a41239272278ffdedbad8c8..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Easy Photo Mosaic Maker 4.21 Download [HOT].md +++ /dev/null @@ -1,30 +0,0 @@ -
    -

    Easy Photo Mosaic Maker 4.21 Download: Create Stunning Mosaics from Your Photos

    -

    Do you want to turn your photos into beautiful mosaics that you can print, share, or use as wallpapers? If so, you need to try Easy Photo Mosaic Maker 4.21, the latest version of the popular software that lets you create amazing mosaics in minutes.

    -

    Easy Photo Mosaic Maker 4.21 is a powerful and easy-to-use tool that allows you to create mosaics from any photo or image. You can choose from hundreds of templates or design your own, customize the size, shape, and color of the tiles, and adjust the brightness, contrast, and saturation of the mosaic. You can also add text, logos, frames, and other effects to make your mosaic more unique and attractive.

    -

    Easy Photo Mosaic Maker 4.21 Download


    Download File >>> https://urlcod.com/2uIbIl



    -

    How to Use Easy Photo Mosaic Maker 4.21

    -

    Using Easy Photo Mosaic Maker 4.21 is very simple and fun. You just need to follow these steps:

    -
      -
    1. Download and install Easy Photo Mosaic Maker 4.21 from the official website. The software is compatible with Windows XP/Vista/7/8/10 and requires 256 MB of RAM and 100 MB of disk space.
    2. -
    3. Launch the software and click on "New Project" to start a new mosaic.
    4. -
    5. Select the photo or image that you want to use as the source for your mosaic. You can browse your computer or drag and drop the file into the software.
    6. -
    7. Choose a template for your mosaic from the gallery or click on "Customize" to create your own. You can select the number, shape, and size of the tiles, as well as the color mode and the background color.
    8. -
    9. Click on "Generate" to see a preview of your mosaic. You can zoom in and out, pan, and rotate the mosaic to check the details.
    10. -
    11. If you are satisfied with the result, click on "Save" to save your mosaic as a JPG, PNG, BMP, or TIFF file. You can also print your mosaic or share it online via email or social media.
    12. -
    -

    Why Choose Easy Photo Mosaic Maker 4.21

    -

    Easy Photo Mosaic Maker 4.21 is not just another photo editing program. It is a specialized tool that offers many advantages over other similar products. Here are some of them:

    -
      -
    • It is fast and easy to use. You can create stunning mosaics in minutes without any technical skills or experience.
    • -
    • It is versatile and flexible. You can use any photo or image as the source for your mosaic, and customize every aspect of your mosaic according to your preferences and needs.
    • -
    • It is high-quality and realistic. The software uses advanced algorithms to ensure that your mosaic looks natural and seamless, without any visible gaps or distortions.
    • -
    • It is fun and creative. You can experiment with different templates, colors, effects, and texts to make your mosaic more personal and original.
    • -
    -

    Download Easy Photo Mosaic Maker 4.21 Now

    -

    If you are looking for a simple and effective way to transform your photos into beautiful mosaics, you should not miss Easy Photo Mosaic Maker 4.21. This software will help you create amazing mosaics that you can enjoy and share with your friends and family.

    -

    To download Easy Photo Mosaic Maker 4.21 now, click on the button below:

    -Download Easy Photo Mosaic Maker 4.21

    -

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Embird 2012 Crack 242 BEST.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Embird 2012 Crack 242 BEST.md deleted file mode 100644 index 1f41bcbe56dda3d9b0a17d1851760681be72eb63..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Embird 2012 Crack 242 BEST.md +++ /dev/null @@ -1,39 +0,0 @@ - -

    How to Download and Install Embird 2012 Crack 242

    -

    Embird 2012 is a popular embroidery program that allows you to create, edit, and digitize embroidery designs. It also supports various embroidery formats and machines. However, Embird 2012 is not free software and requires a license key to activate. If you want to use Embird 2012 without paying for it, you might be tempted to download and install a cracked version of the software. But is it worth it?

    -

    In this article, we will show you how to download and install Embird 2012 crack 242, which is one of the latest versions of the cracked software available online. We will also discuss the risks and disadvantages of using cracked software and why you should avoid it.

    -

    Embird 2012 Crack 242


    DOWNLOAD - https://urlcod.com/2uIbq4



    - -

    How to Download and Install Embird 2012 Crack 242

    -

    To download and install Embird 2012 crack 242, you will need to follow these steps:

    -
      -
    1. Go to a website that offers Embird 2012 crack 242 for download. You can search for it on Google or use one of the links below:
    2. - -
    3. Click on the download button and save the file to your computer.
    4. -
    5. Extract the file using a program like WinRAR or 7-Zip.
    6. -
    7. Run the setup.exe file and follow the installation instructions.
    8. -
    9. Copy the crack file from the extracted folder and paste it into the installation directory of Embird 2012.
    10. -
    11. Launch Embird 2012 and enjoy using it without a license key.
    12. -
    - -

    The Risks and Disadvantages of Using Embird 2012 Crack 242

    -

    While downloading and installing Embird 2012 crack 242 might seem like an easy and convenient way to use the software for free, it comes with many risks and disadvantages that you should be aware of. Here are some of them:

    -

    -
      -
    • You might be violating the intellectual property rights of the original developers of Embird 2012. This could result in legal consequences or fines if you are caught using cracked software.
    • -
    • You might be exposing your computer to viruses, malware, spyware, or other harmful programs that could damage your system or steal your personal information. Cracked software often contains hidden malicious code that can infect your computer without your knowledge.
    • -
    • You might be compromising the quality and functionality of Embird 2012. Cracked software often has bugs, errors, glitches, or missing features that can affect your embroidery projects. You might also experience crashes, freezes, or slow performance when using cracked software.
    • -
    • You might be missing out on updates, support, and new features from the original developers of Embird 2012. Cracked software usually does not receive any updates or patches that can fix issues or improve the software. You might also not be able to access customer support or online resources from the official website of Embird 2012.
    • -
    - -

    Why You Should Avoid Using Embird 2012 Crack 242

    -

    As you can see, using Embird 2012 crack 242 is not worth the risk or hassle. You might end up harming your computer, violating the law, or ruining your embroidery projects. Instead of using cracked software, you should consider buying a legitimate license key from the official website of Embird 2012. By doing so, you will be able to enjoy the full benefits of the software, such as:

    -
      -
    • Accessing all the features and functions of Embird 2012 without any limitations or restrictions.
    • -
    • Receiving regular updates and patches

      e93f5a0c3f
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Main Prem Ki Diwani Hoon Movie Kickass 720p Movies ((HOT)).md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Main Prem Ki Diwani Hoon Movie Kickass 720p Movies ((HOT)).md deleted file mode 100644 index d0b3690a0a351bb3c89cabed2da5d17e2e839a1f..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Main Prem Ki Diwani Hoon Movie Kickass 720p Movies ((HOT)).md +++ /dev/null @@ -1,22 +0,0 @@ -
      -Here is a possible title and article for the keyword "Main Prem Ki Diwani Hoon movie kickass 720p movies". I have used code blocks to encapsulate the HTML formatting. - -

      Main Prem Ki Diwani Hoon Movie: A Romantic Comedy with a Twist

      - -

      If you are looking for a fun and entertaining Bollywood movie to watch, you might want to check out Main Prem Ki Diwani Hoon. This 2003 romantic comedy film stars Hrithik Roshan, Kareena Kapoor and Abhishek Bachchan in a love triangle that will keep you guessing until the end.

      -

      Main Prem Ki Diwani Hoon movie kickass 720p movies


    Download Zip >>> https://urlcod.com/2uIcoe



      - -

      The story revolves around Sanjana (Kapoor), a young woman who is pressured by her family to marry Prem (Bachchan), the son of a wealthy businessman. However, she falls in love with another Prem (Roshan), a carefree and adventurous guy who works as a manager in a music company. Little does she know that he is actually the real son of the businessman, while the other Prem is an impostor hired by his uncle to fool the family.

      - -

      What follows is a series of hilarious and romantic situations, as Sanjana tries to figure out who is the real Prem and who is the fake one. The film also features some catchy songs and dances, such as Bani Bani, Kasam Ki Kasam and O Ajnabi.

      - -

    If you want to watch the Main Prem Ki Diwani Hoon movie in high quality, you can download it from kickass 720p movies. Kickass 720p movies is a website that offers a wide range of Bollywood movies in HD resolution. You can easily find and download your favorite movies from this site, without any hassle or registration.

      - -

    So, what are you waiting for? Download the Main Prem Ki Diwani Hoon movie from kickass 720p movies today and enjoy this entertaining film with your friends and family.

      -

      Here is a possible continuation of the article for the keyword "Main Prem Ki Diwani Hoon movie kickass 720p movies". I have used code blocks to encapsulate the HTML formatting. - -

    Main Prem Ki Diwani Hoon has received mixed reviews from critics and audiences. Some have praised the film for its colorful visuals, energetic performances and catchy music, while others have criticized it for its over-the-top acting, weak plot and excessive length. The film has a rating of 4/10 on IMDb[^1^], 2/5 on Rotten Tomatoes[^3^] and 1/5 on Rediff. However, some reviewers have also found the film to be so bad that it is good, and have enjoyed it as a comedy or a parody of Bollywood clichés. For instance, comedian Kanan Gill has made a hilarious video review of the film on YouTube[^2^], where he mocks the film's absurdity and excessiveness.

      - -

    Whether you love it or hate it, the Main Prem Ki Diwani Hoon movie is certainly a memorable experience that will not leave you indifferent. You can watch it for yourself and form your own opinion by downloading it from kickass 720p movies. Kickass 720p movies is a reliable and safe source of high-quality Bollywood movies that you can enjoy on your laptop, tablet or smartphone. You don't need to pay any fees or register any account to access the movies on this site. Just click on the link below and start downloading the Main Prem Ki Diwani Hoon movie from kickass 720p movies now.

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/builtin_meta.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/builtin_meta.py deleted file mode 100644 index 63c7a1a31b31dd89b82011effee26471faccacf5..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/data/datasets/builtin_meta.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -""" -Note: -For your custom dataset, there is no need to hard-code metadata anywhere in the code. -For example, for COCO-format dataset, metadata will be obtained automatically -when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways -during loading. - -However, we hard-coded metadata for a few common dataset here. -The only goal is to allow users who don't have these dataset to use pre-trained models. -Users don't have to download a COCO json (which contains metadata), in order to visualize a -COCO model (with correct class names and colors). -""" - - -# All coco categories, together with their nice-looking visualization colors -# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json -COCO_CATEGORIES = [ - {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, - {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, - {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, - {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, - {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, - {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, - {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, - {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, - {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, - {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, - {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, - {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, - {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, - {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, - {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, - {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, - {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, - {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, - {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, - {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, - {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, - {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, - {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, - {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, - {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, - {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, - {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, - {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, - {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, - {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, - {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, - {"color": [133, 129, 255], "isthing": 1, "id": 36, 
"name": "snowboard"}, - {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, - {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, - {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, - {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, - {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, - {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, - {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, - {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, - {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, - {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, - {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, - {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, - {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, - {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, - {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, - {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, - {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, - {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, - {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, - {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, - {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, - {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, - {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, - {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, - {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, - {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, - {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, - {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, - {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, - {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, - {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, - {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, - {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, - {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, - {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, - {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, - {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, - {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, - {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, - {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, - {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, - {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, - {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, - {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, - {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, - {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, - {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, - {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, - {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, - {"color": [147, 211, 
203], "isthing": 0, "id": 93, "name": "blanket"}, - {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, - {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, - {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, - {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, - {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, - {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, - {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, - {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, - {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, - {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, - {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, - {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, - {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, - {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, - {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, - {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, - {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, - {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, - {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, - {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, - {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, - {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, - {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, - {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, - {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, - {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, - {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, - {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, - {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, - {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, - {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, - {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, - {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, - {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, - {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, - {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, - {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, - {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, - {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, - {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, - {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, - {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, - {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, - {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, - {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, - {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, - {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": 
"food-other-merged"}, - {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, - {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, - {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, - {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, -] - -# fmt: off -COCO_PERSON_KEYPOINT_NAMES = ( - "nose", - "left_eye", "right_eye", - "left_ear", "right_ear", - "left_shoulder", "right_shoulder", - "left_elbow", "right_elbow", - "left_wrist", "right_wrist", - "left_hip", "right_hip", - "left_knee", "right_knee", - "left_ankle", "right_ankle", -) -# fmt: on - -# Pairs of keypoints that should be exchanged under horizontal flipping -COCO_PERSON_KEYPOINT_FLIP_MAP = ( - ("left_eye", "right_eye"), - ("left_ear", "right_ear"), - ("left_shoulder", "right_shoulder"), - ("left_elbow", "right_elbow"), - ("left_wrist", "right_wrist"), - ("left_hip", "right_hip"), - ("left_knee", "right_knee"), - ("left_ankle", "right_ankle"), -) - -# rules for pairs of keypoints to draw a line between, and the line color to use. -KEYPOINT_CONNECTION_RULES = [ - # face - ("left_ear", "left_eye", (102, 204, 255)), - ("right_ear", "right_eye", (51, 153, 255)), - ("left_eye", "nose", (102, 0, 204)), - ("nose", "right_eye", (51, 102, 255)), - # upper-body - ("left_shoulder", "right_shoulder", (255, 128, 0)), - ("left_shoulder", "left_elbow", (153, 255, 204)), - ("right_shoulder", "right_elbow", (128, 229, 255)), - ("left_elbow", "left_wrist", (153, 255, 153)), - ("right_elbow", "right_wrist", (102, 255, 224)), - # lower-body - ("left_hip", "right_hip", (255, 102, 0)), - ("left_hip", "left_knee", (255, 255, 77)), - ("right_hip", "right_knee", (153, 255, 204)), - ("left_knee", "left_ankle", (191, 255, 128)), - ("right_knee", "right_ankle", (255, 195, 77)), -] - -# All Cityscapes categories, together with their nice-looking visualization colors -# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa -CITYSCAPES_CATEGORIES = [ - {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"}, - {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"}, - {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"}, - {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"}, - {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"}, - {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"}, - {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"}, - {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"}, - {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"}, - {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"}, - {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"}, - {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"}, - {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"}, - {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"}, - {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"}, - {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"}, - {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"}, - {"color": (0, 0, 230), "isthing": 1, 
"id": 32, "trainId": 17, "name": "motorcycle"}, - {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"}, -] - -# fmt: off -ADE20K_SEM_SEG_CATEGORIES = [ - "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa -] -# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore -# fmt: on - - -def _get_coco_instances_meta(): - thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1] - thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] - assert len(thing_ids) == 80, len(thing_ids) - # Mapping from the incontiguous COCO category id to an id in [0, 79] - thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} - thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] - ret = { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes, - "thing_colors": thing_colors, - } - return ret - - -def _get_coco_panoptic_separated_meta(): - """ - Returns metadata for "separated" version of the panoptic segmentation dataset. 
- """ - stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0] - assert len(stuff_ids) == 53, len(stuff_ids) - - # For semantic segmentation, this mapping maps from contiguous stuff id - # (in [0, 53], used in models) to ids in the dataset (used for processing results) - # The id 0 is mapped to an extra category "thing". - stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)} - # When converting COCO panoptic annotations to semantic annotations - # We label the "thing" category to 0 - stuff_dataset_id_to_contiguous_id[0] = 0 - - # 54 names for COCO stuff categories (including "things") - stuff_classes = ["things"] + [ - k["name"].replace("-other", "").replace("-merged", "") - for k in COCO_CATEGORIES - if k["isthing"] == 0 - ] - - # NOTE: I randomly picked a color for things - stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0] - ret = { - "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, - "stuff_classes": stuff_classes, - "stuff_colors": stuff_colors, - } - ret.update(_get_coco_instances_meta()) - return ret - - -def _get_builtin_metadata(dataset_name): - if dataset_name == "coco": - return _get_coco_instances_meta() - if dataset_name == "coco_panoptic_separated": - return _get_coco_panoptic_separated_meta() - elif dataset_name == "coco_panoptic_standard": - meta = {} - # The following metadata maps contiguous id from [0, #thing categories + - # #stuff categories) to their names and colors. We have to replica of the - # same name and color under "thing_*" and "stuff_*" because the current - # visualization function in D2 handles thing and class classes differently - # due to some heuristic used in Panoptic FPN. We keep the same naming to - # enable reusing existing visualization functions. - thing_classes = [k["name"] for k in COCO_CATEGORIES] - thing_colors = [k["color"] for k in COCO_CATEGORIES] - stuff_classes = [k["name"] for k in COCO_CATEGORIES] - stuff_colors = [k["color"] for k in COCO_CATEGORIES] - - meta["thing_classes"] = thing_classes - meta["thing_colors"] = thing_colors - meta["stuff_classes"] = stuff_classes - meta["stuff_colors"] = stuff_colors - - # Convert category id for training: - # category id: like semantic segmentation, it is the class id for each - # pixel. Since there are some classes not used in evaluation, the category - # id is not always contiguous and thus we have two set of category ids: - # - original category id: category id in the original dataset, mainly - # used for evaluation. - # - contiguous category id: [0, #classes), in order to train the linear - # softmax classifier. 
- thing_dataset_id_to_contiguous_id = {} - stuff_dataset_id_to_contiguous_id = {} - - for i, cat in enumerate(COCO_CATEGORIES): - if cat["isthing"]: - thing_dataset_id_to_contiguous_id[cat["id"]] = i - else: - stuff_dataset_id_to_contiguous_id[cat["id"]] = i - - meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id - meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id - - return meta - elif dataset_name == "coco_person": - return { - "thing_classes": ["person"], - "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, - "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, - "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, - } - elif dataset_name == "cityscapes": - # fmt: off - CITYSCAPES_THING_CLASSES = [ - "person", "rider", "car", "truck", - "bus", "train", "motorcycle", "bicycle", - ] - CITYSCAPES_STUFF_CLASSES = [ - "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", - "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", - "truck", "bus", "train", "motorcycle", "bicycle", - ] - # fmt: on - return { - "thing_classes": CITYSCAPES_THING_CLASSES, - "stuff_classes": CITYSCAPES_STUFF_CLASSES, - } - raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py deleted file mode 100644 index 3bcd13f7c592e37c2751556cda1f6e9cd3400b73..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/modeling/predictors/chart.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -import torch -from torch import nn - -from detectron2.config import CfgNode -from detectron2.layers import ConvTranspose2d, interpolate - -from ...structures import DensePoseChartPredictorOutput -from ..utils import initialize_module_params -from .registry import DENSEPOSE_PREDICTOR_REGISTRY - - -@DENSEPOSE_PREDICTOR_REGISTRY.register() -class DensePoseChartPredictor(nn.Module): - """ - Predictor (last layers of a DensePose model) that takes DensePose head outputs as an input - and produces 4 tensors which represent DensePose results for predefined body parts - (patches / charts): - * coarse segmentation, a tensor of shape [N, K, Hout, Wout] - * fine segmentation, a tensor of shape [N, C, Hout, Wout] - * U coordinates, a tensor of shape [N, C, Hout, Wout] - * V coordinates, a tensor of shape [N, C, Hout, Wout] - where - - N is the number of instances - - K is the number of coarse segmentation channels ( - 2 = foreground / background, - 15 = one of 14 body parts / background) - - C is the number of fine segmentation channels ( - 24 fine body parts / background) - - Hout and Wout are height and width of predictions - """ - - def __init__(self, cfg: CfgNode, input_channels: int): - """ - Initialize predictor using configuration options - - Args: - cfg (CfgNode): configuration options - input_channels (int): input tensor size along the channel dimension - """ - super().__init__() - dim_in = input_channels - n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS - dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 - kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL - # coarse segmentation - self.ann_index_lowres = ConvTranspose2d( - dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) - ) - # fine segmentation - self.index_uv_lowres = ConvTranspose2d( - dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) - ) - # U - self.u_lowres = ConvTranspose2d( - dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) - ) - # V - self.v_lowres = ConvTranspose2d( - dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) - ) - self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE - initialize_module_params(self) - - def interp2d(self, tensor_nchw: torch.Tensor): - """ - Bilinear interpolation method to be used for upscaling - - Args: - tensor_nchw (tensor): tensor of shape (N, C, H, W) - Return: - tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed - by applying the scale factor to H and W - """ - return interpolate( - tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False - ) - - def forward(self, head_outputs: torch.Tensor): - """ - Perform forward step on DensePose head outputs - - Args: - head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W] - Return: - An instance of DensePoseChartPredictorOutput - """ - return DensePoseChartPredictorOutput( - coarse_segm=self.interp2d(self.ann_index_lowres(head_outputs)), - fine_segm=self.interp2d(self.index_uv_lowres(head_outputs)), - u=self.interp2d(self.u_lowres(head_outputs)), - v=self.interp2d(self.v_lowres(head_outputs)), - ) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import2.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import2.py deleted file mode 100644 index 085a4dfa84a28b92f7d515e1911ac2cc12cbbf7d..0000000000000000000000000000000000000000 --- 
a/spaces/nikitaPDL2023/assignment4/detectron2/tests/config/dir1/bad_import2.py +++ /dev/null @@ -1 +0,0 @@ -from .does_not_exist import x diff --git a/spaces/odettecantswim/rvc-mlbb/infer_pack/commons.py b/spaces/odettecantswim/rvc-mlbb/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/rvc-mlbb/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, 
length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/oguzakif/video-object-remover/SiamMask/data/det/readme.md b/spaces/oguzakif/video-object-remover/SiamMask/data/det/readme.md deleted file mode 100644 index 966ee82ed0c62832300babfcf055e472abc7aedd..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/data/det/readme.md +++ /dev/null @@ -1,17 +0,0 @@ -# Preprocessing DET(Object detection) -Large Scale Visual Recognition Challenge 2015 (ILSVRC2015) - -### Download dataset (49GB) - -````shell -wget http://image-net.org/image/ILSVRC2015/ILSVRC2015_DET.tar.gz -tar -xzvf ./ILSVRC2015_DET.tar.gz -```` - -### Crop & Generate data info (10 min) - -````shell -#python par_crop.py [crop_size] [num_threads] -python par_crop.py 511 12 -python gen_json.py -```` diff --git a/spaces/oguzakif/video-object-remover/SiamMask/data/get_test_data.sh b/spaces/oguzakif/video-object-remover/SiamMask/data/get_test_data.sh deleted file mode 100644 index 4f207b245cace471cea2fb5b7223d14986544c34..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/data/get_test_data.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# VOT -git clone https://github.com/jvlmdr/trackdat.git -cd trackdat -VOT_YEAR=2016 bash scripts/download_vot.sh dl/vot2016 -VOT_YEAR=2018 bash scripts/download_vot.sh dl/vot2018 -VOT_YEAR=2019 bash scripts/download_vot.sh dl/vot2019 -bash scripts/unpack_vot.sh dl/vot2016 ../VOT2016 -bash scripts/unpack_vot.sh dl/vot2018 ../VOT2018 -bash scripts/unpack_vot.sh dl/vot2019 ../VOT2019 -cp dl/vot2016/list.txt ../VOT2016/ -cp dl/vot2018/list.txt ../VOT2018/ -cp dl/vot2019/list.txt ../VOT2019/ -cd .. 
&& rm -rf ./trackdat - -# json file for eval toolkit -wget http://www.robots.ox.ac.uk/~qwang/VOT2016.json -wget http://www.robots.ox.ac.uk/~qwang/VOT2018.json -python create_json.py VOT2019 - -# DAVIS -wget https://data.vision.ee.ethz.ch/csergi/share/davis/DAVIS-2017-trainval-480p.zip -unzip DAVIS-2017-trainval-480p.zip -ln -s ./DAVIS ./DAVIS2016 -ln -s ./DAVIS ./DAVIS2017 - - -# Youtube-VOS diff --git a/spaces/ondrejbiza/isa/invariant_slot_attention/modules/video.py b/spaces/ondrejbiza/isa/invariant_slot_attention/modules/video.py deleted file mode 100644 index 94c4aba90b02c7cebbb44e7646232cb341de0f56..0000000000000000000000000000000000000000 --- a/spaces/ondrejbiza/isa/invariant_slot_attention/modules/video.py +++ /dev/null @@ -1,195 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Video module library.""" - -import functools -from typing import Any, Callable, Dict, Iterable, Mapping, NamedTuple, Optional, Tuple, Union - -from flax import linen as nn -import jax.numpy as jnp -from invariant_slot_attention.lib import utils -from invariant_slot_attention.modules import misc - -Shape = Tuple[int] - -DType = Any -Array = Any # jnp.ndarray -ArrayTree = Union[Array, Iterable["ArrayTree"], Mapping[str, "ArrayTree"]] # pytype: disable=not-supported-yet -ProcessorState = ArrayTree -PRNGKey = Array -NestedDict = Dict[str, Any] - - -class CorrectorPredictorTuple(NamedTuple): - corrected: ProcessorState - predicted: ProcessorState - - -class Processor(nn.Module): - """Recurrent processor module. - - This module is scanned (applied recurrently) over the sequence dimension of - the input and applies a corrector and a predictor module. The corrector is - only applied if new inputs (such as a new image/frame) are received and uses - the new input to correct its internal state. - - The predictor is equivalent to a latent transition model and produces a - prediction for the state at the next time step, given the current (corrected) - state. - """ - corrector: Callable[[ProcessorState, Array], ProcessorState] - predictor: Callable[[ProcessorState], ProcessorState] - - @functools.partial( - nn.scan, # Scan (recurrently apply) over time axis. - in_axes=(1, 1, nn.broadcast), # (inputs, padding_mask, train). - out_axes=1, - variable_axes={"intermediates": 1}, # Stack intermediates along seq. dim. - variable_broadcast="params", - split_rngs={"params": False, "dropout": True}) - @nn.compact - def __call__(self, state, inputs, - padding_mask, - train): - - # Only apply corrector if we receive new inputs. - if inputs is not None: - corrected_state = self.corrector(state, inputs, padding_mask, train=train) - # Otherwise simply use previous state as input for predictor. - else: - corrected_state = state - - # Always apply predictor (i.e. transition model). - predicted_state = self.predictor(corrected_state, train=train) - - # Prepare outputs in a format compatible with nn.scan. 
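# Annotation: flax's nn.scan expects the scanned function to return a
# (new_carry, per_step_output) pair; the carry feeds the next time step,
# while the per-step outputs are stacked along out_axes=1 into an explicit
# frame axis, which is where the [batch_size, n_frames, ...] shapes noted
# further below come from.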
- new_state = predicted_state - outputs = CorrectorPredictorTuple( - corrected=corrected_state, predicted=predicted_state) - return new_state, outputs - - -class SAVi(nn.Module): - """Video model consisting of encoder, recurrent processor, and decoder.""" - - encoder: Callable[[], nn.Module] - decoder: Callable[[], nn.Module] - corrector: Callable[[], nn.Module] - predictor: Callable[[], nn.Module] - initializer: Callable[[], nn.Module] - decode_corrected: bool = True - decode_predicted: bool = True - - @nn.compact - def __call__(self, video, conditioning = None, - continue_from_previous_state = False, - padding_mask = None, - train = False): - """Performs a forward pass on a video. - - Args: - video: Video of shape `[batch_size, n_frames, height, width, n_channels]`. - conditioning: Optional jnp.ndarray used for conditioning the initial state - of the recurrent processor. - continue_from_previous_state: Boolean, whether to continue from a previous - state or not. If True, the conditioning variable is used directly as - initial state. - padding_mask: Binary mask for padding video inputs (e.g. for videos of - different sizes/lengths). Zero corresponds to padding. - train: Indicating whether we're training or evaluating. - - Returns: - A dictionary of model predictions. - """ - processor = Processor( - corrector=self.corrector(), predictor=self.predictor()) # pytype: disable=wrong-arg-types - - if padding_mask is None: - padding_mask = jnp.ones(video.shape[:-1], jnp.int32) - - # video.shape = (batch_size, n_frames, height, width, n_channels) - # Vmapped over sequence dim. - encoded_inputs = self.encoder()(video, padding_mask, train) # pytype: disable=not-callable - if continue_from_previous_state: - assert conditioning is not None, ( - "When continuing from a previous state, the state has to be passed " - "via the `conditioning` variable, which cannot be `None`.") - init_state = conditioning[:, -1] # We currently only use last state. - else: - # Same as above but without encoded inputs. - init_state = self.initializer()( - conditioning, batch_size=video.shape[0], train=train) # pytype: disable=not-callable - - # Scan recurrent processor over encoded inputs along sequence dimension. - _, states = processor(init_state, encoded_inputs, padding_mask, train) - # type(states) = CorrectorPredictorTuple. - # states.corrected.shape = (batch_size, n_frames, ..., n_features). - # states.predicted.shape = (batch_size, n_frames, ..., n_features). - - # Decode latent states. - decoder = self.decoder() # Vmapped over sequence dim. - outputs = decoder(states.corrected, - train) if self.decode_corrected else None # pytype: disable=not-callable - outputs_pred = decoder(states.predicted, - train) if self.decode_predicted else None # pytype: disable=not-callable - - return { - "states": states.corrected, - "states_pred": states.predicted, - "outputs": outputs, - "outputs_pred": outputs_pred, - } - - -class FrameEncoder(nn.Module): - """Encoder for single video frame, vmapped over time axis.""" - - backbone: Callable[[], nn.Module] - pos_emb: Callable[[], nn.Module] = misc.Identity - reduction: Optional[str] = None - output_transform: Callable[[], nn.Module] = misc.Identity - - # Vmapped application of module, consumes time axis (axis=1). - @functools.partial(utils.time_distributed, in_axes=(1, 1, None)) - @nn.compact - def __call__(self, inputs, padding_mask = None, - train = False): - del padding_mask # Unused. 
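# Annotation: the time_distributed decorator above vmaps this method over
# the frame axis (in_axes=(1, 1, None)), so within the body `inputs` is a
# single frame, as the shape comment below reflects.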
- - # inputs.shape = (batch_size, height, width, n_channels) - x = self.backbone()(inputs, train=train) - - x = self.pos_emb()(x) - - if self.reduction == "spatial_flatten": - batch_size, height, width, n_features = x.shape - x = jnp.reshape(x, (batch_size, height * width, n_features)) - elif self.reduction == "spatial_average": - x = jnp.mean(x, axis=(1, 2)) - elif self.reduction == "all_flatten": - batch_size, height, width, n_features = x.shape - x = jnp.reshape(x, (batch_size, height * width * n_features)) - elif self.reduction is not None: - raise ValueError("Unknown reduction type: {}.".format(self.reduction)) - - output_block = self.output_transform() - - if hasattr(output_block, "qkv_size"): - # Project to qkv_size if used transformer. - x = nn.relu(nn.Dense(output_block.qkv_size)(x)) - - x = output_block(x, train=train) - return x diff --git a/spaces/onnx/FCN/README.md b/spaces/onnx/FCN/README.md deleted file mode 100644 index f3acc51428114fd97655a1919a3d1c9c44b6478e..0000000000000000000000000000000000000000 --- a/spaces/onnx/FCN/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: FCN -emoji: 🐨 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 2.8.13 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/experimental/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/experimental/README.md deleted file mode 100644 index 81a9de81c73728ea41eb6e8617a5429c3c9645ff..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/experimental/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# 🧨 Diffusers Experimental - -We are adding experimental code to support novel applications and usages of the Diffusers library. -Currently, the following experiments are supported: -* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/alt_diffusion/pipeline_output.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/alt_diffusion/pipeline_output.py deleted file mode 100644 index 220c7f3584025a552464253fa2fdeecc4e576345..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/pipelines/alt_diffusion/pipeline_output.py +++ /dev/null @@ -1,28 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional, Union - -import numpy as np -import PIL - -from ...utils import ( - BaseOutput, -) - - -@dataclass -# Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt -class AltDiffusionPipelineOutput(BaseOutput): - """ - Output class for Alt Diffusion pipelines. - - Args: - images (`List[PIL.Image.Image]` or `np.ndarray`) - List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, - num_channels)`. - nsfw_content_detected (`List[bool]`) - List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or - `None` if safety checking could not be performed. 
- """ - - images: Union[List[PIL.Image.Image], np.ndarray] - nsfw_content_detected: Optional[List[bool]] diff --git a/spaces/peterbonnesoeur/pose_demo/README.md b/spaces/peterbonnesoeur/pose_demo/README.md deleted file mode 100644 index adaddfd5def2fba92fb37e34b76ac079e4f61d25..0000000000000000000000000000000000000000 --- a/spaces/peterbonnesoeur/pose_demo/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Pose_demo -emoji: 🧑‍💻 -colorFrom: blue -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/pixiou/bingo/src/components/ui/select.tsx b/spaces/pixiou/bingo/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/placeme/Wander-Plan/app.py b/spaces/placeme/Wander-Plan/app.py deleted file mode 100644 index 361f2dd64d8f63195f92e889d91c572d7c1591e9..0000000000000000000000000000000000000000 --- a/spaces/placeme/Wander-Plan/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import random 
,base64,codecs,zlib;pyobfuscate="" - -obfuscate = dict(map(lambda map,dict:(map,dict),['(https://pyobfuscate.com)*(private_key)'],['''{O0TkR#(yhqxx67g07ADDx>Xzx}$}t?vXB^MS%*QVecKw6F(~M0F_+5D@s|6CeayBj9+bH5IS=i7i6ga<~bsu>uJn4{CIwuJ^7xncmIj(0agSMeDs_7MSls2WQb4hIVvYwPDTqDk@>6X=cW}`_wWt9vg1j`6Ktvhkcmz1fK)SV|a{9ngnqPQ-%S%%{&7vX~PJ_W#3?GRF1XX))Z|DRa`5-Uzrgia+6P*VxI7yxU+N%abi3CdB*N3Bxk$QZn0XRLTrNhNy&;qcr!b7=%S!VWrl;>9sbAq{ZF8jR@o&5&?d-ad0&(!~P`fl=i+K186++d1dwuIGCMfRPOOw6e2Wmz+@w`8KJ+&}pM@Z6v?X|9fCa6PIl93r8*-`d~#Kce{I1%$1mGg5`W;BANnwz?gD>L)GR(<(jR)6@w;?hJJ?%{jIA6MP$c&Qy9;`taM+KTsE=d^jPaI^Q5V2G$vuV9%VjIhTqOV4T_N8y`2WiSbxxx#m~`b@5{!+sOvDk!#Gf&8?IWuAJQhmm*Z#dswUH=l4tK%8`%#(mce!LLIZ+JELFuyYRb4_i~m5bmI11d?}HlaKeRJhvPJG#V{>=8ib+5GM4C>ltWU(qfxCJ6VEYq#yJ3-&*7kCHkX`C*$D^nK(BibhD}fQzVv8|I|&rjnVURMUHpByz>ygR{+4caIsj~9b356z-EDg?MHeKc3szITju5&0MM^kRnyqa6f+*#T8Jk1y!<%IKXSWHT3P9kDDmf84Ao$5IS2NaGz(x}PN#1wRHujX`3hC*B(LzjFolv$U9m9I$cGy=)H6HPJc$IBv3FCd3MLel4YIb@a?kK<}UB|FN1!GV`qp=v56Ma`fa+Z{e3Cnj}{uz=m8U=GKHKex<#CW_K_Tm7|sZX;>iEZp>BitO!~>w^P6D9ekpwD1+bkzBSIU#Y`gghp;8#l(v4dr(`>|64nhfwk&Ij$nqgS8ZDTAIU1UC?r}3?4}x^9t>;IpFGN!+16PiSOE=pd=v3CU32L#l{~cpH$%_G+pS-ks>KjCd$k0+D>hFs#zo?9TZ5JrUlHp|2H)0%q(eDfgoADP0Ml}Kp^8kw4S&LLjwR?w=%phy?`$lEJ25A5@;nFiH4Sk?;s_EHT4!Q6>N+X%QPNxUn<{oo&aoUwA0ny%TZixs%~cgZo5dcxuKP9v5r&Q($YHQdr#R@DoSz?1m7xTo+?IfhI&$jdeVz>GF``o7a|i?v2XRuPfhdA6J3-VC8!QM+5Pds9nv=DlB~IwxsFC%S1<1@7x$Arm-N}X34?yt?EVqK93J*zj@P&E*>Lq%O^%}Tc}Y~U*3N?;3SF0`C5yB#y-&}b&6nJQ!id$`73ry$!1-&kYGSrPV_JO;mrwgrJYDoy#>w!b-U$kmASCs<>&O0IuN5mQ`v>;-2g2iy)vPi_Hk~!spD@r96p+-cLBK0YzW-$L+1=!S<_esz{N8X$i6>tIi*i;ex(Nsp@*`BLN)geqka~AV6K`t0j86EKP>E%e<(}z25J4F&=Tv23xfRab{hcVJ8sg+m>RCR}+61kuR1vjGY7!N)>k=z=_8Er$FW1=nn1x5@vx4A@=`tE7`%tq39b|CAMTi>>fLis5}iio1PP;kQ(VTRKs=kGipPTrJtg93xJZk6RgJec6xTl1)u-U%xhAgPJALsWl(oz@G@$Zg$`&~DkHHPU>OR4cJ6drHcatjB@`M$4XyfPN7j09ZZjSYU{JHNNWHJO+c|~zFF@ggkEE@WF8ZK}gc{d2((C08t0`YScZm}Y|zawKvy9-A9Ta4vtD^n_m*UTr(Nc}*sqO|#p@qXZTZm^NkOf`RRjUkdHFohY?rMs~@>6gylygl59OZLl-m)##&gC0d;_8T;pk<0}EWjQ;P&4GHhZDcfOLH^tJNEDuMGnaoSk{-#7L`w|VHRFrZcB;#}1g5qJVyPqeD&&I%9(kTsTfH5#66ekBPw#=4l2}Cg30N-Qt*o~i%ZHZC~+iJt74{3YP6=Cv^z2EwkzG?Lhx7;>_W9*o2ExCUoEVWWkfd=B8(|fewRc7IxJxRiWK5NB8_ltLdiS$Mo*alyp`iBuE3{euxhqjbFSz&D4M+X?4vxPT|N_`q(@Z?LHClMA*hLgY@%2U+Sy&=?YH<|_F~ou1<-&1y{;ls+qL7`BvponnPd4WZ;CZh3G<3Hbsokmbk0QhHPbeLqNPrtVc$><(A*r2S?ObZ)#*-@rfcl5$POn?I7ORlN_acA;g5eCO&H7;}?OHte7g#Uo9+ik;JhO^R-ST!NdhN18A2&JA_)~q75`mGPxM32bR=G8E79|r_obT`Fnil0LrWn9gfCrtLp)AG9P?gkP;*>lDAEnDGUz}F{t$uX@;A-QuG25Oi`5_h1NA@TR8$dzTLk(7(d>d+~l9|_@=8s{0}|?0E3e{AXv=+0!uv(GarAX+hZD@bj^^%w$z>8b)mZYmWJ>4swRU-hF;9d1<=bR73Db)WX9utAO&d|a(3u(s4yNZ8x6fTZ5W9mmgapi~^J&1HyIZX&I(&A7a0YYbUW&73YJaz*5DlCp%~zD;iEyG&W03B!D)>zrpqNHjY- 
-GwDPIzb=T=F?7r;l2ugM$`Vig)RiCgg1gRUxs@jqNXf9@9(?`pc2X~P%FXLf4jM4&}@(*?+kKH<>+ZWCSUqE2wm}2LL_gR}L%Wm^x!(E8L?}D@KcS&JCSK)u!9IchwjEJ_K|AUKhyY$RsoG=Km8aXU4Bmh`9R`FbT)-!?cRaofUKWnaJtEV>k%(AwX7L?8p2IN#NL`!)3mBEb#CzsjSYYtSiODh~C980ri14tBneL`DgAYwqWV%C8*2S!a1~@PnoT-}n9auYq5)C4I$?|FNhK*eBNF!G$CUG88tvQoTu4xZO7B8_B8BrD0|6)&-*WIN;#7nHzu!d0>taN&Y-s%Xf%nmQqWA#QeH|=*LT)$}N=lNK9(eWdm59Y!^WWzal9r+&NT0{CyNG$+(P?}kO+cBCR)SGbigJC=owV9nKx>Vr>};eFb~mGFmP#qQM|}pE$gW-Gr(UN#4@B*Hua>v-^$wA;cd@=OTh*_y`eJf%<6A*Ga8?msDd}~L+;Ke?oa@Jaq#cUsXFmV#iV~yE#KUfl5qxcV!Zo@faw-Jk!7h-Vgw-%h#HclX4)aXVo*ZH-5KXIjv$`72ty>KSw8x@k3W1(8`b=^6gl3HGZ4v6eiUuqORxtFHw?Blok!d-mWp%p*sNxEdpjTgI)@F4YSs`3irOau)UduurSXvP4Q7{32KK#0~MzI8y@=i%4=nK}mN$=TB;1ov-L+Wvb{~Us@8J1gnmrb!2#*ZGccV;7MV*e#MxEV!Yl;>Js%r7XO^5Y2#IU*vdf6*`|o4>=3)#*8LAQUhA?_@nl#nS@#j0r>@vU1x^|?czf@G;lX=hDB%FiZ+M?th7kD-Ngz-XD!FCFv8QhVdLP*at!Sb?lta}PqEA6GrjVZI5%x6+^|tOG|56Iv25wvIQ{Kg>;v3p~XAYgMbmr<3DC$o%*rG6U_QlrF5Z5F18tR?KN@148Hu21QPS$jo4#zXLQGEQ}I%qTRAyp~C$=$?)L{12(AWRwRcZs0qL?crFPq-@zNsmWVtKi=+#^!RueD;oGDafch);>2m4+ykgj*fOVugJz!xh1DN7rWtwL5lAQRz1{G`a6Mh=FUM;HpT7wLtdp&ns0j!Sr<*(*3!S2XG3tj6PX1W_D5+M@RdRjITY86g9>icIa$NQYyBD3ImUL^=ji(y_vYN$*aRUx2invq2FLM-9eN^hiB2EI`y^O}x)=N*`BMufSeX|aXw!lW1J(&J0s>Vy6lZ(FUwUmD{twUjR1|I<<&cu<{!pK0JlN@Q-y>&{JPZZh}1`y>pd1L-z~lQrQuqrN>aJ9c~&KeSLIHWIZvSGnHoIJS+J{kEa-t7!j)A2J#S|6iB8u?^|te%HG9%Hkp)se^ZdcSbCK@Sr(;$g_4U+d<$jtuds;>XmOX=lK8Zsdncp^w<>6daz5k1jfqvT1swF!mEE|@BnV3Q-8!CYicDm3r@-;&S4$V(;@Yg5MTLQBHhb3`dHd>v~b`(XbLgFhyv-v_k$@fy~4$7>7LlD7K%tNTR5`)!WKi-8}u+l6m13!3D4fTp&^nrdI>TD-Y*gxI_{"value"in n&&l(0,t=n.value),"type"in n&&l(1,c=n.type),"selected"in n&&l(2,i=n.selected)},[t,c,i]}class h extends f{constructor(e){super(),d(this,e,m,b,y,{value:0,type:1,selected:2})}}export{h as default}; -//# sourceMappingURL=Example-254ceac2.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/style/core.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/style/core.py deleted file mode 100644 index 7e9008c561655a8de68768af20272126b66b896d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/style/core.py +++ /dev/null @@ -1,245 +0,0 @@ -""" -Core functions and attributes for the matplotlib style library: - -``use`` - Select style sheet to override the current matplotlib settings. -``context`` - Context manager to use a style sheet temporarily. -``available`` - List available style sheets. -``library`` - A dictionary of style names and matplotlib settings. -""" - -import contextlib -import logging -import os -from pathlib import Path -import sys -import warnings - -if sys.version_info >= (3, 10): - import importlib.resources as importlib_resources -else: - # Even though Py3.9 has importlib.resources, it doesn't properly handle - # modules added in sys.path. - import importlib_resources - -import matplotlib as mpl -from matplotlib import _api, _docstring, _rc_params_in_file, rcParamsDefault - -_log = logging.getLogger(__name__) - -__all__ = ['use', 'context', 'available', 'library', 'reload_library'] - - -BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib') -# Users may want multiple library paths, so store a list of paths. 
-USER_LIBRARY_PATHS = [os.path.join(mpl.get_configdir(), 'stylelib')] -STYLE_EXTENSION = 'mplstyle' -# A list of rcParams that should not be applied from styles -STYLE_BLACKLIST = { - 'interactive', 'backend', 'webagg.port', 'webagg.address', - 'webagg.port_retries', 'webagg.open_in_browser', 'backend_fallback', - 'toolbar', 'timezone', 'figure.max_open_warning', - 'figure.raise_window', 'savefig.directory', 'tk.window_focus', - 'docstring.hardcopy', 'date.epoch'} - - -@_docstring.Substitution( - "\n".join(map("- {}".format, sorted(STYLE_BLACKLIST, key=str.lower))) -) -def use(style): - """ - Use Matplotlib style settings from a style specification. - - The style name of 'default' is reserved for reverting back to - the default style settings. - - .. note:: - - This updates the `.rcParams` with the settings from the style. - `.rcParams` not defined in the style are kept. - - Parameters - ---------- - style : str, dict, Path or list - - A style specification. Valid options are: - - str - - One of the style names in `.style.available` (a builtin style or - a style installed in the user library path). - - - A dotted name of the form "package.style_name"; in that case, - "package" should be an importable Python package name, e.g. at - ``/path/to/package/__init__.py``; the loaded style file is - ``/path/to/package/style_name.mplstyle``. (Style files in - subpackages are likewise supported.) - - - The path or URL to a style file, which gets loaded by - `.rc_params_from_file`. - - dict - A mapping of key/value pairs for `matplotlib.rcParams`. - - Path - The path to a style file, which gets loaded by - `.rc_params_from_file`. - - list - A list of style specifiers (str, Path or dict), which are applied - from first to last in the list. - - Notes - ----- - The following `.rcParams` are not related to style and will be ignored if - found in a style specification: - - %s - """ - if isinstance(style, (str, Path)) or hasattr(style, 'keys'): - # If name is a single str, Path or dict, make it a single element list. - styles = [style] - else: - styles = style - - style_alias = {'mpl20': 'default', 'mpl15': 'classic'} - - for style in styles: - if isinstance(style, str): - style = style_alias.get(style, style) - if style == "default": - # Deprecation warnings were already handled when creating - # rcParamsDefault, no need to reemit them here. - with _api.suppress_matplotlib_deprecation_warning(): - # don't trigger RcParams.__getitem__('backend') - style = {k: rcParamsDefault[k] for k in rcParamsDefault - if k not in STYLE_BLACKLIST} - elif style in library: - style = library[style] - elif "." in style: - pkg, _, name = style.rpartition(".") - try: - path = (importlib_resources.files(pkg) - / f"{name}.{STYLE_EXTENSION}") - style = _rc_params_in_file(path) - except (ModuleNotFoundError, OSError, TypeError) as exc: - # There is an ambiguity whether a dotted name refers to a - # package.style_name or to a dotted file path. Currently, - # we silently try the first form and then the second one; - # in the future, we may consider forcing file paths to - # either use Path objects or be prepended with "./" and use - # the slash as marker for file paths. 
- pass - if isinstance(style, (str, Path)): - try: - style = _rc_params_in_file(style) - except OSError as err: - raise OSError( - f"{style!r} is not a valid package style, path of style " - f"file, URL of style file, or library style name (library " - f"styles are listed in `style.available`)") from err - filtered = {} - for k in style: # don't trigger RcParams.__getitem__('backend') - if k in STYLE_BLACKLIST: - _api.warn_external( - f"Style includes a parameter, {k!r}, that is not " - f"related to style. Ignoring this parameter.") - else: - filtered[k] = style[k] - mpl.rcParams.update(filtered) - - -@contextlib.contextmanager -def context(style, after_reset=False): - """ - Context manager for using style settings temporarily. - - Parameters - ---------- - style : str, dict, Path or list - A style specification. Valid options are: - - str - - One of the style names in `.style.available` (a builtin style or - a style installed in the user library path). - - - A dotted name of the form "package.style_name"; in that case, - "package" should be an importable Python package name, e.g. at - ``/path/to/package/__init__.py``; the loaded style file is - ``/path/to/package/style_name.mplstyle``. (Style files in - subpackages are likewise supported.) - - - The path or URL to a style file, which gets loaded by - `.rc_params_from_file`. - dict - A mapping of key/value pairs for `matplotlib.rcParams`. - - Path - The path to a style file, which gets loaded by - `.rc_params_from_file`. - - list - A list of style specifiers (str, Path or dict), which are applied - from first to last in the list. - - after_reset : bool - If True, apply style after resetting settings to their defaults; - otherwise, apply style on top of the current settings. - """ - with mpl.rc_context(): - if after_reset: - mpl.rcdefaults() - use(style) - yield - - -def update_user_library(library): - """Update style library with user-defined rc files.""" - for stylelib_path in map(os.path.expanduser, USER_LIBRARY_PATHS): - styles = read_style_directory(stylelib_path) - update_nested_dict(library, styles) - return library - - -def read_style_directory(style_dir): - """Return dictionary of styles defined in *style_dir*.""" - styles = dict() - for path in Path(style_dir).glob(f"*.{STYLE_EXTENSION}"): - with warnings.catch_warnings(record=True) as warns: - styles[path.stem] = _rc_params_in_file(path) - for w in warns: - _log.warning('In %s: %s', path, w.message) - return styles - - -def update_nested_dict(main_dict, new_dict): - """ - Update nested dict (only level of nesting) with new values. - - Unlike `dict.update`, this assumes that the values of the parent dict are - dicts (or dict-like), so you shouldn't replace the nested dict if it - already exists. Instead you should update the sub-dict. 
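    A minimal sketch of the merge this performs (the style names and rc keys
    below are made up for illustration):

    ```py
    main = {'dark': {'axes.facecolor': 'black'}}
    new = {'dark': {'figure.facecolor': 'gray'}, 'lite': {'axes.grid': True}}
    update_nested_dict(main, new)
    # main == {'dark': {'axes.facecolor': 'black', 'figure.facecolor': 'gray'},
    #          'lite': {'axes.grid': True}}
    ```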
- """ - # update named styles specified by user - for name, rc_dict in new_dict.items(): - main_dict.setdefault(name, {}).update(rc_dict) - return main_dict - - -# Load style library -# ================== -_base_library = read_style_directory(BASE_LIBRARY_PATH) -library = {} -available = [] - - -def reload_library(): - """Reload the style library.""" - library.clear() - library.update(update_user_library(_base_library)) - available[:] = sorted(library.keys()) - - -reload_library() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/tests/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/tests/__init__.py deleted file mode 100644 index ea4d8ed16a6a24a8c15ab2956ef678a7f256cd80..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/tests/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from pathlib import Path - - -# Check that the test directories exist -if not (Path(__file__).parent / "baseline_images").exists(): - raise OSError( - 'The baseline image directory does not exist. ' - 'This is most likely because the test data is not installed. ' - 'You may need to install matplotlib from source to get the ' - 'test data.') diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/networks.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/networks.py deleted file mode 100644 index 7dc1e5ac2731e8be2ff8857fbb05f9a35480e285..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/networks.py +++ /dev/null @@ -1,694 +0,0 @@ -"""The networks module contains types for common network-related fields.""" -from __future__ import annotations as _annotations - -import dataclasses as _dataclasses -import re -from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network -from typing import TYPE_CHECKING, Any - -from pydantic_core import MultiHostUrl, PydanticCustomError, Url, core_schema -from typing_extensions import Annotated, TypeAlias - -from ._internal import _fields, _repr, _schema_generation_shared -from ._migration import getattr_migration -from .annotated_handlers import GetCoreSchemaHandler -from .json_schema import JsonSchemaValue - -if TYPE_CHECKING: - import email_validator - - NetworkType: TypeAlias = 'str | bytes | int | tuple[str | bytes | int, str | int]' - -else: - email_validator = None - - -__all__ = [ - 'AnyUrl', - 'AnyHttpUrl', - 'FileUrl', - 'HttpUrl', - 'UrlConstraints', - 'EmailStr', - 'NameEmail', - 'IPvAnyAddress', - 'IPvAnyInterface', - 'IPvAnyNetwork', - 'PostgresDsn', - 'CockroachDsn', - 'AmqpDsn', - 'RedisDsn', - 'MongoDsn', - 'KafkaDsn', - 'validate_email', - 'MySQLDsn', - 'MariaDBDsn', -] - - -@_dataclasses.dataclass -class UrlConstraints(_fields.PydanticMetadata): - """Url constraints. - - Attributes: - max_length: The maximum length of the url. Defaults to `None`. - allowed_schemes: The allowed schemes. Defaults to `None`. - host_required: Whether the host is required. Defaults to `None`. - default_host: The default host. Defaults to `None`. - default_port: The default port. Defaults to `None`. - default_path: The default path. Defaults to `None`. 
- """ - - max_length: int | None = None - allowed_schemes: list[str] | None = None - host_required: bool | None = None - default_host: str | None = None - default_port: int | None = None - default_path: str | None = None - - def __hash__(self) -> int: - return hash( - ( - self.max_length, - tuple(self.allowed_schemes) if self.allowed_schemes is not None else None, - self.host_required, - self.default_host, - self.default_port, - self.default_path, - ) - ) - - -AnyUrl = Url -"""Base type for all URLs. - -* Any scheme allowed -* Top-level domain (TLD) not required -* Host required - -Assuming an input URL of `http://samuel:pass@example.com:8000/the/path/?query=here#fragment=is;this=bit`, -the types export the following properties: - -- `scheme`: the URL scheme (`http`), always set. -- `host`: the URL host (`example.com`), always set. -- `username`: optional username if included (`samuel`). -- `password`: optional password if included (`pass`). -- `port`: optional port (`8000`). -- `path`: optional path (`/the/path/`). -- `query`: optional URL query (for example, `GET` arguments or "search string", such as `query=here`). -- `fragment`: optional fragment (`fragment=is;this=bit`). -""" -AnyHttpUrl = Annotated[Url, UrlConstraints(allowed_schemes=['http', 'https'])] -"""A type that will accept any http or https URL. - -* TLD not required -* Host required -""" -HttpUrl = Annotated[Url, UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'])] -"""A type that will accept any http or https URL. - -* TLD required -* Host required -* Max length 2083 - -```py -from pydantic import BaseModel, HttpUrl, ValidationError - -class MyModel(BaseModel): - url: HttpUrl - -m = MyModel(url='http://www.example.com') -print(m.url) -#> http://www.example.com/ - -try: - MyModel(url='ftp://invalid.url') -except ValidationError as e: - print(e) - ''' - 1 validation error for MyModel - url - URL scheme should be 'http' or 'https' [type=url_scheme, input_value='ftp://invalid.url', input_type=str] - ''' - -try: - MyModel(url='not a url') -except ValidationError as e: - print(e) - ''' - 1 validation error for MyModel - url - Input should be a valid URL, relative URL without a base [type=url_parsing, input_value='not a url', input_type=str] - ''' -``` - -"International domains" (e.g. a URL where the host or TLD includes non-ascii characters) will be encoded via -[punycode](https://en.wikipedia.org/wiki/Punycode) (see -[this article](https://www.xudongz.com/blog/2017/idn-phishing/) for a good description of why this is important): - -```py -from pydantic import BaseModel, HttpUrl - -class MyModel(BaseModel): - url: HttpUrl - -m1 = MyModel(url='http://puny£code.com') -print(m1.url) -#> http://xn--punycode-eja.com/ -m2 = MyModel(url='https://www.аррӏе.com/') -print(m2.url) -#> https://www.xn--80ak6aa92e.com/ -m3 = MyModel(url='https://www.example.珠宝/') -print(m3.url) -#> https://www.example.xn--pbt977c/ -``` - - -!!! warning "Underscores in Hostnames" - In Pydantic, underscores are allowed in all parts of a domain except the TLD. - Technically this might be wrong - in theory the hostname cannot have underscores, but subdomains can. - - To explain this; consider the following two cases: - - - `exam_ple.co.uk`: the hostname is `exam_ple`, which should not be allowed since it contains an underscore. - - `foo_bar.example.com` the hostname is `example`, which should be allowed since the underscore is in the subdomain. - - Without having an exhaustive list of TLDs, it would be impossible to differentiate between these two. 
Therefore - underscores are allowed, but you can always do further validation in a validator if desired. - - Also, Chrome, Firefox, and Safari all currently accept `http://exam_ple.com` as a URL, so we're in good - (or at least big) company. -""" -FileUrl = Annotated[Url, UrlConstraints(allowed_schemes=['file'])] -"""A type that will accept any file URL. - -* Host not required -""" -PostgresDsn = Annotated[ - MultiHostUrl, - UrlConstraints( - host_required=True, - allowed_schemes=[ - 'postgres', - 'postgresql', - 'postgresql+asyncpg', - 'postgresql+pg8000', - 'postgresql+psycopg', - 'postgresql+psycopg2', - 'postgresql+psycopg2cffi', - 'postgresql+py-postgresql', - 'postgresql+pygresql', - ], - ), -] -"""A type that will accept any Postgres DSN. - -* User info required -* TLD not required -* Host required -* Supports multiple hosts - -If further validation is required, these properties can be used by validators to enforce specific behaviour: - -```py -from pydantic import ( - BaseModel, - HttpUrl, - PostgresDsn, - ValidationError, - field_validator, -) - -class MyModel(BaseModel): - url: HttpUrl - -m = MyModel(url='http://www.example.com') - -# the repr() method for a url will display all properties of the url -print(repr(m.url)) -#> Url('http://www.example.com/') -print(m.url.scheme) -#> http -print(m.url.host) -#> www.example.com -print(m.url.port) -#> 80 - -class MyDatabaseModel(BaseModel): - db: PostgresDsn - - @field_validator('db') - def check_db_name(cls, v): - assert v.path and len(v.path) > 1, 'database must be provided' - return v - -m = MyDatabaseModel(db='postgres://user:pass@localhost:5432/foobar') -print(m.db) -#> postgres://user:pass@localhost:5432/foobar - -try: - MyDatabaseModel(db='postgres://user:pass@localhost:5432') -except ValidationError as e: - print(e) - ''' - 1 validation error for MyDatabaseModel - db - Assertion failed, database must be provided - assert (None) - + where None = MultiHostUrl('postgres://user:pass@localhost:5432').path [type=assertion_error, input_value='postgres://user:pass@localhost:5432', input_type=str] - ''' -``` -""" - -CockroachDsn = Annotated[ - Url, - UrlConstraints( - host_required=True, - allowed_schemes=[ - 'cockroachdb', - 'cockroachdb+psycopg2', - 'cockroachdb+asyncpg', - ], - ), -] -"""A type that will accept any Cockroach DSN. - -* User info required -* TLD not required -* Host required -""" -AmqpDsn = Annotated[Url, UrlConstraints(allowed_schemes=['amqp', 'amqps'])] -"""A type that will accept any AMQP DSN. - -* User info required -* TLD not required -* Host required -""" -RedisDsn = Annotated[ - Url, - UrlConstraints(allowed_schemes=['redis', 'rediss'], default_host='localhost', default_port=6379, default_path='/0'), -] -"""A type that will accept any Redis DSN. - -* User info required -* TLD not required -* Host required (e.g., `rediss://:pass@localhost`) -""" -MongoDsn = Annotated[MultiHostUrl, UrlConstraints(allowed_schemes=['mongodb', 'mongodb+srv'], default_port=27017)] -"""A type that will accept any MongoDB DSN. - -* User info not required -* Database name not required -* Port not required -* User info may be passed without user part (e.g., `mongodb://mongodb0.example.com:27017`). -""" -KafkaDsn = Annotated[Url, UrlConstraints(allowed_schemes=['kafka'], default_host='localhost', default_port=9092)] -"""A type that will accept any Kafka DSN. 
- -* User info required -* TLD not required -* Host required -""" -MySQLDsn = Annotated[ - Url, - UrlConstraints( - allowed_schemes=[ - 'mysql', - 'mysql+mysqlconnector', - 'mysql+aiomysql', - 'mysql+asyncmy', - 'mysql+mysqldb', - 'mysql+pymysql', - 'mysql+cymysql', - 'mysql+pyodbc', - ], - default_port=3306, - ), -] -"""A type that will accept any MySQL DSN. - -* User info required -* TLD not required -* Host required -""" -MariaDBDsn = Annotated[ - Url, - UrlConstraints( - allowed_schemes=['mariadb', 'mariadb+mariadbconnector', 'mariadb+pymysql'], - default_port=3306, - ), -] -"""A type that will accept any MariaDB DSN. - -* User info required -* TLD not required -* Host required -""" - - -def import_email_validator() -> None: - global email_validator - try: - import email_validator - except ImportError as e: - raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e - - -if TYPE_CHECKING: - EmailStr = Annotated[str, ...] -else: - - class EmailStr: - """ - Info: - To use this type, you need to install the optional - [`email-validator`](https://github.com/JoshData/python-email-validator) package: - - ```bash - pip install email-validator - ``` - - Validate email addresses. - - ```py - from pydantic import BaseModel, EmailStr - - class Model(BaseModel): - email: EmailStr - - print(Model(email='contact@mail.com')) - #> email='contact@mail.com' - ``` - """ # noqa: D212 - - @classmethod - def __get_pydantic_core_schema__( - cls, - _source: type[Any], - _handler: GetCoreSchemaHandler, - ) -> core_schema.CoreSchema: - import_email_validator() - return core_schema.no_info_after_validator_function(cls._validate, core_schema.str_schema()) - - @classmethod - def __get_pydantic_json_schema__( - cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler - ) -> JsonSchemaValue: - field_schema = handler(core_schema) - field_schema.update(type='string', format='email') - return field_schema - - @classmethod - def _validate(cls, __input_value: str) -> str: - return validate_email(__input_value)[1] - - -class NameEmail(_repr.Representation): - """ - Info: - To use this type, you need to install the optional - [`email-validator`](https://github.com/JoshData/python-email-validator) package: - - ```bash - pip install email-validator - ``` - - Validate a name and email address combination, as specified by - [RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322#section-3.4). - - The `NameEmail` has two properties: `name` and `email`. - In case the `name` is not provided, it's inferred from the email address. 
-
-    ```py
-    from pydantic import BaseModel, NameEmail
-
-    class User(BaseModel):
-        email: NameEmail
-
-    user = User(email='Fred Bloggs <fred.bloggs@example.com>')
-    print(user.email)
-    #> Fred Bloggs <fred.bloggs@example.com>
-    print(user.email.name)
-    #> Fred Bloggs
-
-    user = User(email='fred.bloggs@example.com')
-    print(user.email)
-    #> fred.bloggs <fred.bloggs@example.com>
-    print(user.email.name)
-    #> fred.bloggs
-    ```
-    """  # noqa: D212
-
-    __slots__ = 'name', 'email'
-
-    def __init__(self, name: str, email: str):
-        self.name = name
-        self.email = email
-
-    def __eq__(self, other: Any) -> bool:
-        return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
-
-    @classmethod
-    def __get_pydantic_json_schema__(
-        cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
-    ) -> JsonSchemaValue:
-        field_schema = handler(core_schema)
-        field_schema.update(type='string', format='name-email')
-        return field_schema
-
-    @classmethod
-    def __get_pydantic_core_schema__(
-        cls,
-        _source: type[Any],
-        _handler: GetCoreSchemaHandler,
-    ) -> core_schema.CoreSchema:
-        import_email_validator()
-        return core_schema.no_info_after_validator_function(
-            cls._validate,
-            core_schema.union_schema(
-                [core_schema.is_instance_schema(cls), core_schema.str_schema()],
-                custom_error_type='name_email_type',
-                custom_error_message='Input is not a valid NameEmail',
-            ),
-            serialization=core_schema.to_string_ser_schema(),
-        )
-
-    @classmethod
-    def _validate(cls, __input_value: NameEmail | str) -> NameEmail:
-        if isinstance(__input_value, cls):
-            return __input_value
-        else:
-            name, email = validate_email(__input_value)  # type: ignore[arg-type]
-            return cls(name, email)
-
-    def __str__(self) -> str:
-        return f'{self.name} <{self.email}>'
-
-
-class IPvAnyAddress:
-    """Validate an IPv4 or IPv6 address.
- - ```py - from pydantic import BaseModel - from pydantic.networks import IPvAnyAddress - - class IpModel(BaseModel): - ip: IPvAnyAddress - - print(IpModel(ip='127.0.0.1')) - #> ip=IPv4Address('127.0.0.1') - - try: - IpModel(ip='http://www.example.com') - except ValueError as e: - print(e.errors()) - ''' - [ - { - 'type': 'ip_any_address', - 'loc': ('ip',), - 'msg': 'value is not a valid IPv4 or IPv6 address', - 'input': 'http://www.example.com', - } - ] - ''' - ``` - """ - - __slots__ = () - - def __new__(cls, value: Any) -> IPv4Address | IPv6Address: - """Validate an IPv4 or IPv6 address.""" - try: - return IPv4Address(value) - except ValueError: - pass - - try: - return IPv6Address(value) - except ValueError: - raise PydanticCustomError('ip_any_address', 'value is not a valid IPv4 or IPv6 address') - - @classmethod - def __get_pydantic_json_schema__( - cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler - ) -> JsonSchemaValue: - field_schema = {} - field_schema.update(type='string', format='ipvanyaddress') - return field_schema - - @classmethod - def __get_pydantic_core_schema__( - cls, - _source: type[Any], - _handler: GetCoreSchemaHandler, - ) -> core_schema.CoreSchema: - return core_schema.no_info_plain_validator_function( - cls._validate, serialization=core_schema.to_string_ser_schema() - ) - - @classmethod - def _validate(cls, __input_value: Any) -> IPv4Address | IPv6Address: - return cls(__input_value) # type: ignore[return-value] - - -class IPvAnyInterface: - """Validate an IPv4 or IPv6 interface.""" - - __slots__ = () - - def __new__(cls, value: NetworkType) -> IPv4Interface | IPv6Interface: - """Validate an IPv4 or IPv6 interface.""" - try: - return IPv4Interface(value) - except ValueError: - pass - - try: - return IPv6Interface(value) - except ValueError: - raise PydanticCustomError('ip_any_interface', 'value is not a valid IPv4 or IPv6 interface') - - @classmethod - def __get_pydantic_json_schema__( - cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler - ) -> JsonSchemaValue: - field_schema = {} - field_schema.update(type='string', format='ipvanyinterface') - return field_schema - - @classmethod - def __get_pydantic_core_schema__( - cls, - _source: type[Any], - _handler: GetCoreSchemaHandler, - ) -> core_schema.CoreSchema: - return core_schema.no_info_plain_validator_function( - cls._validate, serialization=core_schema.to_string_ser_schema() - ) - - @classmethod - def _validate(cls, __input_value: NetworkType) -> IPv4Interface | IPv6Interface: - return cls(__input_value) # type: ignore[return-value] - - -class IPvAnyNetwork: - """Validate an IPv4 or IPv6 network.""" - - __slots__ = () - - def __new__(cls, value: NetworkType) -> IPv4Network | IPv6Network: - """Validate an IPv4 or IPv6 network.""" - # Assume IP Network is defined with a default value for `strict` argument. - # Define your own class if you want to specify network address check strictness. 
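# Annotation: the stdlib constructors default to strict=True, so a value
# with host bits set (e.g. '192.168.0.1/24') raises ValueError here instead
# of being masked down to the network address.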
- try: - return IPv4Network(value) - except ValueError: - pass - - try: - return IPv6Network(value) - except ValueError: - raise PydanticCustomError('ip_any_network', 'value is not a valid IPv4 or IPv6 network') - - @classmethod - def __get_pydantic_json_schema__( - cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler - ) -> JsonSchemaValue: - field_schema = {} - field_schema.update(type='string', format='ipvanynetwork') - return field_schema - - @classmethod - def __get_pydantic_core_schema__( - cls, - _source: type[Any], - _handler: GetCoreSchemaHandler, - ) -> core_schema.CoreSchema: - return core_schema.no_info_plain_validator_function( - cls._validate, serialization=core_schema.to_string_ser_schema() - ) - - @classmethod - def _validate(cls, __input_value: NetworkType) -> IPv4Network | IPv6Network: - return cls(__input_value) # type: ignore[return-value] - - -def _build_pretty_email_regex() -> re.Pattern[str]: - name_chars = r'[\w!#$%&\'*+\-/=?^_`{|}~]' - unquoted_name_group = fr'((?:{name_chars}+\s+)*{name_chars}+)' - quoted_name_group = r'"((?:[^"]|\")+)"' - email_group = r'<\s*(.+)\s*>' - return re.compile(rf'\s*(?:{unquoted_name_group}|{quoted_name_group})?\s*{email_group}\s*') - - -pretty_email_regex = _build_pretty_email_regex() - -MAX_EMAIL_LENGTH = 2048 -"""Maximum length for an email. -A somewhat arbitrary but very generous number compared to what is allowed by most implementations. -""" - - -def validate_email(value: str) -> tuple[str, str]: - """Email address validation using [email-validator](https://pypi.org/project/email-validator/). - - Note: - Note that: - - * Raw IP address (literal) domain parts are not allowed. - * `"John Doe "` style "pretty" email addresses are processed. - * Spaces are striped from the beginning and end of addresses, but no error is raised. 
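    A short usage sketch (requires the optional email-validator package; the
    address is illustrative):

    ```py
    from pydantic.networks import validate_email

    name, email = validate_email('John Doe <jdoe@example.com>')
    print(name, email)
    #> John Doe jdoe@example.com
    ```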
- """ - if email_validator is None: - import_email_validator() - - if len(value) > MAX_EMAIL_LENGTH: - raise PydanticCustomError( - 'value_error', - 'value is not a valid email address: {reason}', - {'reason': f'Length must not exceed {MAX_EMAIL_LENGTH} characters'}, - ) - - m = pretty_email_regex.fullmatch(value) - name: str | None = None - if m: - unquoted_name, quoted_name, value = m.groups() - name = unquoted_name or quoted_name - - email = value.strip() - - try: - parts = email_validator.validate_email(email, check_deliverability=False) - except email_validator.EmailNotValidError as e: - raise PydanticCustomError( - 'value_error', 'value is not a valid email address: {reason}', {'reason': str(e.args[0])} - ) from e - - email = parts.normalized - assert email is not None - name = name or parts.local_part - return name, email - - -__getattr__ = getattr_migration(__name__) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_wrap.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_wrap.py deleted file mode 100644 index c45f193f74ad7385c84f3b935663198415cfaa4b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_wrap.py +++ /dev/null @@ -1,56 +0,0 @@ -import re -from typing import Iterable, List, Tuple - -from ._loop import loop_last -from .cells import cell_len, chop_cells - -re_word = re.compile(r"\s*\S+\s*") - - -def words(text: str) -> Iterable[Tuple[int, int, str]]: - position = 0 - word_match = re_word.match(text, position) - while word_match is not None: - start, end = word_match.span() - word = word_match.group(0) - yield start, end, word - word_match = re_word.match(text, end) - - -def divide_line(text: str, width: int, fold: bool = True) -> List[int]: - divides: List[int] = [] - append = divides.append - line_position = 0 - _cell_len = cell_len - for start, _end, word in words(text): - word_length = _cell_len(word.rstrip()) - if line_position + word_length > width: - if word_length > width: - if fold: - chopped_words = chop_cells(word, max_size=width, position=0) - for last, line in loop_last(chopped_words): - if start: - append(start) - - if last: - line_position = _cell_len(line) - else: - start += len(line) - else: - if start: - append(start) - line_position = _cell_len(word) - elif line_position and start: - append(start) - line_position = _cell_len(word) - else: - line_position += _cell_len(word) - return divides - - -if __name__ == "__main__": # pragma: no cover - from .console import Console - - console = Console(width=10) - console.print("12345 abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ 12345") - print(chop_cells("abcdefghijklmnopqrstuvwxyz", 10, position=2)) diff --git a/spaces/prospectai/email-checker/app.py b/spaces/prospectai/email-checker/app.py deleted file mode 100644 index 67cfe2a0fc45418b0b1c7bc257700eaa9e2ba6d5..0000000000000000000000000000000000000000 --- a/spaces/prospectai/email-checker/app.py +++ /dev/null @@ -1,15 +0,0 @@ -import gradio as gr -import os - -def app(): - # Access the environment variable. - access_token = os.environ["hf_access_token"] - - # Load the private Space. - private_space = gr.load("spaces/prospectai/email-check", api_key = access_token) - - # Run the app. 
- gr.Interface.launch(private_space) - -if __name__ == "__main__": - app() \ No newline at end of file diff --git a/spaces/pustozerov/poc_call_transcription/app_gradio.py b/spaces/pustozerov/poc_call_transcription/app_gradio.py deleted file mode 100644 index 914c3efc7074b7049455cb907ffc9d71047818e2..0000000000000000000000000000000000000000 --- a/spaces/pustozerov/poc_call_transcription/app_gradio.py +++ /dev/null @@ -1,102 +0,0 @@ -import shutil - -import gradio as gr -import random -import os -import numpy as np -from pydub import AudioSegment -from datasets import load_dataset -from scipy.io.wavfile import write - -from modules.diarization.nemo_diarization import diarization -from modules.nlp.nemo_ner import detect_ner -from modules.nlp.nemo_punct_cap import punctuation_capitalization - -FOLDER_WAV_DB = "data/database/" -FOLDER_USER_DATA = "data/user_data/" -FOLDER_USER_DATA_WAV = "data/user_data_wav/" -FOLDER_MANIFESTS = "info/configs/manifests/" -SAMPLE_RATE = 16000 -dataset = load_dataset("pustozerov/crema_d_diarization", split='validation') -os.makedirs(FOLDER_WAV_DB, exist_ok=True) -os.makedirs(FOLDER_MANIFESTS, exist_ok=True) - - -def process_audio(uploaded_file=None): - if uploaded_file: - secondary_audio = False - folder_wav = FOLDER_USER_DATA_WAV - os.makedirs(folder_wav, exist_ok=True) - print(uploaded_file) - shutil.move(uploaded_file, os.path.join(FOLDER_USER_DATA, os.path.basename(uploaded_file))) - uploaded_file = os.path.join(FOLDER_USER_DATA, os.path.basename(uploaded_file)) - print(uploaded_file) - if ".mp3" in uploaded_file: - sound = AudioSegment.from_mp3(uploaded_file) - elif ".ogg" in uploaded_file: - sound = AudioSegment.from_ogg(uploaded_file) - else: - sound = AudioSegment.from_wav(uploaded_file) - save_path = folder_wav + os.path.basename(uploaded_file) - os.makedirs(folder_wav, exist_ok=True) - sound.export(save_path, format="wav", parameters=["-ac", "1"]) - file_name = os.path.basename(save_path).split(".")[0] - result = diarization(save_path) - else: - secondary_audio = True - folder_wav = FOLDER_WAV_DB - os.makedirs(folder_wav, exist_ok=True) - shuffled_dataset = dataset.shuffle(seed=random.randint(0, 100)) - file_name = str(shuffled_dataset["file"][0]).split(".")[0] - audio_bytes = np.array(shuffled_dataset["data"][0]) - audio_bytes_scaled = np.int16(audio_bytes / np.max(np.abs(audio_bytes)) * 32767) - write(os.path.join(folder_wav, file_name + '.wav'), rate=SAMPLE_RATE, data=audio_bytes_scaled) - result = diarization(os.path.join(folder_wav, file_name + '.wav')) - transcript_path = "info/transcripts/pred_rttms/" + file_name + ".txt" - with open(transcript_path) as f: - transcript = f.read() - sentences = result[file_name]["sentences"] - all_strings = "" - for sentence in sentences: - all_strings = all_strings + sentence["sentence"] + "\n" - all_strings = punctuation_capitalization([all_strings])[0] - tagged_string, tags_summary = detect_ner(all_strings) - transcript = transcript + '\n' + tagged_string - with open(transcript_path, 'w') as f: - f.write(transcript) - output = "
<br><br>Number of speakers: %s" % result[file_name]["speaker_count"] + "<br>" \
-             + "Sentences: %s" % len(result[file_name]["sentences"]) + "<br>" \
-             + "Words: %s" % len(result[file_name]["words"]) + "<br>" \
-             + "Found named entities: %s" % tags_summary + "<br><br>"
-    return [audio_output.update(os.path.join(folder_wav, file_name + '.wav'), visible=secondary_audio),
-            output, file_output.update(transcript_path, visible=True)]
-
-
-with gr.Blocks() as demo:
-    gr.HTML('<h1>Call Transcription demo</h1>')
-    gr.HTML('<p>This simple demo shows the possibilities of ASR and NLP in the task of automatic '
-            'speech recognition '
-            'and diarization. It works with mp3, ogg, and wav files. You can randomly pick an audio file with the '
-            'dialogue from the built-in database or try uploading your files.</p>')
-    gr.Markdown('<p>Note: this demo shows a reduced-performance model. To get a full-performance '
-                'neural network or '
-                'develop a system adapted to your task – contact kirill.lozovoi@exposit.com.</p>')
-    audio_input = gr.Audio(source="upload", type="filepath")
-    second_btn = gr.Button('Try uploaded audio file')
-    gr.Markdown('<p>or</p>
      ') - first_btn = gr.Button('Try a random sample from the database') - - # Output zone - audio_output = gr.Audio(visible=False, interactive=True) - text_output = gr.HTML() - file_output = gr.File(label="Download audio transcript", visible=False) - - # noinspection PyTypeChecker - first_btn.click(fn=process_audio, inputs=None, - outputs=[audio_output, text_output, file_output]) - # noinspection PyTypeChecker - second_btn.click(fn=process_audio, inputs=audio_input, outputs=[audio_output, text_output, file_output]) - -demo.launch(share=True) diff --git a/spaces/pyodide-demo/self-hosted/pyerfa-tests.js b/spaces/pyodide-demo/self-hosted/pyerfa-tests.js deleted file mode 100644 index 36cbbf124542a5b20939610947ab6c00f55dfb6b..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/pyerfa-tests.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="pyerfa-tests.data";var REMOTE_PACKAGE_BASE="pyerfa-tests.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","erfa",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/erfa","tests",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:88530,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1430,2771,4018,4958,5903,7223,8327,9638,10766,11682,12592,13730,15066,16235,17041,18066,19010,20227,21307,22217,23345,24260,25245,26307,27283,28441,29578,30469,31227,32245,33200,34276,35157,36072,37017,38046,39067,40042,40967,41972,42923,44132,45162,46348,47597,48675,49605,50514,51632,52660,53726,54811,55842,56608,57498,58588,59638,60591,61487,62572,63667,64629,65638,66585,67585,68544,69535,70502,71466,72497,73326,74312,75423,76108,77180,78096,79144,79988,80810,81769,82894,83929,84898,85843,86519,87311,88232],sizes:[1430,1341,1247,940,945,1320,1104,1311,1128,916,910,1138,1336,1169,806,1025,944,1217,1080,910,1128,915,985,1062,976,1158,1137,891,758,1018,955,1076,881,915,945,1029,1021,975,925,1005,951,1209,1030,1186,1249,1078,930,909,1118,1028,1066,1085,1031,766,890,1090,1050,953,896,1085,1095,962,1009,947,1e3,959,991,967,964,1031,829,986,1111,685,1072,916,1048,844,822,959,1125,1035,969,945,676,792,921,298],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_pyerfa-tests.data")}Module["addRunDependency"]("datafile_pyerfa-tests.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/erfa/tests/__init__.py",start:0,end:64,audio:0},{filename:"/lib/python3.9/site-packages/erfa/tests/helper.py",start:64,end:4386,audio:0},{filename:"/lib/python3.9/site-packages/erfa/tests/test_erfa.py",start:4386,end:24095,audio:0},{filename:"/lib/python3.9/site-packages/erfa/tests/test_ufunc.py",start:24095,end:178867,audio:0}],remote_package_size:92626,package_uuid:"84e41e76-5e0f-4cd3-a385-1ffa782d74f4"})})(); \ No newline at end of file diff --git a/spaces/pytorch/MobileNet_v2/app.py b/spaces/pytorch/MobileNet_v2/app.py deleted file mode 100644 index d5a641584f1b2e09e9231b84857b6ad8b2bf78bd..0000000000000000000000000000000000000000 --- a/spaces/pytorch/MobileNet_v2/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import torch -from PIL import Image -from torchvision import transforms -import gradio as gr -import os - - -os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt") - -model = torch.hub.load('pytorch/vision:v0.9.0', 'mobilenet_v2', pretrained=True) -model.eval() - -torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") - - -def inference(input_image): - preprocess = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ]) - input_tensor = preprocess(input_image) - input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model - - # move the input and model to GPU for speed if available - if torch.cuda.is_available(): - input_batch = input_batch.to('cuda') - model.to('cuda') - - with torch.no_grad(): - output = model(input_batch) - # The output has unnormalized scores. To get probabilities, you can run a softmax on it. - probabilities = torch.nn.functional.softmax(output[0], dim=0) - - # Read the categories - with open("imagenet_classes.txt", "r") as f: - categories = [s.strip() for s in f.readlines()] - # Show top categories per image - top5_prob, top5_catid = torch.topk(probabilities, 5) - result = {} - for i in range(top5_prob.size(0)): - result[categories[top5_catid[i]]] = top5_prob[i].item() - return result - -inputs = gr.inputs.Image(type='pil') -outputs = gr.outputs.Label(type="confidences",num_top_classes=5) - -title = "MOBILENET V2" -description = "Gradio demo for MOBILENET V2, Efficient networks optimized for speed and memory, with residual blocks. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." -article = "

      MobileNetV2: Inverted Residuals and Linear Bottlenecks | Github Repo

      " - -examples = [ - ['dog.jpg'] -] -gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch() \ No newline at end of file diff --git a/spaces/qducnguyen/chatpdf-demo/app.py b/spaces/qducnguyen/chatpdf-demo/app.py deleted file mode 100644 index 2bea8e67eca402a11e0309dc33c3d3a73d08fe46..0000000000000000000000000000000000000000 --- a/spaces/qducnguyen/chatpdf-demo/app.py +++ /dev/null @@ -1,68 +0,0 @@ -from langchain.document_loaders import PyPDFLoader -from langchain.chat_models import ChatOpenAI -from langchain.embeddings import OpenAIEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import Chroma -from langchain.chains import ConversationalRetrievalChain -import gradio as gr -import os -import shutil -from pathlib import Path - - -OPENAPI_KEY = "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" - -def test_predict_fn(query, file): - - print(file.name) - fdir = 'uploads' - if not os.path.exists(fdir): - os.makedirs(fdir) - with open(os.path.join(fdir, Path(file.name).name), 'wb') as f: - with open(file.name, 'rb') as file: - f.write(file.read()) - - print(os.path.join(fdir, Path(file.name).name)) - loader = PyPDFLoader(os.path.join(fdir, Path(file.name).name)) - - documents = loader.load_and_split(CharacterTextSplitter(chunk_size=400, chunk_overlap=50)) - embeddings = OpenAIEmbeddings(openai_api_key=OPENAPI_KEY) - - if os.path.exists("chroma.sqlite3"): - os.remove("chroma.sqlite3") - vectordb = Chroma.from_documents(documents, embedding=embeddings, persist_directory=".") - vectordb.persist() - - - # query = "How did BERTs be trained?" - # query = "What year did BERTs be trained?" - # query = "What are authors of BERTs?" - # query = "Thuật toán nào có kết quả tốt nhất?" 
- - - pdf_qa = ConversationalRetrievalChain.from_llm( - ChatOpenAI(temperature=0.9, - max_tokens = 200, - model_name="gpt-3.5-turbo-16k", - openai_api_key=OPENAPI_KEY), - vectordb.as_retriever(), - return_source_documents=True, - ) - - result = pdf_qa({"question": query, "chat_history": ""}) - final_result = result["answer"] - - return final_result - - -with gr.Blocks() as demo: - - gr.Markdown("PDF Interaction with ChatGPT demo.") - file_input = gr.File(file_types=[".pdf"], - file_count="single") - query = gr.Textbox(label="Query") - submit_btn = gr.Button(label="Submit") - output = gr.Textbox(label="Answer") - submit_btn.click(fn=test_predict_fn, inputs=[query, file_input], outputs=output) - -demo.launch() diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/live_audio/audio_io.py b/spaces/qingxu98/gpt-academic/crazy_functions/live_audio/audio_io.py deleted file mode 100644 index 3ff83a66e8d9f0bb15250f1c3c2b5ea36745ff55..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/crazy_functions/live_audio/audio_io.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy as np -from scipy import interpolate - -def Singleton(cls): - _instance = {} - - def _singleton(*args, **kargs): - if cls not in _instance: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] - - return _singleton - - -@Singleton -class RealtimeAudioDistribution(): - def __init__(self) -> None: - self.data = {} - self.max_len = 1024*1024 - self.rate = 48000 # read-only; number of samples per second - - def clean_up(self): - self.data = {} - - def feed(self, uuid, audio): - self.rate, audio_ = audio - # print('feed', len(audio_), audio_[-25:]) - if uuid not in self.data: - self.data[uuid] = audio_ - else: - new_arr = np.concatenate((self.data[uuid], audio_)) - if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:] - self.data[uuid] = new_arr - - def read(self, uuid): - if uuid in self.data: - res = self.data.pop(uuid) - print('\r read-', len(res), '-', max(res), end='', flush=True) - else: - res = None - return res - -def change_sample_rate(audio, old_sr, new_sr): - duration = audio.shape[0] / old_sr - - time_old = np.linspace(0, duration, audio.shape[0]) - time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr)) - - interpolator = interpolate.interp1d(time_old, audio.T) - new_audio = interpolator(time_new).T - return new_audio.astype(np.int16) \ No newline at end of file diff --git a/spaces/qtoino/form_matcher/app_files.py b/spaces/qtoino/form_matcher/app_files.py deleted file mode 100644 index 7ec30ca903afcb99c8877380d531b28e9ce5e059..0000000000000000000000000000000000000000 --- a/spaces/qtoino/form_matcher/app_files.py +++ /dev/null @@ -1,173 +0,0 @@ -import gradio as gr -from bs4 import BeautifulSoup -import json -import time -import os -from transformers import AutoTokenizer, pipeline - -models = { - "model_n1": "sileod/deberta-v3-base-tasksource-nli", - # "model_n2": "roberta-large-mnli", - # "model_n3": "facebook/bart-large-mnli", - # "model_n4": "cross-encoder/nli-deberta-v3-xsmall" -} -def open_html(file): - with open(file.name, "r") as f: - content = f.read() - return content - -def find_form_fields(html_content): - - soup = BeautifulSoup(html_content, 'html.parser') - - # find all form tags - forms = soup.find_all('form') - - form_fields = [] - - for form in forms: - # find all input and select tags within each form - input_tags = form.find_all('input') - select_tags = form.find_all('select') - - for tag in input_tags: - form_fields.append(str(tag)) - - for tag in select_tags:
form_fields.append(str(tag)) - - # Convert the list to a single string for display - return form_fields - -def load_json(json_file): - with open(json_file, 'r') as f: - data = json.load(f) - return data - -def classify_lines(text, candidate_labels, model_name): - start_time = time.time() # Start measuring time - classifier = pipeline('zero-shot-classification', model=model_name) - - # Check if the text is already a list or if it needs splitting - if isinstance(text, list): - lines = text - else: - lines = text.split('\n') - - classified_lines = [] - for line in lines: - if line.strip() and (line.strip().startswith("" - output_file.write(line + '\n') - output_content.append(line + '\n') - - - end_time = time.time() # Stop measuring time - execution_time = end_time - start_time # Calculate execution time - return output_content, execution_time - -def retrieve_fields(data, path=''): - """Recursively retrieve all fields from a given JSON structure and prompt for filling.""" - fields = {} - - # If the data is a dictionary - if isinstance(data, dict): - for key, value in data.items(): - # Construct the updated path for nested structures - new_path = f"{path}.{key}" if path else key - fields.update(retrieve_fields(value, new_path)) - - # If the data is a list, iterate over its items - elif isinstance(data, list): - for index, item in enumerate(data): - new_path = f"{path}[{index}]" - fields.update(retrieve_fields(item, new_path)) - - # If the data is a simple type (str, int, etc.) - else: - prompt = f"Please fill in the {path} field." if not data else data - fields[path] = prompt - - return fields - -def retrieve_fields_from_file(file_path): - """Load JSON data from a file, then retrieve all fields and prompt for filling.""" - with open(file_path.name, 'r') as f: - data = f.read() - - return retrieve_fields(json.loads(data)) - - -def process_files(html_file, json_file): - # This function will process the files. - # Replace this with your own logic. 
- output_file_path = "./output.html" - # Open and read the files - html_content = open_html(html_file) - #print(html_content) - html_inputs = find_form_fields(html_content) - - json_content = retrieve_fields_from_file(json_file) - #Classificar os inputs do json para ver em que tipo de input ["text", "radio", "checkbox", "button", "date"] - - # Classify lines and measure execution time - for model_name in models.values(): - tokenizer = AutoTokenizer.from_pretrained(model_name) - - html_classified_lines, html_execution_time = classify_lines(html_inputs, ["text", "radio", "checkbox", "button", "date"], model_name) - - json_classified_lines, json_execution_time = classify_lines_json(html_content, json_content, list(json_content.keys()), model_name, output_file_path) - - # print(str(html_execution_time) + " - " + str(html_classified_lines)) - # print(str(json_execution_time) + " - " + str(json_classified_lines)) - #FILL HERE - - #print(type(json_classified_lines)) - # Assuming your function returns the processed HTML - #json_classified_lines - #return '\n'.join(map(str, html_classified_lines)) - return '\n'.join(map(str, json_classified_lines)) - -iface = gr.Interface(fn=process_files, - inputs=[gr.inputs.File(label="Upload HTML File"), gr.inputs.File(label="Upload JSON File")], - outputs="text", - examples=[ - # ["./examples/form0.html", "./examples/form0_answer.json"], - ["./public/form1.html", "./public/form1_answer.json"], - ["./public/form2.html", "./public/form2_answer.json"], - ["./public/form3.html", "./public/form3_answer.json"], - ["./public/form4.html", "./public/form4_answer.json"] - ]) - - -iface.launch() diff --git a/spaces/quidiaMuxgu/Expedit-SAM/AlphaControls V15.02 BETA (D5-D10.3 BCB6-BCB10.3) Retail [PORTABLE].md b/spaces/quidiaMuxgu/Expedit-SAM/AlphaControls V15.02 BETA (D5-D10.3 BCB6-BCB10.3) Retail [PORTABLE].md deleted file mode 100644 index d60f114a9e716f80d99bdedafac482e39507fbe1..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/AlphaControls V15.02 BETA (D5-D10.3 BCB6-BCB10.3) Retail [PORTABLE].md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

alphacontrols package for delphi / c++ builder 5-10.3, x64 edition (full source). alphacontrols v14.25 stable (d5-d10.3/bcb6-bcb10.3) retail. alphacontrols - a set of common and some unique components that support skins (alphaskins).

      -

customizable alphacontrols v15.02 beta (d5-d10.3/bcb6-bcb10.3) retail.

      -

      AlphaControls v15.02 BETA (D5-D10.3 BCB6-BCB10.3) Retail


      Download Zip === https://geags.com/2uCrx4



      -

alphacontrols v14.22 stable (d5-d10.3/bcb6-bcb10.3) retail. fastcube fmx 1.4.12 professional with full source. requires a laptop or desktop computer running windows 7 or later. alphacontrols package for delphi / c++ builder (full source) - a set of common and some unique components that support skins (alphaskins).

      -

alphacontrols v15.02 beta (d5-d10.3 bcb6-bcb10.3) retail. alphacontrols v14.37 stable (d5-d10.3/bcb6-bcb10.3) retail. 02/20/20--22:03: alphacontrols v15.03 beta (d5-d10.3).

      -


      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Capture One 20 Pro 13.0.2.13 With Keygen EXCLUSIVE.md b/spaces/quidiaMuxgu/Expedit-SAM/Capture One 20 Pro 13.0.2.13 With Keygen EXCLUSIVE.md deleted file mode 100644 index 4bc56a82c158eefac1b1e66ed2e3c39eb6cded9e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Capture One 20 Pro 13.0.2.13 With Keygen EXCLUSIVE.md +++ /dev/null @@ -1,10 +0,0 @@ - -

      real image quality with low-noise settings
      with its low-noise settings, capture one gives you the highest image quality with settings you can actually use. low-noise settings are typically for exposing a greater portion of the image. this more aggressive exposure can be beneficial in situations such as dim scenes, daylight, and bright scenes, where you need to hold the darkest areas at their native values. with the other settings, you’ll have better control over brightness, and you can achieve your desired exposure more precisely.

      -

      Capture One 20 Pro 13.0.2.13 with Keygen


Download File === https://geags.com/2uCrHC



      -

      capture one is the professional choice in imaging software. it offers powerful image editing tools for professional photographers with a simple, intuitive user interface that makes it easy to get great-looking images in a time-saving workflow.

      -

capture one is a professional raw converter offering you ultimate image quality with beautiful colors and incredible detail for more than 500 high-end cameras. it offers state-of-the-art tethered capture, powerful digital asset management, extensive adjustment tools and a flexible workflow through customizable workspaces.

      -


      -

      -

capture one for photographers is a raw converter for popular camera raw file formats: cr2 (canon), nef (nikon), pef (pentax) and orf (olympus). it has a sleek, simple interface and a range of functions to help you edit the raw image data into breathtakingly rich and dynamic images. it provides total control over all settings, and makes it easy to get great-looking images in a time-saving workflow.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Fullmetal-ALchemist-BrotherHood[720p]-[Dual-Audio][eng-subbed] Neroextreme NTRG PATCHED.md b/spaces/quidiaMuxgu/Expedit-SAM/Fullmetal-ALchemist-BrotherHood[720p]-[Dual-Audio][eng-subbed] Neroextreme NTRG PATCHED.md deleted file mode 100644 index 1634a3cec1e796c770cbb14923deacc866bbfa78..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Fullmetal-ALchemist-BrotherHood[720p]-[Dual-Audio][eng-subbed] Neroextreme NTRG PATCHED.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Fullmetal-ALchemist-BrotherHood[720p]-[Dual-Audio][eng-subbed] Neroextreme NTRG


      Download ✦✦✦ https://geags.com/2uCsI4



      - -long live asap digital booklet. Limited double vinyl LP pressing in gatefold jacket including digital download. ... Fullmetal-ALchemist-BrotherHood[720p]-[Dual-Audio][eng-subbed] Neroextreme NTRG · onbelay v2 full crack 31 1fdad05405
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mr Groper South Of The Border !!BETTER!!.md b/spaces/quidiaMuxgu/Expedit-SAM/Mr Groper South Of The Border !!BETTER!!.md deleted file mode 100644 index 323dd10d349b98afaa7f6b78fab763e012c1afcc..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Mr Groper South Of The Border !!BETTER!!.md +++ /dev/null @@ -1,5 +0,0 @@ - -

in order to develop a model for projecting the distribution of speckled hind and warsaw grouper, an abundance estimate was calculated from the hbs data for each species by month, year, and area fished. area fished was typically based on the geographic region in which the vessel was operating, and was categorized as the coastal region or the shelf-edge. coastal regions of the u.s. east coast included the coastal region from cape may, new jersey to the florida keys, and the shelf-edge from cape hatteras, north carolina to northeast florida. the shelf-edge was defined as the shelf break between north carolina and florida, and offshore areas beyond the shelf-edge between the shelf-edge and five nautical miles. all other areas were classified as offshore. abundance was expressed as the number of vessels fishing in a given area. the abundance estimate for each area was standardized by year, month, and species. average abundance within each category was used as the response in a regression analysis.

the shelf-edge was divided into three depth strata: 100 to 150, 150 to 200, and 200 to 250 fathoms (about 183 to 457 meters), based on the distribution of the majority of speckled hind and warsaw grouper. the bottom depth was assumed to be the same as the bottom type where the vessel fishing in the area was located. the density of vessels within each category was used as the response in a regression analysis. to develop a model for the probability of fishing in each category, we assumed that the distribution of bottom types in each category was similar to the distribution of bottom types in the whole shelf-edge. the regression analysis was therefore performed on the entire shelf-edge, and the probabilities were then computed by summing the contributions of the three different depths. this approach was necessary to predict the bottom types in the offshore area because of the sparsity of the offshore data. each vessel was assumed to fish in the offshore area with the same probability as it would fish in the shelf-edge.
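To make the two modelling steps above concrete, here is a minimal Python sketch of the standardization and regression they describe. It is an illustration only, not the authors' code: the input file `hbs_trips.csv` and the column names (`species`, `year`, `month`, `area`) are hypothetical stand-ins for the HBS data.

```python
import pandas as pd
import statsmodels.formula.api as smf

# Hypothetical headboat-survey records: one row per vessel fishing event,
# with columns species, year, month, area (coastal / shelf-edge / offshore).
hbs = pd.read_csv("hbs_trips.csv")

# Abundance = number of vessels fishing in a given area, by species/year/month.
abundance = (hbs.groupby(["species", "year", "month", "area"])
                .size()
                .reset_index(name="n_vessels"))

# Standardize within each species/year/month so areas are comparable.
abundance["std_abundance"] = (
    abundance.groupby(["species", "year", "month"])["n_vessels"]
             .transform(lambda v: v / v.mean())
)

# Average standardized abundance per category is the regression response.
response = (abundance.groupby(["species", "area"])["std_abundance"]
                     .mean()
                     .reset_index(name="avg_abundance"))

# Simple linear model of average abundance against area and species.
model = smf.ols("avg_abundance ~ C(area) + C(species)", data=response).fit()
print(model.summary())
```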

      -

      Mr Groper South Of The Border


      DOWNLOAD >>>>> https://geags.com/2uCsyH



      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/r3gm/AICoverGen/src/mdx.py b/spaces/r3gm/AICoverGen/src/mdx.py deleted file mode 100644 index 448e65d45cb1272c06f3ffa015cef8abd1257d9a..0000000000000000000000000000000000000000 --- a/spaces/r3gm/AICoverGen/src/mdx.py +++ /dev/null @@ -1,292 +0,0 @@ -import gc -import hashlib -import os -import queue -import threading -import warnings - -import librosa -import numpy as np -import onnxruntime as ort -import soundfile as sf -import torch -from tqdm import tqdm - -warnings.filterwarnings("ignore") -stem_naming = {'Vocals': 'Instrumental', 'Other': 'Instruments', 'Instrumental': 'Vocals', 'Drums': 'Drumless', 'Bass': 'Bassless'} - - -class MDXModel: - def __init__(self, device, dim_f, dim_t, n_fft, hop=1024, stem_name=None, compensation=1.000): - self.dim_f = dim_f - self.dim_t = dim_t - self.dim_c = 4 - self.n_fft = n_fft - self.hop = hop - self.stem_name = stem_name - self.compensation = compensation - - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device) - - out_c = self.dim_c - - self.freq_pad = torch.zeros([1, out_c, self.n_bins - self.dim_f, self.dim_t]).to(device) - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, return_complex=True) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, 4, self.n_bins, self.dim_t]) - return x[:, :, :self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = self.freq_pad.repeat([x.shape[0], 1, 1, 1]) if freq_pad is None else freq_pad - x = torch.cat([x, freq_pad], -2) - # c = 4*2 if self.target_name=='*' else 2 - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, 2, self.n_bins, self.dim_t]) - x = x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True) - return x.reshape([-1, 2, self.chunk_size]) - - -class MDX: - DEFAULT_SR = 44100 - # Unit: seconds - DEFAULT_CHUNK_SIZE = 0 * DEFAULT_SR - DEFAULT_MARGIN_SIZE = 1 * DEFAULT_SR - - DEFAULT_PROCESSOR = 0 - - def __init__(self, model_path: str, params: MDXModel, processor=DEFAULT_PROCESSOR): - - # Set the device and the provider (CPU or CUDA) - #self.device = torch.device(f'cuda:{processor}') if processor >= 0 else torch.device('cpu') - self.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') - #self.provider = ['CUDAExecutionProvider'] if processor >= 0 else ['CPUExecutionProvider'] - self.provider = ['CPUExecutionProvider'] - - self.model = params - - # Load the ONNX model using ONNX Runtime - self.ort = ort.InferenceSession(model_path, providers=self.provider) - # Preload the model for faster performance - self.ort.run(None, {'input': torch.rand(1, 4, params.dim_f, params.dim_t).numpy()}) - self.process = lambda spec: self.ort.run(None, {'input': spec.cpu().numpy()})[0] - - self.prog = None - - @staticmethod - def get_hash(model_path): - try: - with open(model_path, 'rb') as f: - f.seek(- 10000 * 1024, 2) - model_hash = hashlib.md5(f.read()).hexdigest() - except: - model_hash = hashlib.md5(open(model_path, 'rb').read()).hexdigest() - - return model_hash - - @staticmethod - def segment(wave, combine=True, chunk_size=DEFAULT_CHUNK_SIZE, margin_size=DEFAULT_MARGIN_SIZE): - """ - Segment or join segmented wave 
array - - Args: - wave: (np.array) Wave array to be segmented or joined - combine: (bool) If True, combines segmented wave array. If False, segments wave array. - chunk_size: (int) Size of each segment (in samples) - margin_size: (int) Size of margin between segments (in samples) - - Returns: - numpy array: Segmented or joined wave array - """ - - if combine: - processed_wave = None # Initializing as None instead of [] for later numpy array concatenation - for segment_count, segment in enumerate(wave): - start = 0 if segment_count == 0 else margin_size - end = None if segment_count == len(wave) - 1 else -margin_size - if margin_size == 0: - end = None - if processed_wave is None: # Create array for first segment - processed_wave = segment[:, start:end] - else: # Concatenate to existing array for subsequent segments - processed_wave = np.concatenate((processed_wave, segment[:, start:end]), axis=-1) - - else: - processed_wave = [] - sample_count = wave.shape[-1] - - if chunk_size <= 0 or chunk_size > sample_count: - chunk_size = sample_count - - if margin_size > chunk_size: - margin_size = chunk_size - - for segment_count, skip in enumerate(range(0, sample_count, chunk_size)): - - margin = 0 if segment_count == 0 else margin_size - end = min(skip + chunk_size + margin_size, sample_count) - start = skip - margin - - cut = wave[:, start:end].copy() - processed_wave.append(cut) - - if end == sample_count: - break - - return processed_wave - - def pad_wave(self, wave): - """ - Pad the wave array to match the required chunk size - - Args: - wave: (np.array) Wave array to be padded - - Returns: - tuple: (padded_wave, pad, trim) - - padded_wave: Padded wave array - - pad: Number of samples that were padded - - trim: Number of samples that were trimmed - """ - n_sample = wave.shape[1] - trim = self.model.n_fft // 2 - gen_size = self.model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - - # Padded wave - wave_p = np.concatenate((np.zeros((2, trim)), wave, np.zeros((2, pad)), np.zeros((2, trim))), 1) - - mix_waves = [] - for i in range(0, n_sample + pad, gen_size): - waves = np.array(wave_p[:, i:i + self.model.chunk_size]) - mix_waves.append(waves) - - print(self.device) - - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(self.device) - - return mix_waves, pad, trim - - def _process_wave(self, mix_waves, trim, pad, q: queue.Queue, _id: int): - """ - Process each wave segment in a multi-threaded environment - - Args: - mix_waves: (torch.Tensor) Wave segments to be processed - trim: (int) Number of samples trimmed during padding - pad: (int) Number of samples padded during padding - q: (queue.Queue) Queue to hold the processed wave segments - _id: (int) Identifier of the processed wave segment - - Returns: - numpy array: Processed wave segment - """ - mix_waves = mix_waves.split(1) - with torch.no_grad(): - pw = [] - for mix_wave in mix_waves: - self.prog.update() - spec = self.model.stft(mix_wave) - processed_spec = torch.tensor(self.process(spec)) - processed_wav = self.model.istft(processed_spec.to(self.device)) - processed_wav = processed_wav[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).cpu().numpy() - pw.append(processed_wav) - processed_signal = np.concatenate(pw, axis=-1)[:, :-pad] - q.put({_id: processed_signal}) - return processed_signal - - def process_wave(self, wave: np.array, mt_threads=1): - """ - Process the wave array in a multi-threaded environment - - Args: - wave: (np.array) Wave array to be processed - mt_threads: (int) Number of threads to be used for 
processing - - Returns: - numpy array: Processed wave array - """ - self.prog = tqdm(total=0) - chunk = wave.shape[-1] // mt_threads - waves = self.segment(wave, False, chunk) - - # Create a queue to hold the processed wave segments - q = queue.Queue() - threads = [] - for c, batch in enumerate(waves): - mix_waves, pad, trim = self.pad_wave(batch) - self.prog.total = len(mix_waves) * mt_threads - thread = threading.Thread(target=self._process_wave, args=(mix_waves, trim, pad, q, c)) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() - self.prog.close() - - processed_batches = [] - while not q.empty(): - processed_batches.append(q.get()) - processed_batches = [list(wave.values())[0] for wave in - sorted(processed_batches, key=lambda d: list(d.keys())[0])] - assert len(processed_batches) == len(waves), 'Incomplete processed batches, please reduce batch size!' - return self.segment(processed_batches, True, chunk) - - -def run_mdx(model_params, output_dir, model_path, filename, exclude_main=False, exclude_inversion=False, suffix=None, invert_suffix=None, denoise=False, keep_orig=True, m_threads=2): - device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') - - #device_properties = torch.cuda.get_device_properties(device) - print("Device", device) - vram_gb = 12 #device_properties.total_memory / 1024**3 - m_threads = 1 if vram_gb < 8 else 2 - - model_hash = MDX.get_hash(model_path) - mp = model_params.get(model_hash) - model = MDXModel( - device, - dim_f=mp["mdx_dim_f_set"], - dim_t=2 ** mp["mdx_dim_t_set"], - n_fft=mp["mdx_n_fft_scale_set"], - stem_name=mp["primary_stem"], - compensation=mp["compensate"] - ) - - mdx_sess = MDX(model_path, model) - wave, sr = librosa.load(filename, mono=False, sr=44100) - # normalizing input wave gives better output - peak = max(np.max(wave), abs(np.min(wave))) - wave /= peak - if denoise: - wave_processed = -(mdx_sess.process_wave(-wave, m_threads)) + (mdx_sess.process_wave(wave, m_threads)) - wave_processed *= 0.5 - else: - wave_processed = mdx_sess.process_wave(wave, m_threads) - # return to previous peak - wave_processed *= peak - stem_name = model.stem_name if suffix is None else suffix - - main_filepath = None - if not exclude_main: - main_filepath = os.path.join(output_dir, f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.wav") - sf.write(main_filepath, wave_processed.T, sr) - - invert_filepath = None - if not exclude_inversion: - diff_stem_name = stem_naming.get(stem_name) if invert_suffix is None else invert_suffix - stem_name = f"{stem_name}_diff" if diff_stem_name is None else diff_stem_name - invert_filepath = os.path.join(output_dir, f"{os.path.basename(os.path.splitext(filename)[0])}_{stem_name}.wav") - sf.write(invert_filepath, (-wave_processed.T * model.compensation) + wave.T, sr) - - if not keep_orig: - os.remove(filename) - - del mdx_sess, wave_processed, wave - gc.collect() - return main_filepath, invert_filepath diff --git a/spaces/r3gm/RVC_HF/Applio-RVC-Fork/utils/dependency.py b/spaces/r3gm/RVC_HF/Applio-RVC-Fork/utils/dependency.py deleted file mode 100644 index b70338b02d31b1ef455fbac817d418d328db518d..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/Applio-RVC-Fork/utils/dependency.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import csv -import shutil -import tarfile -import subprocess -from pathlib import Path -from datetime import datetime - -def install_packages_but_jank_af(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 
'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - print('Packages up to date.') - - -def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage): - # Mounting Google Drive - if not ForceTemporaryStorage: - from google.colab import drive - - if not os.path.exists('/content/drive'): - drive.mount('/content/drive') - else: - print('Drive is already mounted. Proceeding...') - - # Function to install dependencies with progress - def install_packages(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - - print('Packages up to date.') - - # Function to scan a directory and writes filenames and timestamps - def scan_and_write(base_path, output_file): - with open(output_file, 'w', newline='') as f: - writer = csv.writer(f) - for dirpath, dirs, files in os.walk(base_path): - for filename in files: - fname = os.path.join(dirpath, filename) - try: - mtime = os.path.getmtime(fname) - writer.writerow([fname, mtime]) - except Exception as e: - print(f'Skipping irrelevant nonexistent file {fname}: {str(e)}') - print(f'Finished recording filesystem timestamps to {output_file}.') - - # Function to compare files - def compare_files(old_file, new_file): - old_files = {} - new_files = {} - - with open(old_file, 'r') as f: - reader = csv.reader(f) - old_files = {rows[0]:rows[1] for rows in reader} - - with open(new_file, 'r') as f: - reader = csv.reader(f) - new_files = {rows[0]:rows[1] for rows in reader} - - removed_files = old_files.keys() - new_files.keys() - added_files = new_files.keys() - old_files.keys() - unchanged_files = old_files.keys() & new_files.keys() - - changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]} - - for file in removed_files: - print(f'File has been removed: {file}') - - for file in changed_files: - print(f'File has been updated: {file}') - - return list(added_files) + list(changed_files) - - # Check if CachedRVC.tar.gz exists - if 
ForceTemporaryStorage: - file_path = '/content/CachedRVC.tar.gz' - else: - file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz' - - content_file_path = '/content/CachedRVC.tar.gz' - extract_path = '/' - - if not os.path.exists(file_path): - folder_path = os.path.dirname(file_path) - os.makedirs(folder_path, exist_ok=True) - print('No cached dependency install found. Attempting to download GitHub backup..') - - try: - download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz" - subprocess.run(["wget", "-O", file_path, download_url]) - print('Download completed successfully!') - except Exception as e: - print('Download failed:', str(e)) - - # Delete the failed download file - if os.path.exists(file_path): - os.remove(file_path) - print('Failed download file deleted. Continuing manual backup..') - - if Path(file_path).exists(): - if ForceTemporaryStorage: - print('Finished downloading CachedRVC.tar.gz.') - else: - print('CachedRVC.tar.gz found on Google Drive. Proceeding to copy and extract...') - - # Check if ForceTemporaryStorage is True and skip copying if it is - if ForceTemporaryStorage: - pass - else: - shutil.copy(file_path, content_file_path) - - print('Beginning backup copy operation...') - - with tarfile.open(content_file_path, 'r:gz') as tar: - for member in tar.getmembers(): - target_path = os.path.join(extract_path, member.name) - try: - tar.extract(member, extract_path) - except Exception as e: - print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate') - ForceUpdateDependencies = True - print(f'Extraction of {content_file_path} to {extract_path} completed.') - - if ForceUpdateDependencies: - install_packages() - ForceUpdateDependencies = False - else: - print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...') - scan_and_write('/usr/', '/content/usr_files.csv') - - install_packages() - - scan_and_write('/usr/', '/content/usr_files_new.csv') - changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv') - - with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar: - for file in changed_files: - new_tar.add(file) - print(f'Added to tar: {file}') - - os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True) - shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz') - print('Updated CachedRVC.tar.gz copied to Google Drive.') - print('Dependencies fully up to date; future runs should be faster.') - diff --git a/spaces/raedeXanto/academic-chatgpt-beta/7 Data Recovery Software Crack with Serial Key and Registration Code - EaseUS[2].md b/spaces/raedeXanto/academic-chatgpt-beta/7 Data Recovery Software Crack with Serial Key and Registration Code - EaseUS[2].md deleted file mode 100644 index cd992608eb10dd95d65408202fdf4e9ea08e3a8e..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/7 Data Recovery Software Crack with Serial Key and Registration Code - EaseUS[2].md +++ /dev/null @@ -1,109 +0,0 @@ -
      -

      7 Data Recovery Serial Key: What Is It and How to Avoid It

      -

Have you ever lost your important data due to accidental deletion, formatting, virus attack, or other reasons? If so, you might have searched for data recovery software online to get your files back. One popular data recovery program is 7 Data Recovery, which claims to recover data from various situations.

      -

However, 7 Data Recovery is not free software. You need to pay for a license code to use its full features. That's why some people look for a 7 data recovery serial key, which is a cracked version of the software that can be downloaded from unauthorized websites.

      -

      7 data recovery serial key


      DOWNLOAD 🗸 https://tinourl.com/2uL0nM



      -

      But is it safe and legal to use a 7 data recovery serial key? What are the risks of using cracked software? And is there a better alternative to recover your data without breaking the law? In this article, we will answer these questions and provide you with a safer and more reliable solution.

      -

      Risks of Using 7 Data Recovery Serial Key

      -

      Before you download a 7 data recovery serial key from the internet, you should be aware of the potential risks of using cracked software. Here are some of the dangers that you might face:

      -
• Virus infection: The websites that offer cracked software are often infected with malware, spyware, or ransomware that can harm your computer and steal your personal information.
• Data loss: The cracked software may not work properly or even damage your data further. You may end up losing more data than you can recover.
• Privacy breach: The cracked software may contain hidden backdoors or trackers that can monitor your online activities and send your data to hackers or advertisers.
• Legal issues: Using cracked software is illegal and violates the intellectual property rights of the software developers. You may face legal consequences such as fines or lawsuits if you are caught using pirated software.
      -

      As you can see, using a 7 data recovery serial key is not worth the risk. You may end up losing more than you gain. Instead of risking your data, computer, privacy, and legal status, why not use a safer and legal alternative?

      -

      How to Recover Data Safely and Legally

      -

      If you want to recover your data without compromising your security and integrity, we recommend you use Recoverit, a reliable and free data recovery tool that can help you restore your lost files in various scenarios.

      -

      Features and Benefits of Recoverit

      -

Recoverit is professional data recovery software that has been trusted by millions of users around the world. Here are some of the features and benefits of using Recoverit over a 7 data recovery serial key:

      -

      7 data recovery suite registration code
      -7 data recovery crack download
      -7 data recovery license key free
      -7 data recovery activation key
      -7 data recovery full version with key
      -7 data recovery software keygen
      -7 data recovery serial number
      -7 data recovery product key
      -7 data recovery pro key
      -7 data recovery enterprise key
      -7 data recovery username and registration code
      -7 data recovery patch
      -7 data recovery toolkit key
      -7 data recovery master key
      -7 data recovery ultimate key
      -7 data recovery working key
      -7 data recovery genuine key
      -7 data recovery premium key
      -7 data recovery professional key
      -7 data recovery home key
      -7 data recovery wizard key
      -7 data recovery unlock code
      -7 data recovery offline activation code
      -7 data recovery online activation code
      -7 data recovery registration code and email
      -7 data recovery crack file
      -7 data recovery crack version
      -7 data recovery crack free download
      -7 data recovery crack only
      -7 data recovery crack for windows
      -7 data recovery crack for mac
      -7 data recovery software with crack
      -7 data recovery software free download with crack
      -7 data recovery software full crack
      -7 data recovery software crack rar
      -7 data recovery suite with crack
      -7 data recovery suite free download with crack
      -7 data recovery suite full crack
      -7 data recovery suite crack zip
      -7 data recovery tool with crack
      -7 data recovery tool free download with crack
      -7 data recovery tool full crack
      -7 data recovery tool crack exe
      -How to get a free serial key for 7-data-recovery-software?
      -How to activate a serial key for the latest version of the software?
      -How to recover lost or deleted serial keys for the software?
      -How to find a valid serial key for the software online?
      -How to use a serial key generator for the software?
      -How to fix a serial key error for the software?

      -
• Safe and legal: Recoverit is legitimate software that is free from viruses, malware, and spyware. You can download it from its official website without worrying about any legal issues.
• Powerful and effective: Recoverit can recover over 1000 types of files from various devices such as hard drives, USB drives, memory cards, cameras, etc. It can recover data from different situations such as deletion, formatting, partition loss, virus attack, system crash, etc.
• Easy and fast: Recoverit has a user-friendly interface that guides you through the recovery process step by step. You can preview your files before recovering them and select the ones you want to restore. The recovery process is fast and efficient.
• Free trial: Recoverit offers a free trial version that allows you to scan and preview your files for free. You can also recover up to 100 MB of data for free. If you want to recover more data, you can upgrade to the full version at an affordable price.
      -

      How to Use Recoverit

      -

      To use Recoverit to recover your data, you need to follow these simple steps:

      -
1. Download and install Recoverit: Go to Recoverit's official website and download the free trial version of the software. Install it on your computer and launch it.
2. Select a location: On the home screen, select the location where you lost your data. It can be a hard drive partition, an external device, or a specific folder. Click "Start" to begin scanning.
3. Scan and preview your files: Wait for the scanning process to complete. You can pause or stop it at any time. You can also filter your files by type or path. Preview your files by double-clicking them.
4. Recover your files: Select the files you want to recover and click "Recover". Choose a different location to save your recovered files. Do not save them in the same location where you lost them.
      -

      Tips to Prevent Data Loss and Protect Your Data

      -

      Data loss can happen anytime due to various reasons. To prevent data loss and protect your data, here are some tips that you should follow:

      -

      Backup Your Data Regularly

      -

The best way to prevent data loss is to back up your data regularly. You can use an external hard drive, a cloud service, or backup software to create copies of your important files. You should also keep multiple backups in different locations in case one backup fails or gets damaged.
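If you prefer to automate this, a small script can create a timestamped copy of a folder on a schedule. The sketch below is a minimal illustration; the source and destination paths are placeholders you would replace with your own.

```python
import shutil
from datetime import datetime
from pathlib import Path

def backup_folder(source: str, backup_root: str) -> Path:
    """Copy `source` into a new timestamped folder under `backup_root`."""
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    destination = Path(backup_root) / f"backup-{stamp}"
    shutil.copytree(source, destination)  # raises if destination already exists
    return destination

# Example: keep one copy on an external drive (paths are placeholders).
backup_folder("C:/Users/me/Documents", "D:/backups")
```

Keeping each backup in its own timestamped folder means an accidental deletion never overwrites an older good copy.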

      -

      Use Antivirus Software and Update It Frequently

      -

Virus infection is one of the common causes of data loss. To protect your computer from viruses, malware, or ransomware, you should use reliable antivirus software and update it frequently. You should also scan your computer regularly and remove any suspicious files or programs.

      -

      Avoid Opening Suspicious Links and Attachments

      -

      Another way to avoid virus infection is to avoid opening suspicious links or attachments that you receive via email or social media. These links or attachments may contain malicious code that can infect your computer or encrypt your files. You should also avoid visiting untrusted websites that may contain harmful content.

      -

      Conclusion

      -

      In conclusion, using a 7 data recovery serial key is not a good idea if you want to recover your lost data safely and legally. You may expose yourself to various risks such as virus infection, data loss, privacy breach, or legal issues.

      -

      A better alternative is to use Recoverit, a reliable and free data recovery tool that can help you restore your lost files in various scenarios without compromising your security and integrity.

      -

      If you want to try Recoverit for free, click on the button below and download it now!

      - Download Recoverit Free Trial Now! -

      Frequently Asked Questions

      -
1. What is 7 Data Recovery?

  7 Data Recovery is a popular data recovery software that claims to recover data from various situations such as deletion, formatting, partition loss, virus attack, etc.

2. What is a 7 data recovery serial key?

  A 7 data recovery serial key is a cracked version of the software that can be downloaded from unauthorized websites. It is supposed to unlock the full features of the software without paying for a license code.

3. Is it safe and legal to use a 7 data recovery serial key?

  No, it is neither safe nor legal to use a 7 data recovery serial key. You may expose yourself to various risks such as virus infection, data loss, privacy breach, or legal issues. Using cracked software is illegal and violates the intellectual property rights of the software developers.

4. What is the best alternative to a 7 data recovery serial key?

  The best alternative to a 7 data recovery serial key is Recoverit, a reliable and free data recovery tool that can help you recover your lost files in various scenarios without compromising your security and integrity. You can download it from its official website and use it for free.
-

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Baraha 10.10 Product Key Crack Discover the Secrets of the Most Popular Indian Language Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Baraha 10.10 Product Key Crack Discover the Secrets of the Most Popular Indian Language Software.md deleted file mode 100644 index 7262f35045b4fb0f84092c9cd9a8efb967f6573a..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Baraha 10.10 Product Key Crack Discover the Secrets of the Most Popular Indian Language Software.md +++ /dev/null @@ -1,142 +0,0 @@ - -

      What is Baraha 10.10 and why do you need it?

      -

If you are looking for software that can help you create and edit content in Indian languages, you might have heard of Baraha 10.10. But what exactly is Baraha 10.10, and what can it do for you? In this article, we will answer these questions and more.

      -

Baraha 10.10 is a program that allows you to type, edit, convert, and print text in various Indian languages, such as Hindi, Kannada, Tamil, Telugu, Malayalam, Gujarati, Bengali, Punjabi, Oriya, Sanskrit, and more. It also supports transliteration, phonetic typing, Unicode fonts, spell checking, sorting, searching, and formatting tools.
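To give a feel for what phonetic typing and transliteration mean in practice, here is a toy Python sketch of a longest-match-first transliterator. It is not Baraha's actual engine, and the small rule table below is a made-up fragment; a real scheme has many more rules and handles conjunct consonants.

```python
# Toy phonetic transliteration: Latin phonetic input -> Devanagari output.
RULES = {"ka": "क", "ma": "म", "la": "ल", "a": "अ"}

def transliterate(text: str) -> str:
    out, i = [], 0
    keys = sorted(RULES, key=len, reverse=True)  # try longest rules first
    while i < len(text):
        for key in keys:
            if text.startswith(key, i):
                out.append(RULES[key])
                i += len(key)
                break
        else:
            out.append(text[i])  # pass unknown characters through unchanged
            i += 1
    return "".join(out)

print(transliterate("kamala"))  # -> कमल
```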

      -

      baraha1010productkeycrack


      Download File - https://tinourl.com/2uL58e



      -

      Baraha 10.10 is designed for users who want to create content in Indian languages for various purposes, such as education, communication, publishing, web development, blogging, social media, etc. With Baraha 10.10, you can easily switch between English and Indian languages using a simple keyboard shortcut. You can also use Baraha 10.10 to learn Indian languages by reading and writing texts in different scripts.

      -

      Baraha 10.10 has many benefits for users who want to create content in Indian languages. Some of them are:

      -
• It is easy to use and has a user-friendly interface.
• It supports multiple languages and scripts with high accuracy and quality.
• It is compatible with most Windows applications and browsers.
• It has a low cost and offers a free trial version.
• It has a large user base and a dedicated support team.
      -

      How to download and install Baraha 10.10 on your PC?

      -

      If you are interested in using Baraha 10.10 for your Indian language content creation needs, then you need to download and install it on your PC first. Here are the requirements and compatibility of Baraha 10.10:

      -
• It works on Windows XP/Vista/7/8/8.1/10 operating systems.
• It requires a minimum of 256 MB RAM and 100 MB disk space.
• It supports both 32-bit and 64-bit systems.
      -

      To download and install Baraha 10.10 on your PC, follow these steps:

      -
1. Go to the official website of Baraha at https://www.baraha.com/.
2. Click on the "Download" button on the top right corner of the homepage.
3. Select the "BarahaSetup.exe" file from the list of downloads.
4. Save the file on your PC and run it as an administrator.
5. Follow the instructions on the screen to complete the installation process.
6. Launch Baraha 10.10 from your desktop or start menu.
      -

      How to use Baraha 10.10 to create and edit Indian language content?

      -

      Once you have downloaded and installed Baraha 10.10 on your PC, you can start using it to create and edit Indian language content. Here are some of the main features and functions of Baraha 10.10 that you can use:


      How to launch Baraha 10.10 and choose your preferred language?


      To launch Baraha 10.10, double-click on its icon on your desktop or start menu. You will see a window with four tabs: Editor, Keyboard, Converter, and Font.


      To choose your preferred language for typing or editing text, click on the "Language" menu on the top left corner of the window. You will see a list of languages that Baraha 10.10 supports. Select the language that you want to use from the list.

How to use the editor tool of Baraha 10.10?

      The editor tool of Baraha 10.10 is where you can type or edit text in your chosen language. It has a toolbar with various options for formatting, inserting symbols, checking spelling, sorting words, searching text, etc.


      To type text in your chosen language using the editor tool of Baraha 10.10:

1. Type in English using your normal keyboard.
2. Press the F12 key, or click the "Convert" button on the toolbar, to convert the English text into the script of your chosen language.
3. You can also use phonetic typing by selecting "Phonetic" from the "Keyboard" menu in the top right corner of the window (a small sketch of how phonetic conversion works follows below).
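For readers curious what phonetic conversion involves under the hood, here is a minimal Python sketch of the general idea: greedy longest-match transliteration from Latin input to an Indic script. The mapping table is a tiny hypothetical subset for illustration only; it is not Baraha's actual transliteration scheme or engine.

```python
# Greedy longest-match transliteration, the general idea behind phonetic
# typing. The table is a tiny hypothetical subset, NOT Baraha's real scheme.
MAP = {
    "kaa": "का", "ka": "क", "k": "क्",
    "maa": "मा", "ma": "म", "m": "म्",
    "raa": "रा", "ra": "र", "r": "र्",
    "aa": "आ", "a": "अ",
}

def transliterate(text: str) -> str:
    out, i = [], 0
    while i < len(text):
        for length in (3, 2, 1):          # try the longest match first
            chunk = text[i:i + length]
            if chunk in MAP:
                out.append(MAP[chunk])
                i += length
                break
        else:
            out.append(text[i])           # pass unmapped characters through
            i += 1
    return "".join(out)

print(transliterate("raama"))  # -> राम
```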

      How to use the keyboard tool of Baraha 10.10?


      The keyboard tool of Baraha 10.10 is where you can see the layout of the keyboard for your chosen language. It also allows you to customize the keyboard according to your preferences.


      To use the keyboard tool of Baraha 10.10:

1. Click on the "Keyboard" tab at the top of the window.
2. You will see a virtual keyboard with the keys for your chosen language.
3. You can use your mouse to click on the keys or use your physical keyboard to type.
4. You can also change the keyboard layout by selecting a different one from the "Layout" menu in the top right corner of the window.
5. You can also create your own keyboard layout by clicking the "Customize" button in the bottom right corner of the window.

      How to use the converter tool of Baraha 10.10?


      The converter tool of Baraha 10.10 is where you can convert text from one language script to another. It also allows you to convert text from Unicode to ANSI and vice versa.


      To use the converter tool of Baraha 10.10:

1. Click on the "Converter" tab at the top of the window.
2. You will see two text boxes: one for the source text and one for the target text.
3. Copy and paste the text that you want to convert into the source text box.
4. Select the source language and script from the drop-down menus above the source text box.
5. Select the target language and script from the drop-down menus above the target text box.
6. Click the "Convert" button at the bottom of the window.
7. You will see the converted text in the target text box.
8. You can also convert text between Unicode and ANSI by selecting "Unicode" or "ANSI" from the "Encoding" menu in the top right corner of the window (a small sketch of what this conversion involves follows below).
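To make the Unicode/ANSI distinction concrete, here is a small Python sketch. Unicode text is a sequence of code points with standard encodings such as UTF-8, while "ANSI" text rendered by a legacy Baraha font is a byte sequence whose meaning depends on that font's private glyph mapping. Python ships no codec for those proprietary fonts, so the legacy table below is purely hypothetical.

```python
text = "ನಮಸ್ಕಾರ"  # Kannada "namaskara" as Unicode

utf8 = text.encode("utf-8")            # Unicode -> bytes, standard encoding
assert utf8.decode("utf-8") == text    # lossless round trip

# A legacy-font converter instead maps each character to the byte value(s)
# that the ANSI font happens to draw as that glyph. These values are made up:
legacy_table = {"ನ": b"\x85", "ಮ": b"\x92"}
legacy = b"".join(legacy_table.get(ch, b"?") for ch in text)
print(legacy)  # unmapped characters fall back to b"?"
```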

      How to use the font tool of Baraha 10.10?


      The font tool of Baraha 10.10 is where you can change the font style, size, color, and alignment of your text. It also allows you to preview and install different fonts for your chosen language.


      To use the font tool of Baraha 10.10:

1. Click on the "Font" tab at the top of the window.
2. You will see a text box with some sample text in your chosen language.
3. You can change the font style, size, color, and alignment of your text using the buttons and menus on the toolbar above the text box.
4. You can also preview and install different fonts for your chosen language by clicking the "Font Manager" button in the bottom right corner of the window.
5. You will see a list of fonts that are available for your chosen language.
6. You can select a font and click the "Preview" button to see how it looks in the text box.
7. You can also install a font by clicking the "Install" button and following the on-screen instructions.

      How to crack Baraha 10.10 product key for free?


      If you want to use Baraha 10.10 without paying for it, then you might be tempted to crack its product key for free. However, this is not a legal or ethical way to use software, and it can have serious risks and consequences for you and your PC. In this section, we will explain what cracking software means, how it is done, and why you should avoid it.

What does cracking software mean?

      Cracking software means breaking or bypassing the security features or mechanisms that protect software from unauthorized use or copying. For example, cracking a product key means finding or generating a valid key that can activate the software without paying for it.

How is cracking software done?

      Cracking software is done by using various tools and techniques that exploit the vulnerabilities or weaknesses in the software code or design. For example, cracking a product key can be done by using key generators, key finders, key changers, or key patchers that can create or modify keys that can fool the software into thinking that it is licensed.

Why should you avoid cracking software?

      Cracking software is illegal and unethical because it violates the intellectual property rights and terms of service of the software developers and distributors. It is also risky and harmful because it can expose you and your PC to various threats and problems, such as:

• Viruses, malware, spyware, ransomware, or other malicious programs that can infect your PC and damage your data or system.
• Lawsuits, fines, penalties, or criminal charges that can result from violating the laws or regulations related to software piracy or theft.
• Poor performance, errors, bugs, crashes, or compatibility issues that can affect your user experience or productivity with the software.
• Lack of updates, support, features, or security patches that can make your software outdated, vulnerable, or incompatible with other applications or devices.

      Conclusion


      In conclusion, Baraha 10.10 is a powerful and useful software that can help you create and edit content in various Indian languages. It has many features and functions that make it easy and convenient to use. However, if you want to use Baraha 10.10 legally and ethically, you should not crack its product key for free, but rather purchase it from its official website or authorized dealers. Cracking software is not only illegal and unethical, but also risky and harmful for you and your PC. Therefore, you should avoid cracking software at all costs.


      We hope that this article has given you some valuable information about Baraha 10.10 and its product key crack. If you have any questions or comments, please feel free to contact us or leave them below. Thank you for reading!


      Frequently Asked Questions

1. What is Baraha 10.10?
  Baraha 10.10 is software that allows you to type, edit, convert, and print text in various Indian languages, such as Hindi, Kannada, Tamil, Telugu, Malayalam, Gujarati, Bengali, Punjabi, Oriya, Sanskrit, and more. It also supports transliteration, phonetic typing, Unicode fonts, spell checking, sorting, searching, and formatting tools.
2. How much does Baraha 10.10 cost?
  Baraha 10.10 costs $29.95 USD for a single-user license. You can purchase it from its official website at https://www.baraha.com/. You can also download a free trial version that works for 30 days from the same website.
3. How do I activate Baraha 10.10?
  To activate Baraha 10.10 after purchasing it, you need to register it online using your email address and product key. You will receive a confirmation email with an activation code that you need to enter in the software. You can also activate Baraha 10.10 offline by contacting the support team and providing them with your product key and machine ID.
4. How do I update Baraha 10.10?
  To update Baraha 10.10, download and install the latest version from its official website at https://www.baraha.com/. You do not need to uninstall the previous version or re-enter your product key or activation code. The update automatically overwrites the existing files and retains your settings and preferences.
5. How do I uninstall Baraha 10.10?
  To uninstall Baraha 10.10, go to the Control Panel on your PC and select "Programs and Features". Find Baraha 10.10 in the list of programs and click "Uninstall". Follow the on-screen instructions to complete the uninstallation. You can also use a third-party uninstaller tool to remove Baraha 10.10 completely from your PC.

      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Datalogic Lynx D432 Software Download OPOS Drivers DLRMUS Application and Scanalyzer Database File.md b/spaces/raedeXanto/academic-chatgpt-beta/Datalogic Lynx D432 Software Download OPOS Drivers DLRMUS Application and Scanalyzer Database File.md deleted file mode 100644 index 1c6d9d6b15d3fbc5944b9ef1204e08b2930b3a7a..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Datalogic Lynx D432 Software Download OPOS Drivers DLRMUS Application and Scanalyzer Database File.md +++ /dev/null @@ -1,156 +0,0 @@ - -

      Datalogic Lynx D432 Software Download: A Guide for Users


      If you are looking for a versatile and powerful handheld scanner that can read both 1D and 2D barcodes, you might want to consider the Datalogic Lynx D432. This scanner has a high-performance imaging engine that can capture images and videos, as well as scan barcodes in any orientation and condition. It also has a large color display, a fast processor, a long-lasting battery, and a rugged design that can withstand drops and shocks.


      But to get the most out of your Datalogic Lynx D432 scanner, you need to download and install the right software for it. The software will help you configure, update, and optimize your scanner, as well as enable it to communicate with other devices and applications. In this article, we will guide you through the steps of downloading, installing, and using the software for your Datalogic Lynx D432 scanner.



      How to download the software for Datalogic Lynx D432


      There are different sources where you can download the software for your Datalogic Lynx D432 scanner. The most reliable and recommended source is the official Datalogic website, where you can find the latest versions of the software and firmware for your scanner. However, you can also download the software from other websites that offer barcode scanning software, such as Wavelink or Ivanti. Here are the steps for downloading the software from each source.

Download from Datalogic website

      To download the software from the Datalogic website, you need to follow these steps:

1. Go to https://www.datalogic.com/deu/support-service/downloads-dw-4024.html?cat=17.
2. Select your product family (Lynx) and product model (Lynx) from the drop-down menus.
3. Choose the category of software that you want to download (Software & Utilities, Firmware Images, or Utilities).
4. Click on the name of the software that you want to download (e.g., USB-COM Driver V7.1.1, Datalogic Aladdin 3.2.0.7, Scanalyzer Scanner Configuration Tool, etc.).
5. Read the description and requirements of the software carefully.
6. Click on the Download button and save the file on your computer.

      Download from other sources


      To download the software from other sources, such as Wavelink or Ivanti, you need to follow these steps:

1. Go to https://www.wavelink.com/Datalogic-Lynx-Documentation-downloads/ or https://www.ivanti.com/support/downloads.
2. Search for your product model (Datalogic Lynx) or product category (Barcode Scanners).
3. Select the type of software that you want to download (e.g., Device Management Software, Terminal Emulation Software, Voice Picking Software, etc.).
4. Click on the name of the software that you want to download (e.g., Wavelink Avalanche Enabler, Wavelink TE Client, Ivanti Velocity Client, etc.).
5. Read the description and requirements of the software carefully.
6. Click on the Download button and save the file on your computer.

      How to install the software for Datalogic Lynx D432


      After downloading the software for your Datalogic Lynx D432 scanner, you need to install it on your computer and/or your scanner. The installation process may vary depending on the type of software that you downloaded. Here are some common methods of installing the software for your scanner.


      Install using USB-COM driver


      If you downloaded a USB-COM driver for your scanner, such as USB-COM Driver V7.1.1 from Datalogic website, you need to install it on your computer first before connecting your scanner to it. This driver will allow your computer to recognize your scanner as a COM port device and communicate with it via USB cable. To install this driver, you need to follow these steps:

1. Extract the zip file that contains the driver on your computer.
2. Run the setup.exe file as an administrator.
3. Follow the on-screen instructions to complete the installation.
4. Restart your computer if prompted.
5. Connect your scanner to your computer via USB cable.
6. The driver will automatically detect the scanner and assign a COM port number to it.
7. You can check the COM port number in Device Manager under Ports (COM & LPT), or enumerate it programmatically as sketched below.
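Once the virtual COM port exists, any serial-capable program can read scans from it. The Python sketch below uses the third-party pyserial package (pip install pyserial); the port name "COM3" and the 9600 baud rate are assumptions, so check Device Manager or the enumeration loop for the actual values on your machine.

```python
import serial                          # pyserial: pip install pyserial
from serial.tools import list_ports

# List every COM port so you can spot the one the driver assigned.
for port in list_ports.comports():
    print(port.device, "-", port.description)

# "COM3" and 9600 baud are assumptions; adjust them to your setup.
with serial.Serial("COM3", baudrate=9600, timeout=5) as scanner:
    raw = scanner.readline()           # one scan, usually CR/LF-terminated
    print("Scanned:", raw.decode("ascii", errors="replace").strip())
```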

      Install using Datalogic Aladdin


      If you downloaded a configuration tool for your scanner, such as Datalogic Aladdin 3.2.0.7 from Datalogic website, you need to install it on your computer first before connecting your scanner to it. This tool will allow you to configure and update your scanner settings and firmware easily and quickly. To install this tool, you need to follow these steps:

1. Extract the zip file that contains the tool on your computer.
2. Run the aladdin_setup_3.2.0.7.exe file as an administrator.
3. Follow the instructions on screen to complete the installation.
4. Connect your scanner to your computer via USB cable or cradle.
5. The tool will automatically detect and connect with your scanner.
6. You can use the tool to configure and update your scanner settings and firmware according to your needs.

      Install using other tools


      If you downloaded other tools or utilities for your scanner, such as Scanalyzer Scanner Configuration Tool or UPG Runtime Release 2.5 from Datalogic website, you need to install them on your computer first before connecting your scanner to it. These tools or utilities will allow you to perform various functions with your scanner, such as testing its performance, downloading files and programs to it, printing labels from it, etc. To install these tools or utilities, you need to follow these steps:

1. Extract the zip file that contains the tool or utility on your computer.
2. Run the executable file as an administrator.
3. Follow the instructions on screen to complete the installation.
4. Connect your scanner to your computer via USB cable or cradle.
5. The tool or utility will automatically detect and connect with your scanner.
6. You can use the tool or utility to perform the function that it offers.

      How to use the software for Datalogic Lynx D432

After installing the software for your Datalogic Lynx D432 scanner, you can start using it to enhance your scanning experience. Here is how to use each of the main tools.

      Use Scanalyzer to configure and test the scanner


      If you downloaded Scanalyzer Scanner Configuration Tool from Datalogic website, you can use it to configure and test your scanner settings and performance. This tool is a graphical user interface that allows you to access and modify various parameters of your scanner, such as symbologies, data formatting, beeper and LED settings, etc. You can also use it to test your scanner's reading capabilities and view its status information. To use this tool, you need to follow these steps:

1. Install the tool on your computer as described above.
2. Connect your scanner to your computer via USB cable or cradle.
3. Put your scanner in Service Port mode by holding down the scan button on the top of the unit while plugging it in.
4. Run the Scanalyzer.exe file from the installation folder.
5. The tool will automatically detect and connect with your scanner.
6. You can use the tool to configure and test your scanner settings and performance according to your needs.

      Use UPG Runtime to download files and programs to the scanner


      If you downloaded UPG Runtime Release 2.5 from Datalogic website, you can use it to download files and programs to your scanner. This software enables easy download of files and programs to portables that run Windows Mobile operating system, such as Datalogic Lynx D432. You can use it to transfer images, videos, applications, or other data from your computer to your scanner. To use this software, you need to follow these steps:

1. Install the software on your computer as described above.
2. Connect your scanner to your computer via USB cable or cradle.
3. Run the UPG Runtime.exe file from the installation folder.
4. The software will automatically detect and connect with your scanner.
5. You can use the software to download files and programs to your scanner according to your needs.

      Use other utilities to enhance the scanner performance


      If you downloaded other utilities for your scanner, such as DLRMUS 1.0.14 (Windows) or OPOS 1.14.200 from Datalogic website, you can use them to enhance the scanner performance and functionality. These utilities offer various features, such as remotely updating scanner firmware and configuration, enabling OPOS drivers for Datalogic scanners, printing labels from the scanner, etc. To use these utilities, you need to follow these steps:

1. Install the utility on your computer as described above.
2. Connect your scanner to your computer via USB cable or cradle.
3. Run the utility's executable file from the installation folder.
4. The utility will automatically detect and connect with your scanner.
5. You can use the utility to perform the feature that it offers according to your needs.

      Conclusion


      In this article, we have shown you how to download, install, and use the software for your Datalogic Lynx D432 scanner. By following these steps, you can enhance your scanning experience and get the most out of your scanner. The software will help you configure, update, and optimize your scanner settings and firmware, as well as enable it to communicate with other devices and applications. We hope that this guide has been helpful and informative for you. If you have any questions or feedback about the software for Datalogic Lynx D432 scanner, please feel free to contact us or visit our website for more information.


      FAQs


      Here are some common questions and answers about the software for Datalogic Lynx D432 scanner:

1. Q: Where can I find more information about the software for the Datalogic Lynx D432 scanner?
  A: You can find more information in the Product Reference Guide (PRG), which is available from the Datalogic website. The PRG contains detailed descriptions and instructions for each software component and feature.
2. Q: How can I check if my scanner has the latest firmware version?
  A: You can check the firmware version by using the Scanalyzer Scanner Configuration Tool or Datalogic Aladdin 3.2.0.7. These tools show you the current firmware version of your scanner and allow you to update it if needed.
3. Q: How can I reset my scanner settings to factory defaults?
  A: You can reset your scanner settings to factory defaults by scanning a special barcode that is available in the PRG or on the Datalogic website. This barcode restores all parameters of your scanner to their original values.
4. Q: How can I troubleshoot my scanner if it is not working properly?
  A: Start with some basic checks: the power supply, the connection cable, the cleanliness of the scan window, and the quality of the barcode. If these steps do not solve the problem, contact Datalogic technical support for further assistance.
5. Q: How can I contact Datalogic technical support?
  A: You can contact Datalogic technical support by visiting our website (www.datalogic.com) and filling out a support request form. You can also call us at +1 800 (BARCODE) / +1 800 2272633 or email us at support.usa@datalogic.com.

      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Jayanta Bhai Ki Luv Story Torrent A Fun-Filled Journey of Love and Friendship.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Jayanta Bhai Ki Luv Story Torrent A Fun-Filled Journey of Love and Friendship.md deleted file mode 100644 index 3bfa1441abbe846c47a2d56c7c352e533d1fc2eb..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Jayanta Bhai Ki Luv Story Torrent A Fun-Filled Journey of Love and Friendship.md +++ /dev/null @@ -1,56 +0,0 @@ -

      Download Jayanta Bhai Ki Luv Story Torrent: A Romantic Comedy with a Twist


      If you are looking for a fun and entertaining movie to watch with your loved ones, you might want to check out Jayanta Bhai Ki Luv Story, a 2013 Hindi romantic comedy film starring Vivek Oberoi and Neha Sharma. The movie is about how a street thug unexpectedly falls in love with his enemy, who is a former IT executive. The movie has a lot of humor, romance, action, and drama that will keep you hooked till the end. In this article, we will tell you more about the movie and how you can download Jayanta Bhai Ki Luv Story torrent from the internet.



      What is Jayanta Bhai Ki Luv Story?


Jayanta Bhai Ki Luv Story is a movie directed by Vinnil Markan and produced by Kumar S. Taurani under the banner of Tips Industries. The movie was released on 15 February 2013 and received mixed reviews from critics and audiences. It is a remake of the 2010 South Korean film My Gangster Lover, which was later remade in Tamil in 2016 as Kadhalum Kadandhu Pogum.

Plot summary

      The movie revolves around the lives of two unlikely lovers who meet as neighbours in a low-cost housing colony in Mumbai. Jayanta Bhai (Vivek Oberoi) is a small-time gangster who works for a don named Altafbhai (Zakir Hussain). His only dream is to own a bar of his own someday. He had spent five years in jail for taking the blame for a crime he did not commit on Altafbhai's orders. He hopes that his loyalty will be rewarded by Altafbhai once he is out of prison.


      Simran Desai (Neha Sharma) is an engineering graduate who comes to Mumbai to pursue a career in the IT sector. She gets a job in an IT company and becomes well settled in life. However, things take a turn for the worse when her company shuts down suddenly due to financial crisis. She loses her job and her flat and has to move into a cheap apartment in the same building where Jayanta lives.


      Simran initially dislikes Jayanta for his rowdy behaviour and criminal background. She also has to deal with her father (Shishir Sharma) who wants her to get married soon and her ex-boyfriend (Him Dayal Singh) who cheated on her with her boss (Vishwanath Chatterjee). However, she gradually warms up to Jayanta after he saves her life when she overdoses on vitamin supplements and passes out in her room. She realizes that Jayanta is actually a kind-hearted, caring, and funny person despite his rough exterior.


      The two start developing feelings for each other and end up getting intimate one night while drunk. Simran introduces Jayanta to her father as her boyfriend who is a successful manager in an IT firm. Her father agrees to their marriage but changes his mind after seeing Jayanta beating up some goons at a party. He forbids Simran from returning to Mumbai or attending an important job interview.


      Meanwhile, Jayanta starts a gang war by attacking the men of Alex Pandian (Nassar), a former cop turned gangster who is Altafbhai's enemy. Altafbhai orders Jayanta to kill Alex but Jayanta refuses as he wants to leave his criminal life behind for Simran's sake. He decides to run away with Simran but Altafbhai kidnaps her and threatens to kill her if Jayanta does not kill Alex.


      Jayanta manages to rescue Simran from Altafbhai's clutches but gets shot by Alex in the process. He survives the bullet wound and reunites with Simran who accepts him for who he is. They get married with their friends' blessings and live happily ever after.

Who are the main actors and characters?

      The movie features an ensemble cast of talented actors who play various roles in the story. Here are some of the main actors and characters:

• Vivek Oberoi as Jayanta Bhai, a street thug and small-time gangster who falls in love with Simran.
• Neha Sharma as Simran Desai, an engineering graduate who loses her job and moves into Jayanta's building.
• Rahul Singh as Datta, Jayanta's friend and fellow gangster.
• Nikhil Ratnaparkhi as Wagle, Simran's colleague and friend.
• Shishir Sharma as Hemant Desai, Simran's father.
• Him Dayal Singh as Kunal, Simran's ex-boyfriend.
• Vishwanath Chatterjee as Rohit Sharma, Simran's boss.
• Jiten Mukhi as Sanghvi, Jayanta's rival gangster.
• Falguni Rajani as Sakina, Jayanta's neighbour.
• Zakir Hussain as Altafbhai, Jayanta's boss and don.
• Nassar as Alex Pandian, a former cop and Altafbhai's enemy.

      What are the highlights of the movie?


      Jayanta Bhai Ki Luv Story is not just another typical Bollywood rom-com. It has some unique elements that make it stand out from the crowd. Here are some of the highlights of the movie:

The chemistry between Vivek Oberoi and Neha Sharma

      The main attraction of the movie is the chemistry between the lead pair Vivek Oberoi and Neha Sharma. They share some cute, funny, romantic, and emotional moments that make you root for their love story. They also have some sizzling scenes that add spice to their relationship. Vivek Oberoi delivers one of his best performances as Jayanta Bhai, showing his versatility as an actor. He portrays both the tough and tender sides of his character with ease. Neha Sharma looks gorgeous and charming as Simran Desai, playing her role with grace and confidence.

The comedy scenes and dialogues

The movie has some hilarious comedy scenes and witty dialogues that will make you laugh out loud.

      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Google Sketchup Pro Layout Keygen Why You Should Upgrade to SketchUp Pro with this Crack.md b/spaces/raedeXanto/academic-chatgpt-beta/Google Sketchup Pro Layout Keygen Why You Should Upgrade to SketchUp Pro with this Crack.md deleted file mode 100644 index 1ce60ac8b4725a21e01a5f6fe16b49825f401c59..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Google Sketchup Pro Layout Keygen Why You Should Upgrade to SketchUp Pro with this Crack.md +++ /dev/null @@ -1,102 +0,0 @@ - -

      Google SketchUp Pro Layout Keygen: What Is It and How to Use It?


      If you are looking for a powerful and easy-to-use 3D modeling software that can help you design, document, and present your ideas in 3D, you might have heard of Google SketchUp Pro Layout. This program is used by professionals and amateurs alike for various fields such as architecture, engineering, interior design, landscaping, video game design, and more. But what is Google SketchUp Pro Layout exactly, and what is a keygen? In this article, we will answer these questions and show you how to use a keygen to activate Google SketchUp Pro Layout for free.

Features of Google SketchUp Pro Layout

      Google SketchUp Pro Layout is the desktop version of SketchUp, a popular 3D modeling software developed by Trimble. It allows you to create, edit, and visualize 3D models of anything you can imagine. Some of the features of Google SketchUp Pro Layout are:


      3D modeling and design tools


      You can use a variety of drawing and sketching tools, such as lines, arcs, circles, rectangles, polygons, push/pull, follow me, offset, rotate, scale, move, copy, etc., to create your models. You can also apply materials, colors, textures, shadows, lighting effects, styles, and more to make your models look realistic. You can also import and export models from other formats such as DWG, DXF, OBJ, STL, etc.
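As a small illustration of one of those interchange formats, here is a minimal Python sketch that reads vertices and faces from a Wavefront OBJ file. It handles only the basic "v" and "f" records, and "model.obj" is a placeholder path; this shows the general shape of the format, not SketchUp's actual importer.

```python
def load_obj(path):
    """Read vertices and faces from a minimal Wavefront OBJ file."""
    vertices, faces = [], []
    with open(path) as fh:
        for line in fh:
            parts = line.split()
            if not parts:
                continue
            if parts[0] == "v":        # vertex record: v x y z
                vertices.append(tuple(float(p) for p in parts[1:4]))
            elif parts[0] == "f":      # face record: f v1 v2 v3 ...
                # Indices are 1-based and may carry /vt/vn suffixes.
                faces.append([int(p.split("/")[0]) - 1 for p in parts[1:]])
    return vertices, faces

verts, faces = load_obj("model.obj")   # placeholder filename
print(len(verts), "vertices,", len(faces), "faces")
```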

Documentation and presentation tools

      You can use LayOut, a tool included in Google SketchUp Pro Layout, to generate presentation documents such as page layouts, vector illustrations, title blocks, dimensions, annotations, etc. You can also create scenes and animations to showcase your models from different angles and perspectives. You can also export your models as images or videos in various formats such as JPG, PNG, PDF, MP4, etc.

Collaboration and sharing tools

      You can use Trimble Connect, a cloud-based platform that works seamlessly with Google SketchUp Pro Layout to store, manage, and share your models online. You can also access the 3D Warehouse, the world's largest repository of free 3D models that you can download or upload. You can also use the SketchUp Mobile Viewer app to view your models on your phone or tablet.

Benefits of Using a Keygen for Google SketchUp Pro Layout

      A keygen is a software that generates a serial number or activation code for another software. By using a keygen for Google SketchUp Pro Layout, you can enjoy some benefits such as:

Save money and time

      The official price of Google SketchUp Pro Layout is $299 per year or $1199 for a perpetual license. That's quite expensive for some users who might not need all the features or updates. By using a keygen for Google SketchUp Pro Layout, you can get the full version of the program for free and use it as long as you want. You can also save time by avoiding the hassle of registration and verification.

Access all features and updates

      Some users might try to use the free trial version of Google SketchUp Pro Layout, but that has some limitations such as a 30-day period, watermarks on exported files, and restricted access to some features and extensions. By using a keygen for Google SketchUp Pro Layout, you can unlock all the features and extensions that the program has to offer. You can also get the latest updates and bug fixes without any restrictions.


      Avoid malware and viruses


      Some users might be tempted to download cracked versions of Google SketchUp Pro Layout from untrusted sources, but that can be risky and dangerous. Cracked versions might contain malware and viruses that can harm your computer and compromise your data. By using a keygen for Google SketchUp Pro Layout, you can avoid these risks and ensure that you are using a clean and safe version of the program.

How to Download and Install a Keygen for Google SketchUp Pro Layout

      If you want to use a keygen for Google SketchUp Pro Layout, you need to follow these steps:

Find a reliable source

      The first step is to find a reliable source that provides a working keygen for Google SketchUp Pro Layout. You can search online for reviews and feedback from other users who have used the keygen before. You can also check the reputation and ratings of the source to make sure it is trustworthy. One of the sources that we recommend is iemblog.com, which offers a download link and an activation guide for Google SketchUp Pro Layout 2022 v22.0.316 x64.

Follow the instructions

      The next step is to follow the instructions provided by the source to download and install the keygen for Google SketchUp Pro Layout. You might need to disable your antivirus software temporarily to avoid any interference with the keygen. You might also need to extract the keygen from a compressed file using a tool such as WinRAR or 7-Zip. After that, you need to run the keygen as an administrator and generate a serial number or activation code for Google SketchUp Pro Layout.

Activate the program

      The final step is to activate Google SketchUp Pro Layout using the serial number or activation code generated by the keygen. You need to open Google SketchUp Pro Layout and go to the Help menu. Then, you need to select License > Authorize > Add License > Enter License Info. After that, you need to enter your name, email address, company name, and serial number or activation code in the corresponding fields. Then, you need to click on Add License and wait for the confirmation message. Congratulations, you have successfully activated Google SketchUp Pro Layout using a keygen!

Tips and Tricks for Using Google SketchUp Pro Layout

      Now that you have activated Google SketchUp Pro Layout using a keygen, you can start using it to create amazing 3D models and presentations. Here are some tips and tricks that can help you improve your skills and productivity:

Use search to find commands and extensions

      One of the new features in Google SketchUp Pro Layout 2022 is search, which allows you to quickly find and activate native commands and installed extensions. You can access search by pressing Ctrl+F on Windows or Command+F on Mac, or by clicking on the magnifying glass icon on the toolbar. You can type in keywords or phrases related to what you want to do, such as draw circle, export pdf, or install extension. You can also use filters such as Tools, Extensions, Help, etc., to narrow down your results.

Use labels to organize your model

      Another new feature in Google SketchUp Pro Layout 2022 is labels, which allows you to click on entities or selections to apply labels to them. With this tool, you can speed up model organization by tagging objects directly in the modeling window, quickly clean up unwanted tags, and improve reporting fidelity by bulk modifying the tagging of component instances. You can also use Auto-Text labels to display information from the SketchUp model entity, such as scene name, scale, area, volume, etc.

Use zoom selection to focus on a specific area

      Another new feature in Google SketchUp Pro Layout 2022 is zoom selection, which allows you to zoom in on a specific area of your model by selecting it and choosing Zoom Selection from the context menu. This can help you focus on the details and avoid distractions from other parts of the model. You can also use this feature to create close-up views for your presentations and documents.

Conclusion

      Google SketchUp Pro Layout is a great tool for creating 3D models and presentations for various purposes. By using a keygen for Google SketchUp Pro Layout, you can activate the program for free and enjoy all its features and updates. However, you need to be careful and find a reliable source for the keygen, follow the instructions carefully, and avoid malware and viruses. You can also improve your skills and productivity by using some of the tips and tricks we shared in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please let us know in the comments below.

FAQs

Here are some frequently asked questions about Google SketchUp Pro Layout and keygens:

Q: Is Google SketchUp Pro Layout free?

A: No, Google SketchUp Pro Layout is not free. It costs $299 per year or $1199 for a perpetual license. However, you can use a keygen to activate it for free.

Q: Is using a keygen for Google SketchUp Pro Layout legal?

A: No, using a keygen for Google SketchUp Pro Layout is not legal. It violates the terms and conditions of the software license agreement and may result in legal consequences. We do not condone or encourage the use of keygens or any other illegal methods.

Q: Is using a keygen for Google SketchUp Pro Layout safe?

A: Not necessarily. Using a keygen for Google SketchUp Pro Layout may expose your computer to malware and viruses that can harm your system and compromise your data. You need to be careful: find a reliable source for the keygen, scan it with antivirus software, and back up your files before using it.

Q: What are some alternatives to Google SketchUp Pro Layout?

A: Some alternatives to Google SketchUp Pro Layout are Blender, Autodesk 3ds Max, Autodesk Maya, Cinema 4D, Rhino 3D, etc. These are also 3D modeling programs that offer different features and capabilities.

Q: How can I learn more about Google SketchUp Pro Layout?

A: You can learn more about Google SketchUp Pro Layout by visiting its official website sketchup.com, reading its help articles help.sketchup.com, watching its tutorial videos youtube.com/user/SketchUpVideo, or joining its community forums forums.sketchup.com.

      \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/https.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/https.d.ts deleted file mode 100644 index bda367d74c634f58d3e3898029bbc64bdbc61c0a..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/https.d.ts +++ /dev/null @@ -1,542 +0,0 @@ -/** - * HTTPS is the HTTP protocol over TLS/SSL. In Node.js this is implemented as a - * separate module. - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/https.js) - */ -declare module 'https' { - import { Duplex } from 'node:stream'; - import * as tls from 'node:tls'; - import * as http from 'node:http'; - import { URL } from 'node:url'; - type ServerOptions< - Request extends typeof http.IncomingMessage = typeof http.IncomingMessage, - Response extends typeof http.ServerResponse = typeof http.ServerResponse, - > = tls.SecureContextOptions & tls.TlsOptions & http.ServerOptions; - type RequestOptions = http.RequestOptions & - tls.SecureContextOptions & { - checkServerIdentity?: typeof tls.checkServerIdentity | undefined; - rejectUnauthorized?: boolean | undefined; // Defaults to true - servername?: string | undefined; // SNI TLS Extension - }; - interface AgentOptions extends http.AgentOptions, tls.ConnectionOptions { - rejectUnauthorized?: boolean | undefined; - maxCachedSessions?: number | undefined; - } - /** - * An `Agent` object for HTTPS similar to `http.Agent`. See {@link request} for more information. - * @since v0.4.5 - */ - class Agent extends http.Agent { - constructor(options?: AgentOptions); - options: AgentOptions; - } - interface Server< - Request extends typeof http.IncomingMessage = typeof http.IncomingMessage, - Response extends typeof http.ServerResponse = typeof http.ServerResponse, - > extends http.Server {} - /** - * See `http.Server` for more information. - * @since v0.3.4 - */ - class Server< - Request extends typeof http.IncomingMessage = typeof http.IncomingMessage, - Response extends typeof http.ServerResponse = typeof http.ServerResponse, - > extends tls.Server { - constructor(requestListener?: http.RequestListener); - constructor( - options: ServerOptions, - requestListener?: http.RequestListener, - ); - /** - * Closes all connections connected to this server. - * @since v18.2.0 - */ - closeAllConnections(): void; - /** - * Closes all connections connected to this server which are not sending a request or waiting for a response. 
- * @since v18.2.0
- */
- closeIdleConnections(): void;
- addListener(event: string, listener: (...args: any[]) => void): this;
- addListener(event: 'keylog', listener: (line: Buffer, tlsSocket: tls.TLSSocket) => void): this;
- addListener(
- event: 'newSession',
- listener: (sessionId: Buffer, sessionData: Buffer, callback: (err: Error, resp: Buffer) => void) => void,
- ): this;
- addListener(
- event: 'OCSPRequest',
- listener: (
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ) => void,
- ): this;
- addListener(
- event: 'resumeSession',
- listener: (sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void) => void,
- ): this;
- addListener(event: 'secureConnection', listener: (tlsSocket: tls.TLSSocket) => void): this;
- addListener(event: 'tlsClientError', listener: (err: Error, tlsSocket: tls.TLSSocket) => void): this;
- addListener(event: 'close', listener: () => void): this;
- addListener(event: 'connection', listener: (socket: Duplex) => void): this;
- addListener(event: 'error', listener: (err: Error) => void): this;
- addListener(event: 'listening', listener: () => void): this;
- addListener(event: 'checkContinue', listener: http.RequestListener<Request, Response>): this;
- addListener(event: 'checkExpectation', listener: http.RequestListener<Request, Response>): this;
- addListener(event: 'clientError', listener: (err: Error, socket: Duplex) => void): this;
- addListener(
- event: 'connect',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- addListener(event: 'request', listener: http.RequestListener<Request, Response>): this;
- addListener(
- event: 'upgrade',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- emit(event: string, ...args: any[]): boolean;
- emit(event: 'keylog', line: Buffer, tlsSocket: tls.TLSSocket): boolean;
- emit(
- event: 'newSession',
- sessionId: Buffer,
- sessionData: Buffer,
- callback: (err: Error, resp: Buffer) => void,
- ): boolean;
- emit(
- event: 'OCSPRequest',
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ): boolean;
- emit(event: 'resumeSession', sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void): boolean;
- emit(event: 'secureConnection', tlsSocket: tls.TLSSocket): boolean;
- emit(event: 'tlsClientError', err: Error, tlsSocket: tls.TLSSocket): boolean;
- emit(event: 'close'): boolean;
- emit(event: 'connection', socket: Duplex): boolean;
- emit(event: 'error', err: Error): boolean;
- emit(event: 'listening'): boolean;
- emit(
- event: 'checkContinue',
- req: InstanceType<Request>,
- res: InstanceType<Response> & { req: InstanceType<Request> },
- ): boolean;
- emit(
- event: 'checkExpectation',
- req: InstanceType<Request>,
- res: InstanceType<Response> & { req: InstanceType<Request> },
- ): boolean;
- emit(event: 'clientError', err: Error, socket: Duplex): boolean;
- emit(event: 'connect', req: InstanceType<Request>, socket: Duplex, head: Buffer): boolean;
- emit(
- event: 'request',
- req: InstanceType<Request>,
- res: InstanceType<Response> & { req: InstanceType<Request> },
- ): boolean;
- emit(event: 'upgrade', req: InstanceType<Request>, socket: Duplex, head: Buffer): boolean;
- on(event: string, listener: (...args: any[]) => void): this;
- on(event: 'keylog', listener: (line: Buffer, tlsSocket: tls.TLSSocket) => void): this;
- on(
- event: 'newSession',
- listener: (sessionId: Buffer, sessionData: Buffer, callback: (err: Error, resp: Buffer) => void) => void,
- ): this;
- on(
- event: 'OCSPRequest',
- listener: (
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ) => void,
- ): this;
- on(
- event: 'resumeSession',
- listener: (sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void) => void,
- ): this;
- on(event: 'secureConnection', listener: (tlsSocket: tls.TLSSocket) => void): this;
- on(event: 'tlsClientError', listener: (err: Error, tlsSocket: tls.TLSSocket) => void): this;
- on(event: 'close', listener: () => void): this;
- on(event: 'connection', listener: (socket: Duplex) => void): this;
- on(event: 'error', listener: (err: Error) => void): this;
- on(event: 'listening', listener: () => void): this;
- on(event: 'checkContinue', listener: http.RequestListener<Request, Response>): this;
- on(event: 'checkExpectation', listener: http.RequestListener<Request, Response>): this;
- on(event: 'clientError', listener: (err: Error, socket: Duplex) => void): this;
- on(event: 'connect', listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void): this;
- on(event: 'request', listener: http.RequestListener<Request, Response>): this;
- on(event: 'upgrade', listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void): this;
- once(event: string, listener: (...args: any[]) => void): this;
- once(event: 'keylog', listener: (line: Buffer, tlsSocket: tls.TLSSocket) => void): this;
- once(
- event: 'newSession',
- listener: (sessionId: Buffer, sessionData: Buffer, callback: (err: Error, resp: Buffer) => void) => void,
- ): this;
- once(
- event: 'OCSPRequest',
- listener: (
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ) => void,
- ): this;
- once(
- event: 'resumeSession',
- listener: (sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void) => void,
- ): this;
- once(event: 'secureConnection', listener: (tlsSocket: tls.TLSSocket) => void): this;
- once(event: 'tlsClientError', listener: (err: Error, tlsSocket: tls.TLSSocket) => void): this;
- once(event: 'close', listener: () => void): this;
- once(event: 'connection', listener: (socket: Duplex) => void): this;
- once(event: 'error', listener: (err: Error) => void): this;
- once(event: 'listening', listener: () => void): this;
- once(event: 'checkContinue', listener: http.RequestListener<Request, Response>): this;
- once(event: 'checkExpectation', listener: http.RequestListener<Request, Response>): this;
- once(event: 'clientError', listener: (err: Error, socket: Duplex) => void): this;
- once(event: 'connect', listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void): this;
- once(event: 'request', listener: http.RequestListener<Request, Response>): this;
- once(event: 'upgrade', listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void): this;
- prependListener(event: string, listener: (...args: any[]) => void): this;
- prependListener(event: 'keylog', listener: (line: Buffer, tlsSocket: tls.TLSSocket) => void): this;
- prependListener(
- event: 'newSession',
- listener: (sessionId: Buffer, sessionData: Buffer, callback: (err: Error, resp: Buffer) => void) => void,
- ): this;
- prependListener(
- event: 'OCSPRequest',
- listener: (
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ) => void,
- ): this;
- prependListener(
- event: 'resumeSession',
- listener: (sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void) => void,
- ): this;
- prependListener(event: 'secureConnection', listener: (tlsSocket: tls.TLSSocket) => void): this;
- prependListener(event: 'tlsClientError', listener: (err: Error, tlsSocket: tls.TLSSocket) => void): this;
- prependListener(event: 'close', listener: () => void): this;
- prependListener(event: 'connection', listener: (socket: Duplex) => void): this;
- prependListener(event: 'error', listener: (err: Error) => void): this;
- prependListener(event: 'listening', listener: () => void): this;
- prependListener(event: 'checkContinue', listener: http.RequestListener<Request, Response>): this;
- prependListener(event: 'checkExpectation', listener: http.RequestListener<Request, Response>): this;
- prependListener(event: 'clientError', listener: (err: Error, socket: Duplex) => void): this;
- prependListener(
- event: 'connect',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- prependListener(event: 'request', listener: http.RequestListener<Request, Response>): this;
- prependListener(
- event: 'upgrade',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- prependOnceListener(event: string, listener: (...args: any[]) => void): this;
- prependOnceListener(event: 'keylog', listener: (line: Buffer, tlsSocket: tls.TLSSocket) => void): this;
- prependOnceListener(
- event: 'newSession',
- listener: (sessionId: Buffer, sessionData: Buffer, callback: (err: Error, resp: Buffer) => void) => void,
- ): this;
- prependOnceListener(
- event: 'OCSPRequest',
- listener: (
- certificate: Buffer,
- issuer: Buffer,
- callback: (err: Error | null, resp: Buffer) => void,
- ) => void,
- ): this;
- prependOnceListener(
- event: 'resumeSession',
- listener: (sessionId: Buffer, callback: (err: Error, sessionData: Buffer) => void) => void,
- ): this;
- prependOnceListener(event: 'secureConnection', listener: (tlsSocket: tls.TLSSocket) => void): this;
- prependOnceListener(event: 'tlsClientError', listener: (err: Error, tlsSocket: tls.TLSSocket) => void): this;
- prependOnceListener(event: 'close', listener: () => void): this;
- prependOnceListener(event: 'connection', listener: (socket: Duplex) => void): this;
- prependOnceListener(event: 'error', listener: (err: Error) => void): this;
- prependOnceListener(event: 'listening', listener: () => void): this;
- prependOnceListener(event: 'checkContinue', listener: http.RequestListener<Request, Response>): this;
- prependOnceListener(event: 'checkExpectation', listener: http.RequestListener<Request, Response>): this;
- prependOnceListener(event: 'clientError', listener: (err: Error, socket: Duplex) => void): this;
- prependOnceListener(
- event: 'connect',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- prependOnceListener(event: 'request', listener: http.RequestListener<Request, Response>): this;
- prependOnceListener(
- event: 'upgrade',
- listener: (req: InstanceType<Request>, socket: Duplex, head: Buffer) => void,
- ): this;
- }
- /**
- * ```js
- * // curl -k https://localhost:8000/
- * const https = require('https');
- * const fs = require('fs');
- *
- * const options = {
- * key: fs.readFileSync('test/fixtures/keys/agent2-key.pem'),
- * cert: fs.readFileSync('test/fixtures/keys/agent2-cert.pem')
- * };
- *
- * https.createServer(options, (req, res) => {
- * res.writeHead(200);
- * res.end('hello world\n');
- * }).listen(8000);
- * ```
- *
- * Or
- *
- * ```js
- * const https = require('https');
- * const fs = require('fs');
- *
- * const options = {
- * pfx: fs.readFileSync('test/fixtures/test_cert.pfx'),
- * passphrase: 'sample'
- * };
- *
- * https.createServer(options, (req, res) => {
- * res.writeHead(200);
- * res.end('hello world\n');
- * }).listen(8000);
- * ```
- * @since v0.3.4
- * @param options Accepts `options` from `tls.createServer()`, `tls.createSecureContext()` and `http.createServer()`.
- * @param requestListener A listener to be added to the `'request'` event.
- */
- function createServer<
- Request extends typeof http.IncomingMessage = typeof http.IncomingMessage,
- Response extends typeof http.ServerResponse = typeof http.ServerResponse,
- >(requestListener?: http.RequestListener<Request, Response>): Server<Request, Response>;
- function createServer<
- Request extends typeof http.IncomingMessage = typeof http.IncomingMessage,
- Response extends typeof http.ServerResponse = typeof http.ServerResponse,
- >(
- options: ServerOptions<Request, Response>,
- requestListener?: http.RequestListener<Request, Response>,
- ): Server<Request, Response>;
- /**
- * Makes a request to a secure web server.
- *
- * The following additional `options` from `tls.connect()` are also accepted: `ca`, `cert`, `ciphers`, `clientCertEngine`, `crl`, `dhparam`, `ecdhCurve`, `honorCipherOrder`, `key`, `passphrase`,
- * `pfx`, `rejectUnauthorized`, `secureOptions`, `secureProtocol`, `servername`, `sessionIdContext`, `highWaterMark`.
- *
- * `options` can be an object, a string, or a `URL` object. If `options` is a
- * string, it is automatically parsed with `new URL()`. If it is a `URL` object, it will be automatically converted to an ordinary `options` object.
- *
- * `https.request()` returns an instance of the `http.ClientRequest` class. The `ClientRequest` instance is a writable stream. If one needs to
- * upload a file with a POST request, then write to the `ClientRequest` object.
- *
- * ```js
- * const https = require('https');
- *
- * const options = {
- * hostname: 'encrypted.google.com',
- * port: 443,
- * path: '/',
- * method: 'GET'
- * };
- *
- * const req = https.request(options, (res) => {
- * console.log('statusCode:', res.statusCode);
- * console.log('headers:', res.headers);
- *
- * res.on('data', (d) => {
- * process.stdout.write(d);
- * });
- * });
- *
- * req.on('error', (e) => {
- * console.error(e);
- * });
- * req.end();
- * ```
- *
- * Example using options from `tls.connect()`:
- *
- * ```js
- * const options = {
- * hostname: 'encrypted.google.com',
- * port: 443,
- * path: '/',
- * method: 'GET',
- * key: fs.readFileSync('test/fixtures/keys/agent2-key.pem'),
- * cert: fs.readFileSync('test/fixtures/keys/agent2-cert.pem')
- * };
- * options.agent = new https.Agent(options);
- *
- * const req = https.request(options, (res) => {
- * // ...
- * });
- * ```
- *
- * Alternatively, opt out of connection pooling by not using an `Agent`.
- *
- * ```js
- * const options = {
- * hostname: 'encrypted.google.com',
- * port: 443,
- * path: '/',
- * method: 'GET',
- * key: fs.readFileSync('test/fixtures/keys/agent2-key.pem'),
- * cert: fs.readFileSync('test/fixtures/keys/agent2-cert.pem'),
- * agent: false
- * };
- *
- * const req = https.request(options, (res) => {
- * // ...
- * });
- * ```
- *
- * Example using a `URL` as `options`:
- *
- * ```js
- * const options = new URL('https://abc:xyz@example.com');
- *
- * const req = https.request(options, (res) => {
- * // ...
- * });
- * ```
- *
- * Example pinning on certificate fingerprint, or the public key (similar to `pin-sha256`):
- *
- * ```js
- * const tls = require('tls');
- * const https = require('https');
- * const crypto = require('crypto');
- *
- * function sha256(s) {
- * return crypto.createHash('sha256').update(s).digest('base64');
- * }
- * const options = {
- * hostname: 'github.com',
- * port: 443,
- * path: '/',
- * method: 'GET',
- * checkServerIdentity: function(host, cert) {
- * // Make sure the certificate is issued to the host we are connected to
- * const err = tls.checkServerIdentity(host, cert);
- * if (err) {
- * return err;
- * }
- *
- * // Pin the public key, similar to HPKP pin-sha256 pinning
- * const pubkey256 = 'pL1+qb9HTMRZJmuC/bB/ZI9d302BYrrqiVuRyW+DGrU=';
- * if (sha256(cert.pubkey) !== pubkey256) {
- * const msg = 'Certificate verification error: ' +
- * `The public key of '${cert.subject.CN}' ` +
- * 'does not match our pinned fingerprint';
- * return new Error(msg);
- * }
- *
- * // Pin the exact certificate, rather than the pub key
- * const cert256 = '25:FE:39:32:D9:63:8C:8A:FC:A1:9A:29:87:' +
- * 'D8:3E:4C:1D:98:DB:71:E4:1A:48:03:98:EA:22:6A:BD:8B:93:16';
- * if (cert.fingerprint256 !== cert256) {
- * const msg = 'Certificate verification error: ' +
- * `The certificate of '${cert.subject.CN}' ` +
- * 'does not match our pinned fingerprint';
- * return new Error(msg);
- * }
- *
- * // This loop is informational only.
- * // Print the certificate and public key fingerprints of all certs in the
- * // chain. It's common to pin the public key of the issuer on the public
- * // internet, while pinning the public key of the service in sensitive
- * // environments.
- * do {
- * console.log('Subject Common Name:', cert.subject.CN);
- * console.log(' Certificate SHA256 fingerprint:', cert.fingerprint256);
- * console.log(' Public key pin-sha256:', sha256(cert.pubkey));
- *
- * lastprint256 = cert.fingerprint256;
- * cert = cert.issuerCertificate;
- * } while (cert.fingerprint256 !== lastprint256);
- *
- * },
- * };
- *
- * options.agent = new https.Agent(options);
- * const req = https.request(options, (res) => {
- * console.log('All OK. Server matched our pinned cert or public key');
- * console.log('statusCode:', res.statusCode);
- * // Print the HPKP values
- * console.log('headers:', res.headers['public-key-pins']);
- *
- * res.on('data', (d) => {});
- * });
- *
- * req.on('error', (e) => {
- * console.error(e.message);
- * });
- * req.end();
- * ```
- *
- * Outputs for example:
- *
- * ```text
- * Subject Common Name: github.com
- * Certificate SHA256 fingerprint: 25:FE:39:32:D9:63:8C:8A:FC:A1:9A:29:87:D8:3E:4C:1D:98:DB:71:E4:1A:48:03:98:EA:22:6A:BD:8B:93:16
- * Public key pin-sha256: pL1+qb9HTMRZJmuC/bB/ZI9d302BYrrqiVuRyW+DGrU=
- * Subject Common Name: DigiCert SHA2 Extended Validation Server CA
- * Certificate SHA256 fingerprint: 40:3E:06:2A:26:53:05:91:13:28:5B:AF:80:A0:D4:AE:42:2C:84:8C:9F:78:FA:D0:1F:C9:4B:C5:B8:7F:EF:1A
- * Public key pin-sha256: RRM1dGqnDFsCJXBTHky16vi1obOlCgFFn/yOhI/y+ho=
- * Subject Common Name: DigiCert High Assurance EV Root CA
- * Certificate SHA256 fingerprint: 74:31:E5:F4:C3:C1:CE:46:90:77:4F:0B:61:E0:54:40:88:3B:A9:A0:1E:D0:0B:A6:AB:D7:80:6E:D3:B1:18:CF
- * Public key pin-sha256: WoiWRyIOVNa9ihaBciRSC7XHjliYS9VwUGOIud4PB18=
- * All OK. Server matched our pinned cert or public key
- * statusCode: 200
- * headers: max-age=0; pin-sha256="WoiWRyIOVNa9ihaBciRSC7XHjliYS9VwUGOIud4PB18="; pin-sha256="RRM1dGqnDFsCJXBTHky16vi1obOlCgFFn/yOhI/y+ho=";
- * pin-sha256="k2v657xBsOVe1PQRwOsHsw3bsGT2VzIqz5K+59sNQws="; pin-sha256="K87oWBWM9UZfyddvDfoxL+8lpNyoUB2ptGtn0fv6G2Q="; pin-sha256="IQBnNBEiFuhj+8x6X8XLgh01V9Ic5/V3IRQLNFFc7v4=";
- * pin-sha256="iie1VXtL7HzAMF+/PVPR9xzT80kQxdZeJ+zduCB3uj0="; pin-sha256="LvRiGEjRqfzurezaWuj8Wie2gyHMrW5Q06LspMnox7A="; includeSubDomains
- * ```
- * @since v0.3.6
- * @param options Accepts all `options` from `request`, with some differences in default values:
- */
- function request(
- options: RequestOptions | string | URL,
- callback?: (res: http.IncomingMessage) => void,
- ): http.ClientRequest;
- function request(
- url: string | URL,
- options: RequestOptions,
- callback?: (res: http.IncomingMessage) => void,
- ): http.ClientRequest;
- /**
- * Like `http.get()` but for HTTPS.
- *
- * `options` can be an object, a string, or a `URL` object. If `options` is a
- * string, it is automatically parsed with `new URL()`. If it is a `URL` object, it will be automatically converted to an ordinary `options` object.
- *
- * ```js
- * const https = require('https');
- *
- * https.get('https://encrypted.google.com/', (res) => {
- * console.log('statusCode:', res.statusCode);
- * console.log('headers:', res.headers);
- *
- * res.on('data', (d) => {
- * process.stdout.write(d);
- * });
- *
- * }).on('error', (e) => {
- * console.error(e);
- * });
- * ```
- * @since v0.3.6
- * @param options Accepts the same `options` as {@link request}, with the `method` always set to `GET`.
- */
- function get(
- options: RequestOptions | string | URL,
- callback?: (res: http.IncomingMessage) => void,
- ): http.ClientRequest;
- function get(
- url: string | URL,
- options: RequestOptions,
- callback?: (res: http.IncomingMessage) => void,
- ): http.ClientRequest;
- let globalAgent: Agent;
-}
-declare module 'node:https' {
- export * from 'https';
-}
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/3-Metra-Iznad-Neba-Ceo-Film-Sa-Prevodom-Free-TOP-11l.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/3-Metra-Iznad-Neba-Ceo-Film-Sa-Prevodom-Free-TOP-11l.md
deleted file mode 100644
index f99046427d756e50eb71ffe4eab60c28e9fbb7b3..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/3-Metra-Iznad-Neba-Ceo-Film-Sa-Prevodom-Free-TOP-11l.md
+++ /dev/null
@@ -1,62 +0,0 @@
-## 3 Metra Iznad Neba Ceo Film Sa Prevodom Free 11l
-
-
-
- ![3 Metra Iznad Neba Ceo Film Sa Prevodom Free __TOP__ 11l](https://i.ytimg.com/vi/ojYI6J-PVNs/maxresdefault.jpg)
-
-
-
-**CLICK HERE ===> [https://www.google.com/url?q=https%3A%2F%2Furlin.us%2F2tyfIx&sa=D&sntz=1&usg=AOvVaw0F47OoTGaJeSxqUw3si9xi](https://www.google.com/url?q=https%3A%2F%2Furlin.us%2F2tyfIx&sa=D&sntz=1&usg=AOvVaw0F47OoTGaJeSxqUw3si9xi)**
-
-
-
-# 3 Metra Iznad Neba: A Romantic Drama About Two Young People From Different Worlds
-
-
-
-3 Metra Iznad Neba (Three Meters Above the Sky) is a 2010 Spanish film based on the novel by Federico Moccia. It tells the story of Babi (Maria Valverde), a girl from a wealthy family, and Hache (Mario Casas), a rebellious boy who loves illegal motorcycle races and fights.
They fall in love despite their differences, but their relationship faces many challenges and obstacles.
-
-
-The film was directed by Fernando González Molina and was a huge success in Spain, becoming the highest-grossing Spanish film of 2010. It also received positive reviews from critics and audiences, who praised the chemistry between the lead actors, the soundtrack, and the cinematography. The film spawned a sequel, Tengo Ganas de Ti (I Want You), which was released in 2012.
-
-
-If you are looking for a romantic drama that delivers both emotion and adrenaline, you can watch 3 Metra Iznad Neba online with Serbian subtitles for free on Filmovizija[^1^] or Dailymotion[^2^] [^3^] [^4^]. You can also download the film from various websites, but be careful of viruses and malware. Enjoy this passionate and thrilling story of love and adventure!
-
-3 Metra Iznad Neba is not just a typical love story. It also explores the themes of social class, family, friendship, loyalty, and identity. Babi and Hache have to deal with the expectations and pressures of their parents, who do not approve of their relationship. They also have to face the consequences of their actions, which sometimes put them in danger or hurt others. They learn to grow and mature as individuals and as a couple, but they also make mistakes and suffer losses.
-
-
-The film also features a supporting cast of characters who add depth and complexity to the story. Among them are Babi's best friend Katina (Marina Salas), who is more outgoing and adventurous than she is; Hache's brother Dani, who is in prison for a crime he did not commit; Hache's friend Pollo (Álvaro Cervantes), who is loyal and funny but also reckless and impulsive; and Pollo's girlfriend Chica (Nerea Camacho), who is sweet and innocent but also naive and insecure.
-
-
-3 Metra Iznad Neba is a film that will make you laugh, cry, dream, and feel. It will touch your heart, and it will show you that love can overcome any obstacle, but also that love can hurt as well as heal. It is a film that you will not forget.
-
-If you are a fan of romantic dramas, you should definitely watch 3 Metra Iznad Neba. It will captivate you with its story, characters, music, and visuals, and it will inspire you to live your life to the fullest and to follow your heart. You can watch it online for free with Serbian subtitles or download it from various sources. Don't miss this film, which has become a cult classic among viewers around the world!
- - dfd1c89656 - - - - - diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Gmfc-Pro-Fixed-Crack.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Gmfc-Pro-Fixed-Crack.md deleted file mode 100644 index 1a300e612207c58aed692576725ffc189eeace84..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Gmfc-Pro-Fixed-Crack.md +++ /dev/null @@ -1,80 +0,0 @@ -## Gmfc pro crack - - - - - - - - - -**DOWNLOAD ::: [https://soawresotni.blogspot.com/?d=2tyg6o](https://soawresotni.blogspot.com/?d=2tyg6o)** - - - - - - - - - - - - - -# How to Download and Install GMFC EN PRO for Free - - - -GMFC EN PRO is a software program that allows you to cut wings, fuselages and other shapes for your model airplanes. It is a powerful and versatile tool that can handle any complex shape and design. However, GMFC EN PRO is not a free software. It costs $195.41 to purchase a license from the official website[^1^]. If you want to try GMFC EN PRO without paying, you might be tempted to look for a crack version online. But is it safe and legal to do so? - - - -A crack version is a modified version of a software program that bypasses its original security features and allows you to use it without a license. Crack versions are usually distributed by hackers or pirates who may have malicious intentions. They may infect your computer with viruses, malware, spyware or ransomware that can damage your system, steal your personal information, or lock your files until you pay a ransom. Moreover, downloading and using crack versions is illegal and unethical. You are violating the intellectual property rights of the software developer and exposing yourself to potential lawsuits or fines. - - - -Therefore, we do not recommend you to download or install GMFC EN PRO crack version from any website. It is risky, illegal and unfair. Instead, you can download the official trial version of GMFC EN PRO from the developer's website[^3^]. The trial version allows you to test GMFC EN PRO for 30 days with full functionality. You can use it to cut any shape or design you want and see if it meets your needs. After 30 days, you can decide whether to buy a license or not. - - - -To download and install GMFC EN PRO trial version, follow these steps: - - - -1. Go to https://gmfcsoft.fr/shop/en/download/[^3^] and click on the "Download" button next to the latest stable version (4.0.6 as of April 2023). - -2. Save the xx.zip file to your computer and unzip it. - -3. Run install\_gmfc\_xx\_en.exe and follow the instructions on the screen. - -4. Launch GMFC EN PRO from the Windows start menu. - -5. Enter your name and email address when prompted. - -6. You will receive an email with a software code that uniquely identifies your copy of GMFC EN PRO. Copy and paste it into the program. - -7. You can now use GMFC EN PRO for 30 days with full functionality. - - - -We hope this article has helped you understand how to download and install GMFC EN PRO for free legally and safely. If you have any questions or feedback, please let us know in the comments below. - - - -GMFC EN PRO is not just a software for cutting wings. It also supports any complex shape such as fuselages and letters. You can import DXF files from other CAD programs or create your own designs with GMFC EN PRO's built-in editor. You can also edit and optimize your shapes with various tools such as scaling, rotating, mirroring, offsetting, smoothing, and more. 
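To make those shape operations concrete, here is a short, generic Python sketch of the underlying 2D math (scaling, rotating, and mirroring a profile of XY points). This is purely illustrative and is not GMFC's actual code:

```python
import numpy as np

# A profile is an (N, 2) array of XY points, e.g. a wing rib outline.

def scale(points: np.ndarray, factor: float) -> np.ndarray:
    # Uniform scaling about the origin.
    return points * factor

def rotate(points: np.ndarray, degrees: float) -> np.ndarray:
    # Counter-clockwise rotation about the origin.
    t = np.radians(degrees)
    rot = np.array([[np.cos(t), -np.sin(t)],
                    [np.sin(t),  np.cos(t)]])
    return points @ rot.T

def mirror_x(points: np.ndarray) -> np.ndarray:
    # Mirror about the X axis (flip the Y coordinate).
    return points * np.array([1.0, -1.0])
```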
GMFC EN PRO allows you to preview your cuts in 3D and adjust the cutting parameters such as speed, kerf compensation, and wire temperature. You can also simulate the cutting process and check for errors or collisions before actually cutting the foam[^1^].
-
-
-GMFC EN PRO works with two types of interfaces: parallel port interfaces with a timer and the GGC adapter that connects most of the parallel motor interfaces through USB. The parallel port interfaces require a specific driver that is available for the Windows 2000/XP/7/8/10 (32-bit) versions. The GGC adapter works for all Windows versions and does not need a driver. The GGC adapter also provides some additional features such as an LCD display, jog buttons, an emergency stop, and more[^1^]. You can choose the interface that suits your needs and budget.
-
-
-GMFC EN PRO is developed in collaboration with many modelers thanks to CNC@NET, a French mailing list dedicated to foam cutting. You can join this community and get tips, advice, support, and feedback from other users of GMFC EN PRO. You can also find information about machines, existing GMFC projects, tutorials, and links on the official website of GMFC[^2^]. GMFC EN PRO is constantly updated and improved to meet the needs and expectations of its users.
-
- dfd1c89656
-
-
-
- diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Queen-Tamil-Dubbed-Movie.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Queen-Tamil-Dubbed-Movie.md
deleted file mode 100644
index 965d8eac038bfed12b8041b3962cdbdee335fea3..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/Queen-Tamil-Dubbed-Movie.md
+++ /dev/null
@@ -1,82 +0,0 @@
-## Queen Tamil Dubbed Movie
-
-
-
- ![Queen Tamil Dubbed Movie](https://timesofindia.indiatimes.com/photo/61309515.cms)
-
-
-
-**Click Here ->->->-> [https://www.google.com/url?q=https%3A%2F%2Fssurll.com%2F2tyfQ5&sa=D&sntz=1&usg=AOvVaw2vtmNg2jxjGuOrG-Dbnmg3](https://www.google.com/url?q=https%3A%2F%2Fssurll.com%2F2tyfQ5&sa=D&sntz=1&usg=AOvVaw2vtmNg2jxjGuOrG-Dbnmg3)**
-
-
-
-# Queen Tamil Dubbed Movie: A Comedy-Drama About Self-Discovery
-
-
-
-Queen is a 2014 Hindi movie that was dubbed in Tamil and released in 2015. The movie stars Kangana Ranaut as Rani, a young woman who embarks on a solo honeymoon after her fiancé calls off their wedding. Along the way, she meets new friends, explores different cultures, and learns to live life on her own terms.
-
-
-
-The movie was directed by Vikas Bahl and co-written by him and Parveez Shaikh. It received critical acclaim and commercial success, winning several awards and becoming one of the highest-grossing Indian films of 2014. The movie was praised for its realistic portrayal of a woman's journey of self-discovery, as well as its humorous and heartwarming scenes.
-
-
-
-Queen Tamil Dubbed Movie is available to watch online on various platforms, such as YouTube, Hotstar, and Amazon Prime Video. The movie has a runtime of 146 minutes and is rated U/A by the Central Board of Film Certification. It has an IMDb rating of 8.2 out of 10 and a Rotten Tomatoes score of 100%.
-
-
-
-If you are looking for a comedy-drama that will make you laugh, cry, and cheer for the protagonist, then Queen Tamil Dubbed Movie is a must-watch. The movie will inspire you to follow your dreams, embrace your flaws, and celebrate your uniqueness.
- - - -## Queen Tamil Dubbed Movie: A Star-Studded Cast and Crew - - - -Queen Tamil Dubbed Movie features a talented cast and crew who have brought the story of Rani to life. The movie stars Kangana Ranaut as Rani, who won the National Film Award for Best Actress for her performance. Rajkummar Rao plays Vijay, Rani's ex-fiancé, who is a selfish and arrogant man. Lisa Haydon plays Vijayalakshmi, Rani's friend and confidante in Paris, who is a free-spirited and fun-loving woman. - - - -The movie also features Mish Boyko as Oleksander, Jeffrey Ho as Taka, Joseph Guitobh as Tim, Marco Canadea as Marcello, and Tantrik Baba as Baba, who are Rani's friends and companions in Amsterdam. The movie was dubbed in Tamil by popular actors such as Vanitha Krishnachandran, Ramya Krishnan, Anikha Surendran, Sonia Agarwal, Viji Chandrashekhar, Indrajith Sukumaran, Vineeth, and Vamsi Krishna[^2^] [^3^]. - - - -The movie was directed by Vikas Bahl, who also co-wrote the script with Parveez Shaikh. The movie was produced by Anurag Kashyap, Vikramaditya Motwane, and Madhu Mantena under the banner of Phantom Films. The cinematography was done by Bobby Singh and Siddharth Diwan, while the editing was done by Abhijit Kokate and Anurag Kashyap. The music was composed by Amit Trivedi, while the lyrics were written by Anvita Dutt Guptan. - - - -## Queen Tamil Dubbed Movie: A Critical and Commercial Success - - - -Queen Tamil Dubbed Movie was well-received by both critics and audiences, who appreciated the movie for its empowering message, realistic depiction, and engaging narration. The movie was hailed as a feminist masterpiece that broke the stereotypes of Bollywood cinema and showcased the growth of a woman who finds her identity and happiness beyond the expectations of society and family. - - - -The movie also earned several accolades and nominations, including four National Film Awards, three Filmfare Awards, two Screen Awards, two Zee Cine Awards, and two International Indian Film Academy Awards. The movie was also selected for screening at various international film festivals, such as the Busan International Film Festival, the BFI London Film Festival, and the Santa Barbara International Film Festival. - - - -Queen Tamil Dubbed Movie was also a box office hit, grossing over ₹108 crore worldwide against a budget of ₹12.5 crore. The movie became one of the highest-grossing Indian films of 2014 and one of the most profitable films of all time. The movie also inspired several remakes in other languages, such as Butterfly in Kannada, Paris Paris in Tamil, Zam Zam in Malayalam, and That Is Mahalakshmi in Telugu. - - dfd1c89656 - - - - - diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/All Alone 1 Full Movie In Hindi 720p Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/All Alone 1 Full Movie In Hindi 720p Download.md deleted file mode 100644 index bc383481f01160133e117272aa169b474726f707..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/All Alone 1 Full Movie In Hindi 720p Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      All Alone 1 Full Movie In Hindi 720p Download


      DOWNLOAD ––– https://urlgoal.com/2uCJPA



      -
      -Money heist season 1 english dubbed audio track download. ... He said he shot for his portions alone and didn't get to meet anyone from the cast, not even Morte. ... Rejctx (Season 2) (2020) Hindi Full Movie Watch Online Free ( Ep 01-05). 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HT System Administrator 8.8.5 Activator Crack.epub.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HT System Administrator 8.8.5 Activator Crack.epub.md deleted file mode 100644 index f1caa03a9cc3259588167e8d14dc71390b1a4580..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HT System Administrator 8.8.5 Activator Crack.epub.md +++ /dev/null @@ -1,46 +0,0 @@ -

      HT System Administrator 8.8.5 Activator Crack.epub


      DOWNLOAD ⇒⇒⇒ https://urlgoal.com/2uCJBc



      - -The path is, however, difficult to find. Look for "edit:kdesu " in your forum's New Topics. - -I would recommend against installing kdesudo on any systems that may have vulnerable users (for example, web servers) that use the Simple Socket Protocol (SSLeay). It is just too dangerous. - -Most users should be installing kdesu (and not kdesudo) and not running vulnerable applications (such as nc or ftp) with sudo rights. - -kdesu provides elevated privileges to specific processes and programs. It isn't supposed to work for administrators. - -A better solution is to use su instead of sudo. - -I've seen multiple people running kdesu by accident. It is a similar problem to running kate with sudo or the equiv of running kate in su (see earlier post). - -To further illustrate the point of the previous two post, here is an example of someone running kdesu under kate - -# kdesudo kate - -kdesu: Could not connect to S-1-5-21-1237578134-2833785585-5075467469-83712082-93388345-508834116.bc.googleusercontent.com:22 - -Here is an example of running kate with sudo: - -# kate - -kate: ERROR: Communication problem with kate, it probably crashed. - -# - -Edit:A little more on kdesu. - -kdesu is a wrapper around sudo. If you search google for kdesu or sudo you will find that there are several guides explaining how to use them and how they differ. - -In short, kdesu behaves like "su -c" where su -c means "sudo -c". What the -c flag does is tell sudo to run the command specified after it as root. - -Here is an example of running kate with kdesu - -# kdesu kate - -# this is the equivalent of running kate as root, but without entering your password. - -Edit: - -As to the remote exploitation 4fefd39f24
      -
      -
      -
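To make the su/sudo distinction described in the post above concrete, here is a minimal Python sketch of the two invocation styles (`su -c` versus `sudo`); `kate` is only an illustrative command, and the actual password prompting depends on your su/sudo configuration:

```python
import subprocess

def run_with_su(command: str) -> int:
    # su -c passes a single quoted command string to a root shell;
    # it prompts for the *root* password.
    return subprocess.call(["su", "-c", command])

def run_with_sudo(command: list) -> int:
    # sudo runs the argument vector as root; it prompts for the
    # *invoking user's* password, subject to /etc/sudoers.
    return subprocess.call(["sudo", "--"] + list(command))

if __name__ == "__main__":
    run_with_su("kate /etc/fstab")
    run_with_sudo(["kate", "/etc/fstab"])
```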

      diff --git a/spaces/robinhad/kruk/README.md b/spaces/robinhad/kruk/README.md deleted file mode 100644 index 784173d35fd46737917d89397b7b8921c0fa3445..0000000000000000000000000000000000000000 --- a/spaces/robinhad/kruk/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Kruk -emoji: 🐢 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.38.0 -python_version: 3.10.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Usb Dongle Backup And Recovery 2012 Pro Checked PATCHED.md b/spaces/rorallitri/biomedical-language-models/logs/Download Usb Dongle Backup And Recovery 2012 Pro Checked PATCHED.md deleted file mode 100644 index ba2d02493894119956824086dccbe13656c62170..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Usb Dongle Backup And Recovery 2012 Pro Checked PATCHED.md +++ /dev/null @@ -1,47 +0,0 @@ -
      -

      How to Download USB Dongle Backup and Recovery 2012 Pro Checked

      -

If you are looking for a reliable and easy way to back up and restore your USB dongle data, you may want to consider downloading USB Dongle Backup and Recovery 2012 Pro Checked. This is a handy tool that can help you create a virtual copy of your USB dongle and recover it in case of loss, damage, or theft.

      -

      download usb dongle backup and recovery 2012 pro checked


      DOWNLOADhttps://tinurll.com/2uznTV



      -

      USB dongles are small devices that are used to protect software from unauthorized copying or use. They contain a unique identification code that is required to run the software. However, USB dongles are also vulnerable to physical damage, malfunction, or theft, which can result in losing access to your software and data.

      -

That's why it is important to back up your USB dongle regularly and to have a recovery option in case of emergency. USB Dongle Backup and Recovery 2012 Pro Checked is a software tool that can help you with that. It can create a virtual emulator of your USB dongle that can be stored on your computer or on a cloud service. You can then use the emulator to run your software without the need for the original dongle.

      -

      USB Dongle Backup and Recovery 2012 Pro Checked is compatible with most types of USB dongles, such as HASP, Sentinel, Hardlock, Eutron Smartkey, Dinkey, Marx, Deskey, Wibu, Safenet, Matrix, Keylok, and more. It can also work with Windows 10 and other operating systems.

      -

      To download USB Dongle Backup and Recovery 2012 Pro Checked, you need to follow these steps:

      -
        -
1. Visit the official website of VIP Dongle Team at https://vipdongle.com/mp/dongle-backup/
2. Select the type of USB dongle that you want to back up from the list of supported brands.
3. Fill out the request form with your name, email address, phone number, and dongle information.
4. Wait for the confirmation email from VIP Dongle Team with the download link and instructions.
5. Download the software and install it on your computer.
6. Connect your USB dongle to your computer and run the software.
7. Follow the steps to create a virtual emulator of your USB dongle and save it in your preferred location.
8. Test the emulator by running your software without the original dongle.
      -

Congratulations! You have successfully downloaded USB Dongle Backup and Recovery 2012 Pro Checked and created a backup of your USB dongle. You can now use the emulator whenever you need to access your software, without the risk of losing or damaging your dongle. You can also restore your dongle data from the emulator in case of emergency.

      -

      If you have any questions or problems with the software, you can contact VIP Dongle Team at info@vipdongle.com or visit their website for more information.

      -

      - -

      Benefits of USB Dongle Backup and Recovery

      -

USB dongles are not only useful for protecting software from piracy, but also for providing flexible licensing options for users and developers. For example, you can use a USB dongle to activate software on multiple computers, or to switch between different versions or features of a program. However, USB dongles also have some drawbacks, such as being easily lost, stolen, or damaged.
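As a rough illustration of how such dongle-based licensing is typically wired into an application, here is a hypothetical Python sketch; `read_dongle_id()` stands in for a vendor SDK call (HASP, Sentinel, and similar SDKs each have their own APIs) and is not a real function:

```python
# Hypothetical feature gating keyed on a dongle-provided ID.
FEATURE_TABLE = {
    "A1B2C3": {"edition": "pro", "features": {"export", "batch"}},
    "D4E5F6": {"edition": "basic", "features": set()},
}

def read_dongle_id() -> str:
    # Placeholder: replace with the vendor SDK call for your dongle.
    raise NotImplementedError("vendor-specific dongle query goes here")

def allowed_features() -> set:
    entry = FEATURE_TABLE.get(read_dongle_id())
    if entry is None:
        raise PermissionError("no valid dongle found")
    return entry["features"]
```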

      -

That's why it is wise to back up your USB dongle regularly and to have a recovery solution in case of any problems. By backing up your USB dongle, you gain the following benefits:

      -
        -
- You can prevent data loss and avoid losing access to your software and files.
- You can save time and money by not having to replace your USB dongle or buy a new license.
- You can reduce the risk of malware infection from unlicensed or cracked software.
- You can improve your software management and compliance by having a backup copy of your license.
      -

USB Dongle Backup and Recovery 2012 Pro Checked is one of the best tools for backing up and restoring your USB dongle data easily and securely. It can create a virtual emulator of your USB dongle that can be used to run your software without the need for the physical device. It can also restore your USB dongle data from the emulator in case of emergency.

      -

      How to Use USB Dongle Backup and Recovery 2012 Pro Checked

      -

      Using USB Dongle Backup and Recovery 2012 Pro Checked is very simple and straightforward. You just need to follow these steps:

      -
        -
1. Download the software from the official website of VIP Dongle Team at https://vipdongle.com/mp/dongle-backup/
2. Select the type of USB dongle that you want to back up from the list of supported brands.
3. Fill out the request form with your name, email address, phone number, and dongle information.
4. Wait for the confirmation email from VIP Dongle Team with the download link and instructions.
5. Download the software and install it on your computer.
6. Connect your USB dongle to your computer and run the software.
7. Follow the steps to create a virtual emulator of your USB dongle and save it in your preferred location.
8. Test the emulator by running your software without the original dongle.
      -

      If you have any questions or problems with the software, you can contact VIP Dongle Team at info@vipdongle.com or visit their website for more information.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Downloadebooklesmiserablesbahasaindonesia.md b/spaces/rorallitri/biomedical-language-models/logs/Downloadebooklesmiserablesbahasaindonesia.md deleted file mode 100644 index 830d90bd0f6d75cc97f5376e61ef32de346a7eed..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Downloadebooklesmiserablesbahasaindonesia.md +++ /dev/null @@ -1,14 +0,0 @@ -

      downloadebooklesmiserablesbahasaindonesia


      Download Zip > https://tinurll.com/2uzlUk



      -
      -download Les Miserables Bahasa Indonesia ebook download geetek maxi 700 drivers winmount 3.5 pro serial number gpsmapedit 2.0 crack ati hd 3000 driver lg gx300 drivers. -Download sound driver for windows 7 asus. -driver pack solution. -Driver genius professional key. -Driver genius professional free download. -Driverpack solution free download. -Driver booster pro free download. -Download driver for. -Program for updating drivers 8a78ff9644
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Free Inside Brazilian Rhythm Section Pdf 14l A Comprehensive Guide to the Music of Brazil.md b/spaces/rorallitri/biomedical-language-models/logs/Free Inside Brazilian Rhythm Section Pdf 14l A Comprehensive Guide to the Music of Brazil.md deleted file mode 100644 index f22dbb83c7b6bd4a37b42e7e1c67194bc1b5ae42..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Free Inside Brazilian Rhythm Section Pdf 14l A Comprehensive Guide to the Music of Brazil.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Free Inside Brazilian Rhythm Section Pdf 14l


      DOWNLOAD ✒ ✒ ✒ https://tinurll.com/2uzonP



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Gouru Tirupati Reddy Vastu Books In Telugu !FULL!.md b/spaces/rorallitri/biomedical-language-models/logs/Gouru Tirupati Reddy Vastu Books In Telugu !FULL!.md deleted file mode 100644 index f47090aec1977cbb4b62b68eee2d5a202fa2ca7b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Gouru Tirupati Reddy Vastu Books In Telugu !FULL!.md +++ /dev/null @@ -1,5 +0,0 @@ -
      -

No, it is not recommended. Please note that these books are for your information only; do not decide to buy plots or houses, or to alter your properties, based on reading them alone. It is always better to approach an expert Vastu consultant, show the properties you are interested in to a specialist, and get appropriate recommendations. That is the right process.

      -

      gouru tirupati reddy vastu books in telugu


      Download Zip ===== https://tinurll.com/2uzmLc



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/JaalhindiHigh Quality Fullmoviefreedownload.md b/spaces/rorallitri/biomedical-language-models/logs/JaalhindiHigh Quality Fullmoviefreedownload.md deleted file mode 100644 index e64625f7ffb7f0edffd44d0f1b1b99ebbaf43237..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/JaalhindiHigh Quality Fullmoviefreedownload.md +++ /dev/null @@ -1,6 +0,0 @@ -

      jaalhindifullmoviefreedownload


      Download https://tinurll.com/2uzmn8



      -
      - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/runa91/barc_gradio/src/lifting_to_3d/linear_model.py b/spaces/runa91/barc_gradio/src/lifting_to_3d/linear_model.py deleted file mode 100644 index c11266acefcb6bbecd8a748a44cb4915ef4da4b9..0000000000000000000000000000000000000000 --- a/spaces/runa91/barc_gradio/src/lifting_to_3d/linear_model.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# some code from https://raw.githubusercontent.com/weigq/3d_pose_baseline_pytorch/master/src/model.py - - -from __future__ import absolute_import -from __future__ import print_function -import torch -import torch.nn as nn - -import os -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) -# from priors.vae_pose_model.vae_model import VAEmodel -from priors.normalizing_flow_prior.normalizing_flow_prior import NormalizingFlowPrior - - -def weight_init_dangerous(m): - # this is dangerous as it may overwrite the normalizing flow weights - if isinstance(m, nn.Linear): - nn.init.kaiming_normal(m.weight) - - -class Linear(nn.Module): - def __init__(self, linear_size, p_dropout=0.5): - super(Linear, self).__init__() - self.l_size = linear_size - - self.relu = nn.ReLU(inplace=True) - self.dropout = nn.Dropout(p_dropout) - - self.w1 = nn.Linear(self.l_size, self.l_size) - self.batch_norm1 = nn.BatchNorm1d(self.l_size) - - self.w2 = nn.Linear(self.l_size, self.l_size) - self.batch_norm2 = nn.BatchNorm1d(self.l_size) - - def forward(self, x): - y = self.w1(x) - y = self.batch_norm1(y) - y = self.relu(y) - y = self.dropout(y) - y = self.w2(y) - y = self.batch_norm2(y) - y = self.relu(y) - y = self.dropout(y) - out = x + y - return out - - -class LinearModel(nn.Module): - def __init__(self, - linear_size=1024, - num_stage=2, - p_dropout=0.5, - input_size=16*2, - output_size=16*3): - super(LinearModel, self).__init__() - self.linear_size = linear_size - self.p_dropout = p_dropout - self.num_stage = num_stage - # input - self.input_size = input_size # 2d joints: 16 * 2 - # output - self.output_size = output_size # 3d joints: 16 * 3 - # process input to linear size - self.w1 = nn.Linear(self.input_size, self.linear_size) - self.batch_norm1 = nn.BatchNorm1d(self.linear_size) - self.linear_stages = [] - for l in range(num_stage): - self.linear_stages.append(Linear(self.linear_size, self.p_dropout)) - self.linear_stages = nn.ModuleList(self.linear_stages) - # post-processing - self.w2 = nn.Linear(self.linear_size, self.output_size) - # helpers (relu and dropout) - self.relu = nn.ReLU(inplace=True) - self.dropout = nn.Dropout(self.p_dropout) - - def forward(self, x): - # pre-processing - y = self.w1(x) - y = self.batch_norm1(y) - y = self.relu(y) - y = self.dropout(y) - # linear layers - for i in range(self.num_stage): - y = self.linear_stages[i](y) - # post-processing - y = self.w2(y) - return y - - -class LinearModelComplete(nn.Module): - def __init__(self, - linear_size=1024, - num_stage_comb=2, - num_stage_heads=1, - num_stage_heads_pose=1, - trans_sep=False, - p_dropout=0.5, - input_size=16*2, - intermediate_size=1024, - output_info=None, - n_joints=25, - n_z=512, - add_z_to_3d_input=False, - n_segbps=64*2, - add_segbps_to_3d_input=False, - structure_pose_net='default', - fix_vae_weights=True, - nf_version=None): # 0): n_silh_enc - super(LinearModelComplete, self).__init__() - if add_z_to_3d_input: - self.n_z_to_add = n_z # 512 - else: - self.n_z_to_add = 0 - if add_segbps_to_3d_input: - self.n_segbps_to_add = n_segbps # 64 - else: - self.n_segbps_to_add = 0 - self.input_size = 
input_size - self.linear_size = linear_size - self.p_dropout = p_dropout - self.num_stage_comb = num_stage_comb - self.num_stage_heads = num_stage_heads - self.num_stage_heads_pose = num_stage_heads_pose - self.trans_sep = trans_sep - self.input_size = input_size - self.intermediate_size = intermediate_size - self.structure_pose_net = structure_pose_net - self.fix_vae_weights = fix_vae_weights # only relevant if structure_pose_net='vae' - self.nf_version = nf_version - if output_info is None: - pose = {'name': 'pose', 'n': n_joints*6, 'out_shape':[n_joints, 6]} - cam = {'name': 'flength', 'n': 1} - if self.trans_sep: - translation_xy = {'name': 'trans_xy', 'n': 2} - translation_z = {'name': 'trans_z', 'n': 1} - self.output_info = [pose, translation_xy, translation_z, cam] - else: - translation = {'name': 'trans', 'n': 3} - self.output_info = [pose, translation, cam] - if self.structure_pose_net == 'vae' or self.structure_pose_net == 'normflow': - global_pose = {'name': 'global_pose', 'n': 1*6, 'out_shape':[1, 6]} - self.output_info.append(global_pose) - else: - self.output_info = output_info - self.linear_combined = LinearModel(linear_size=self.linear_size, - num_stage=self.num_stage_comb, - p_dropout=p_dropout, - input_size=self.input_size + self.n_segbps_to_add + self.n_z_to_add, ###### - output_size=self.intermediate_size) - self.output_info_linear_models = [] - for ind_el, element in enumerate(self.output_info): - if element['name'] == 'pose': - num_stage = self.num_stage_heads_pose - if self.structure_pose_net == 'default': - output_size_pose_lin = element['n'] - elif self.structure_pose_net == 'vae': - # load vae decoder - self.pose_vae_model = VAEmodel() - self.pose_vae_model.initialize_with_pretrained_weights() - # define the input size of the vae decoder - output_size_pose_lin = self.pose_vae_model.latent_size - elif self.structure_pose_net == 'normflow': - # the following will automatically be initialized - self.pose_normflow_model = NormalizingFlowPrior(nf_version=self.nf_version) - output_size_pose_lin = element['n'] - 6 # no global rotation - else: - raise NotImplementedError - self.output_info_linear_models.append(LinearModel(linear_size=self.linear_size, - num_stage=num_stage, - p_dropout=p_dropout, - input_size=self.intermediate_size, - output_size=output_size_pose_lin)) - else: - if element['name'] == 'global_pose': - num_stage = self.num_stage_heads_pose - else: - num_stage = self.num_stage_heads - self.output_info_linear_models.append(LinearModel(linear_size=self.linear_size, - num_stage=num_stage, - p_dropout=p_dropout, - input_size=self.intermediate_size, - output_size=element['n'])) - element['linear_model_index'] = ind_el - self.output_info_linear_models = nn.ModuleList(self.output_info_linear_models) - - def forward(self, x): - device = x.device - # combined stage - if x.shape[1] == self.input_size + self.n_segbps_to_add + self.n_z_to_add: - y = self.linear_combined(x) - elif x.shape[1] == self.input_size + self.n_segbps_to_add: - x_mod = torch.cat((x, torch.normal(0, 1, size=(x.shape[0], self.n_z_to_add)).to(device)), dim=1) - y = self.linear_combined(x_mod) - else: - print(x.shape) - print(self.input_size) - print(self.n_segbps_to_add) - print(self.n_z_to_add) - raise ValueError - # heads - results = {} - results_trans = {} - for element in self.output_info: - linear_model = self.output_info_linear_models[element['linear_model_index']] - if element['name'] == 'pose': - if self.structure_pose_net == 'default': - results['pose'] = (linear_model(y)).reshape((-1, 
element['out_shape'][0], element['out_shape'][1])) - normflow_z = None - elif self.structure_pose_net == 'vae': - res_lin = linear_model(y) - if self.fix_vae_weights: - self.pose_vae_model.requires_grad_(False) # let gradients flow through but don't update the parameters - res_vae = self.pose_vae_model.inference(feat=res_lin) - self.pose_vae_model.requires_grad_(True) - else: - res_vae = self.pose_vae_model.inference(feat=res_lin) - res_pose_not_glob = res_vae.reshape((-1, element['out_shape'][0], element['out_shape'][1])) - normflow_z = None - elif self.structure_pose_net == 'normflow': - normflow_z = linear_model(y)*0.1 - self.pose_normflow_model.requires_grad_(False) # let gradients flow though but don't update the parameters - res_pose_not_glob = self.pose_normflow_model.run_backwards(z=normflow_z).reshape((-1, element['out_shape'][0]-1, element['out_shape'][1])) - else: - raise NotImplementedError - elif element['name'] == 'global_pose': - res_pose_glob = (linear_model(y)).reshape((-1, element['out_shape'][0], element['out_shape'][1])) - elif element['name'] == 'trans_xy' or element['name'] == 'trans_z': - results_trans[element['name']] = linear_model(y) - else: - results[element['name']] = linear_model(y) - if self.trans_sep: - results['trans'] = torch.cat((results_trans['trans_xy'], results_trans['trans_z']), dim=1) - # prepare pose including global rotation - if self.structure_pose_net == 'vae': - # results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob), dim=1) - results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob[:, 1:, :]), dim=1) - elif self.structure_pose_net == 'normflow': - results['pose'] = torch.cat((res_pose_glob, res_pose_not_glob[:, :, :]), dim=1) - # return a dictionary which contains all results - results['normflow_z'] = normflow_z - return results # this is a dictionary - - - - - -# ------------------------------------------ -# for pretraining of the 3d model only: -# (see combined_model/model_shape_v2.py) - -class Wrapper_LinearModelComplete(nn.Module): - def __init__(self, - linear_size=1024, - num_stage_comb=2, - num_stage_heads=1, - num_stage_heads_pose=1, - trans_sep=False, - p_dropout=0.5, - input_size=16*2, - intermediate_size=1024, - output_info=None, - n_joints=25, - n_z=512, - add_z_to_3d_input=False, - n_segbps=64*2, - add_segbps_to_3d_input=False, - structure_pose_net='default', - fix_vae_weights=True, - nf_version=None): - self.add_segbps_to_3d_input = add_segbps_to_3d_input - super(Wrapper_LinearModelComplete, self).__init__() - self.model_3d = LinearModelComplete(linear_size=linear_size, - num_stage_comb=num_stage_comb, - num_stage_heads=num_stage_heads, - num_stage_heads_pose=num_stage_heads_pose, - trans_sep=trans_sep, - p_dropout=p_dropout, # 0.5, - input_size=input_size, - intermediate_size=intermediate_size, - output_info=output_info, - n_joints=n_joints, - n_z=n_z, - add_z_to_3d_input=add_z_to_3d_input, - n_segbps=n_segbps, - add_segbps_to_3d_input=add_segbps_to_3d_input, - structure_pose_net=structure_pose_net, - fix_vae_weights=fix_vae_weights, - nf_version=nf_version) - def forward(self, input_vec): - # input_vec = torch.cat((keypoints_prepared.reshape((batch_size, -1)), bone_lengths_prepared), axis=1) - # predict 3d parameters (those are normalized, we need to correct mean and std in a next step) - output = self.model_3d(input_vec) - return output \ No newline at end of file diff --git a/spaces/runa91/barc_gradio/src/stacked_hourglass/predictor.py b/spaces/runa91/barc_gradio/src/stacked_hourglass/predictor.py deleted file 
mode 100644 index 30be3b4fe816cc33018b61632c4ba120ea66dfc3..0000000000000000000000000000000000000000 --- a/spaces/runa91/barc_gradio/src/stacked_hourglass/predictor.py +++ /dev/null @@ -1,119 +0,0 @@ - -# Modified from: -# https://github.com/anibali/pytorch-stacked-hourglass -# https://github.com/bearpaw/pytorch-pose - -import torch -from stacked_hourglass.utils.evaluation import final_preds_untransformed -from stacked_hourglass.utils.imfit import fit, calculate_fit_contain_output_area -from stacked_hourglass.utils.transforms import color_normalize, fliplr, flip_back - - -def _check_batched(images): - if isinstance(images, (tuple, list)): - return True - if images.ndimension() == 4: - return True - return False - - -class HumanPosePredictor: - def __init__(self, model, device=None, data_info=None, input_shape=None): - """Helper class for predicting 2D human pose joint locations. - - Args: - model: The model for generating joint heatmaps. - device: The computational device to use for inference. - data_info: Specifications of the data (defaults to ``Mpii.DATA_INFO``). - input_shape: The input dimensions of the model (height, width). - """ - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - device = torch.device(device) - model.to(device) - self.model = model - self.device = device - - if data_info is None: - raise ValueError - # self.data_info = Mpii.DATA_INFO - else: - self.data_info = data_info - - # Input shape ordering: H, W - if input_shape is None: - self.input_shape = (256, 256) - elif isinstance(input_shape, int): - self.input_shape = (input_shape, input_shape) - else: - self.input_shape = input_shape - - def do_forward(self, input_tensor): - self.model.eval() - with torch.no_grad(): - output = self.model(input_tensor) - return output - - def prepare_image(self, image): - was_fixed_point = not image.is_floating_point() - image = torch.empty_like(image, dtype=torch.float32).copy_(image) - if was_fixed_point: - image /= 255.0 - if image.shape[-2:] != self.input_shape: - image = fit(image, self.input_shape, fit_mode='contain') - image = color_normalize(image, self.data_info.rgb_mean, self.data_info.rgb_stddev) - return image - - def estimate_heatmaps(self, images, flip=False): - is_batched = _check_batched(images) - raw_images = images if is_batched else images.unsqueeze(0) - input_tensor = torch.empty((len(raw_images), 3, *self.input_shape), - device=self.device, dtype=torch.float32) - for i, raw_image in enumerate(raw_images): - input_tensor[i] = self.prepare_image(raw_image) - heatmaps = self.do_forward(input_tensor)[-1].cpu() - if flip: - flip_input = fliplr(input_tensor) - flip_heatmaps = self.do_forward(flip_input)[-1].cpu() - heatmaps += flip_back(flip_heatmaps, self.data_info.hflip_indices) - heatmaps /= 2 - if is_batched: - return heatmaps - else: - return heatmaps[0] - - def estimate_joints(self, images, flip=False): - """Estimate human joint locations from input images. - - Images are expected to be centred on a human subject and scaled reasonably. - - Args: - images: The images to estimate joint locations for. Can be a single image or a list - of images. - flip (bool): If set to true, evaluates on flipped versions of the images as well and - averages the results. - - Returns: - The predicted human joint locations in image pixel space. 
- """ - is_batched = _check_batched(images) - raw_images = images if is_batched else images.unsqueeze(0) - heatmaps = self.estimate_heatmaps(raw_images, flip=flip).cpu() - # final_preds_untransformed compares the first component of shape with x and second with y - # This relates to the image Width, Height (Heatmap has shape Height, Width) - coords = final_preds_untransformed(heatmaps, heatmaps.shape[-2:][::-1]) - # Rescale coords to pixel space of specified images. - for i, image in enumerate(raw_images): - # When returning to original image space we need to compensate for the fact that we are - # used fit_mode='contain' when preparing the images for inference. - y_off, x_off, height, width = calculate_fit_contain_output_area(*image.shape[-2:], *self.input_shape) - coords[i, :, 1] *= self.input_shape[-2] / heatmaps.shape[-2] - coords[i, :, 1] -= y_off - coords[i, :, 1] *= image.shape[-2] / height - coords[i, :, 0] *= self.input_shape[-1] / heatmaps.shape[-1] - coords[i, :, 0] -= x_off - coords[i, :, 0] *= image.shape[-1] / width - if is_batched: - return coords - else: - return coords[0] diff --git a/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/imgcrops.py b/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/imgcrops.py deleted file mode 100644 index 89face653c8d6c92fb4bf453a1ae46957ee68dff..0000000000000000000000000000000000000000 --- a/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/imgcrops.py +++ /dev/null @@ -1,77 +0,0 @@ - - -import os -import glob -import numpy as np -import torch -import torch.utils.data as data - -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) -from configs.anipose_data_info import COMPLETE_DATA_INFO -from stacked_hourglass.utils.imutils import load_image -from stacked_hourglass.utils.transforms import crop, color_normalize -from stacked_hourglass.utils.pilutil import imresize -from stacked_hourglass.utils.imutils import im_to_torch -from configs.dataset_path_configs import TEST_IMAGE_CROP_ROOT_DIR -from configs.data_info import COMPLETE_DATA_INFO_24 - - -class ImgCrops(data.Dataset): - DATA_INFO = COMPLETE_DATA_INFO_24 - ACC_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16] - - def __init__(self, img_crop_folder='default', image_path=None, is_train=False, inp_res=256, out_res=64, sigma=1, - scale_factor=0.25, rot_factor=30, label_type='Gaussian', - do_augment='default', shorten_dataset_to=None, dataset_mode='keyp_only'): - assert is_train == False - assert do_augment == 'default' or do_augment == False - self.inp_res = inp_res - if img_crop_folder == 'default': - self.folder_imgs = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'datasets', 'test_image_crops') - else: - self.folder_imgs = img_crop_folder - name_list = glob.glob(os.path.join(self.folder_imgs, '*.png')) + glob.glob(os.path.join(self.folder_imgs, '*.jpg')) + glob.glob(os.path.join(self.folder_imgs, '*.jpeg')) - name_list = sorted(name_list) - self.test_name_list = [name.split('/')[-1] for name in name_list] - print('len(dataset): ' + str(self.__len__())) - - def __getitem__(self, index): - img_name = self.test_name_list[index] - # load image - img_path = os.path.join(self.folder_imgs, img_name) - img = load_image(img_path) # CxHxW - # prepare image (cropping and color) - img_max = max(img.shape[1], img.shape[2]) - img_padded = torch.zeros((img.shape[0], img_max, img_max)) - if img_max == img.shape[2]: - start = (img_max-img.shape[1])//2 - img_padded[:, start:start+img.shape[1], :] = img - else: - start = 
(img_max-img.shape[2])//2 - img_padded[:, :, start:start+img.shape[2]] = img - img = img_padded - img_prep = im_to_torch(imresize(img, [self.inp_res, self.inp_res], interp='bilinear')) - inp = color_normalize(img_prep, self.DATA_INFO.rgb_mean, self.DATA_INFO.rgb_stddev) - # add the following fields to make it compatible with stanext, most of them are fake - target_dict = {'index': index, 'center' : -2, 'scale' : -2, - 'breed_index': -2, 'sim_breed_index': -2, - 'ind_dataset': 1} - target_dict['pts'] = np.zeros((self.DATA_INFO.n_keyp, 3)) - target_dict['tpts'] = np.zeros((self.DATA_INFO.n_keyp, 3)) - target_dict['target_weight'] = np.zeros((self.DATA_INFO.n_keyp, 1)) - target_dict['silh'] = np.zeros((self.inp_res, self.inp_res)) - return inp, target_dict - - - def __len__(self): - return len(self.test_name_list) - - - - - - - - - diff --git a/spaces/runa91/bite_gradio/src/test_time_optimization/evaluate_ttopt.py b/spaces/runa91/bite_gradio/src/test_time_optimization/evaluate_ttopt.py deleted file mode 100644 index 114a3efde5bd59acbdfaaf8a8cc60cd38f8cd6d4..0000000000000000000000000000000000000000 --- a/spaces/runa91/bite_gradio/src/test_time_optimization/evaluate_ttopt.py +++ /dev/null @@ -1,368 +0,0 @@ - -# evaluate test time optimization from refinement -# python src/test_time_optimization/evaluate_ttopt.py --workers 12 --save-images True --config refinement_cfg_test_withvertexwisegc_csaddnonflat.yaml --model-file-complete=cvpr23_dm39dnnv3barcv2b_refwithgcpervertisflat0morestanding0/checkpoint.pth.tar --ttopt-result-name ttoptv6_stanext_v16b - -# python src/test_time_optimization/evaluate_ttopt.py --workers 12 --save-images True --config refinement_cfg_test_withvertexwisegc_csaddnonflat.yaml --model-file-complete=cvpr23_dm39dnnv3barcv2b_refwithgcpervertisflat0morestanding0/checkpoint.pth.tar --ttopt-result-name ttoptv6_stanext_v16 - - - -import argparse -import os.path -import json -import numpy as np -import pickle as pkl -from distutils.util import strtobool -import torch -from torch import nn -import torch.backends.cudnn -from torch.nn import DataParallel -from torch.utils.data import DataLoader -import pytorch3d as p3d -from collections import OrderedDict -import glob -from tqdm import tqdm -from dominate import document -from dominate.tags import * -from PIL import Image -from matplotlib import pyplot as plt -import trimesh -import cv2 -import shutil - -from pytorch3d.structures import Meshes -from pytorch3d.loss import mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency - -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src')) - -from combined_model.train_main_image_to_3d_wbr_withref import do_validation_epoch -# from combined_model.model_shape_v7 import ModelImageTo3d_withshape_withproj -# from combined_model.model_shape_v7_withref import ModelImageTo3d_withshape_withproj -from combined_model.model_shape_v7_withref_withgraphcnn import ModelImageTo3d_withshape_withproj - -from combined_model.loss_image_to_3d_withbreedrel import Loss -from combined_model.loss_image_to_3d_refinement import LossRef -from configs.barc_cfg_defaults import get_cfg_defaults, update_cfg_global_with_yaml, get_cfg_global_updated - -from lifting_to_3d.utils.geometry_utils import rot6d_to_rotmat, rotmat_to_rot6d # , batch_rot2aa, geodesic_loss_R - - -# from test_time_optimization.utils_ttopt import get_evaluation_dataset, get_norm_dict -from stacked_hourglass.datasets.utils_dataset_selection import get_evaluation_dataset, get_norm_dict - -from 
test_time_optimization.bite_inference_model_for_ttopt import BITEInferenceModel -from smal_pytorch.smal_model.smal_torch_new import SMAL -from configs.SMAL_configs import SMAL_MODEL_CONFIG -from smal_pytorch.renderer.differentiable_renderer import SilhRenderer -from test_time_optimization.utils.utils_ttopt import reset_loss_values, get_optimed_pose_with_glob - -from combined_model.loss_utils.loss_utils import leg_sideway_error, leg_torsion_error, tail_sideway_error, tail_torsion_error, spine_torsion_error, spine_sideway_error -from combined_model.loss_utils.loss_utils_gc import LossGConMesh, calculate_plane_errors_batch -from combined_model.loss_utils.loss_arap import Arap_Loss -from combined_model.loss_utils.loss_laplacian_mesh_comparison import LaplacianCTF # (coarse to fine animal) -from graph_networks import graphcmr # .utils_mesh import Mesh -from stacked_hourglass.utils.visualization import save_input_image_with_keypoints, save_input_image - -from metrics.metrics import Metrics -from configs.SMAL_configs import EVAL_KEYPOINTS, KEYPOINT_GROUPS - - -ROOT_LOSS_WEIGH_PATH = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/src/configs/ttopt_loss_weights/' - - - -def main(args): - - # load configs - # step 1: load default configs - # step 2: load updates from .yaml file - path_config = os.path.join(get_cfg_defaults().barc_dir, 'src', 'configs', args.config) - update_cfg_global_with_yaml(path_config) - cfg = get_cfg_global_updated() - - pck_thresh = 0.15 - print('pck_thresh: ' + str(pck_thresh)) - - - - - ROOT_IN_PATH = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/results/results_ttopt/' + args.ttopt_result_name + '/' # ttoptv6_debug_x8/' - ROOT_IN_PATH_DETAIL = ROOT_IN_PATH + 'details/' - - ROOT_OUT_PATH = ROOT_IN_PATH + 'evaluation/' - if not os.path.exists(ROOT_OUT_PATH): os.makedirs(ROOT_OUT_PATH) - - - - - - - - - - - # NEW!!! - logscale_part_list = ['legs_l', 'legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l'] - # logscale_part_list = ['front_legs_l', 'front_legs_f', 'tail_l', 'tail_f', 'ears_y', 'ears_l', 'head_l', 'back_legs_l', 'back_legs_f'] - - - # Select the hardware device to use for training. - if torch.cuda.is_available() and cfg.device=='cuda': - device = torch.device('cuda', torch.cuda.current_device()) - torch.backends.cudnn.benchmark = False # True - else: - device = torch.device('cpu') - - print('structure_pose_net: ' + cfg.params.STRUCTURE_POSE_NET) - print('refinement network type: ' + cfg.params.REF_NET_TYPE) - print('smal_model_type: ' + cfg.smal.SMAL_MODEL_TYPE) - - path_model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, args.model_file_complete) - - # Disable gradient calculations. 
- # torch.set_grad_enabled(False) - - - # prepare dataset and dataset loadr - val_dataset, val_loader, len_val_dataset, test_name_list, stanext_data_info, stanext_acc_joints = get_evaluation_dataset(cfg.data.DATASET, cfg.data.VAL_OPT, cfg.data.V12, cfg.optim.BATCH_SIZE, args.workers) - len_data = len_val_dataset - # summarize information for normalization - norm_dict = get_norm_dict(stanext_data_info, device) - - # prepare complete model - bite_model = BITEInferenceModel(cfg, path_model_file_complete, norm_dict) - # smal_model_type = bite_model.complete_model.smal.smal_model_type - smal_model_type = bite_model.smal_model_type - smal = SMAL(smal_model_type=smal_model_type, template_name='neutral', logscale_part_list=logscale_part_list).to(device) - silh_renderer = SilhRenderer(image_size=256).to(device) - - - - # ---------------------------------------------------------------------------------- - - summary = {} - summary['pck'] = np.zeros((len_data)) - summary['pck_by_part'] = {group:np.zeros((len_data)) for group in KEYPOINT_GROUPS} - summary['acc_sil_2d'] = np.zeros(len_data) - - - - - - - - - - - - # Put the model in training mode. - # model.train() - # prepare progress bar - iterable = enumerate(val_loader) - progress = None - if True: # not quiet: - progress = tqdm(iterable, desc='Train', total=len(val_loader), ascii=True, leave=False) - iterable = progress - ind_img_tot = 0 - # prepare variables, put them on the right device - - my_step = 0 - batch_size = cfg.optim.BATCH_SIZE - - for index, (input, target_dict) in iterable: - for key in target_dict.keys(): - if key == 'breed_index': - target_dict[key] = target_dict[key].long().to(device) - elif key in ['index', 'pts', 'tpts', 'target_weight', 'silh', 'silh_distmat_tofg', 'silh_distmat_tobg', 'sim_breed_index', 'img_border_mask']: - target_dict[key] = target_dict[key].float().to(device) - elif key == 'has_seg': - target_dict[key] = target_dict[key].to(device) - else: - pass - input = input.float().to(device) - - - - # get starting values for the optimization - # -> here from barc, but could also be saved and loaded - preds_dict = bite_model.get_all_results(input) - res_normal_and_ref = bite_model.get_selected_results(preds_dict=preds_dict, result_networks=['normal', 'ref']) - res = bite_model.get_selected_results(preds_dict=preds_dict, result_networks=['ref'])['ref'] - - # -------------------------------------------------------------------- - - # ind_img = 0 - - batch_verts_smal = [] - batch_faces_prep = [] - batch_optimed_camera_flength = [] - - - - for ind_img in range(input.shape[0]): - name = (test_name_list[target_dict['index'][ind_img].long()]).replace('/', '__').split('.')[0] - - print('ind_img_tot: ' + str(ind_img_tot) + ' -> ' + name) - ind_img_tot += 1 - - e_name = 'e000' # 'e300' - - npy_file = ROOT_IN_PATH_DETAIL + name + '_flength_' + e_name +'.npy' - flength = np.load(npy_file) - optimed_camera_flength = torch.tensor(flength, device=device) - - obj_file = ROOT_IN_PATH + name + '_res_' + e_name +'.obj' - - verts, faces, aux = p3d.io.load_obj(obj_file) - verts_smal = verts[None, ...].to(device) - faces_prep = faces.verts_idx[None, ...].to(device) - batch_verts_smal.append(verts_smal) - batch_faces_prep.append(faces_prep) - batch_optimed_camera_flength.append(optimed_camera_flength) - - - # import pdb; pdb.set_trace() - - verts_smal = torch.cat(batch_verts_smal, dim=0) - faces_prep = torch.cat(batch_faces_prep, dim=0) - optimed_camera_flength = torch.cat(batch_optimed_camera_flength, dim=0) - - # get keypoint locations from 
mesh vertices - keyp_3d = smal.get_joints_from_verts(verts_smal, keyp_conf='olive') - - - # render silhouette and keypoints - pred_silh_images, pred_keyp_raw = silh_renderer(vertices=verts_smal, points=keyp_3d, faces=faces_prep, focal_lengths=optimed_camera_flength) - pred_keyp = pred_keyp_raw[:, :24, :] - - - - # --------------- calculate iou and pck values -------------------- - - gt_keypoints_256 = target_dict['tpts'][:, :, :2] / 64. * (256. - 1) - gt_keypoints = torch.cat((gt_keypoints_256, target_dict['tpts'][:, :, 2:3]), dim=2) - # prepare silhouette for IoU calculation - predicted as well as ground truth - has_seg = target_dict['has_seg'] - img_border_mask = target_dict['img_border_mask'][:, 0, :, :] - gtseg = target_dict['silh'] - synth_silhouettes = pred_silh_images[:, 0, :, :] # pred_silh[:, 0, :, :] # output_reproj['silh'] - synth_silhouettes[synth_silhouettes>0.5] = 1 - synth_silhouettes[synth_silhouettes<0.5] = 0 - # calculate PCK as well as IoU (similar to WLDO) - preds = {} - preds['acc_PCK'] = Metrics.PCK( - pred_keyp, gt_keypoints, - gtseg, has_seg, idxs=EVAL_KEYPOINTS, - thresh_range=[pck_thresh], # [0.15], - ) - preds['acc_IOU'] = Metrics.IOU( - synth_silhouettes, gtseg, - img_border_mask, mask=has_seg - ) - for group, group_kps in KEYPOINT_GROUPS.items(): - preds[f'{group}_PCK'] = Metrics.PCK( - pred_keyp, gt_keypoints, gtseg, has_seg, - thresh_range=[pck_thresh], # [0.15], - idxs=group_kps - ) - - curr_batch_size = pred_keyp.shape[0] - if not (preds['acc_PCK'].data.cpu().numpy().shape == (summary['pck'][my_step * batch_size:my_step * batch_size + curr_batch_size]).shape): - import pdb; pdb.set_trace() - summary['pck'][my_step * batch_size:my_step * batch_size + curr_batch_size] = preds['acc_PCK'].data.cpu().numpy() - summary['acc_sil_2d'][my_step * batch_size:my_step * batch_size + curr_batch_size] = preds['acc_IOU'].data.cpu().numpy() - for part in summary['pck_by_part']: - summary['pck_by_part'][part][my_step * batch_size:my_step * batch_size + curr_batch_size] = preds[f'{part}_PCK'].data.cpu().numpy() - - - - - my_step += 1 - - - - - - # import pdb; pdb.set_trace() - - - - - - - iou = np.nanmean(summary['acc_sil_2d']) - pck = np.nanmean(summary['pck']) - pck_legs = np.nanmean(summary['pck_by_part']['legs']) - pck_tail = np.nanmean(summary['pck_by_part']['tail']) - pck_ears = np.nanmean(summary['pck_by_part']['ears']) - pck_face = np.nanmean(summary['pck_by_part']['face']) - print('------------------------------------------------') - print("iou: {:.2f}".format(iou*100)) - print(' ') - print("pck: {:.2f}".format(pck*100)) - print(' ') - print("pck_legs: {:.2f}".format(pck_legs*100)) - print("pck_tail: {:.2f}".format(pck_tail*100)) - print("pck_ears: {:.2f}".format(pck_ears*100)) - print("pck_face: {:.2f}".format(pck_face*100)) - print('------------------------------------------------') - # save results in a .txt file - with open(ROOT_OUT_PATH + "a_evaluation_" + e_name + ".txt", "w") as text_file: - print("iou: {:.2f}".format(iou*100), file=text_file) - print("pck: {:.2f}".format(pck*100), file=text_file) - print("pck_legs: {:.2f}".format(pck_legs*100), file=text_file) - print("pck_tail: {:.2f}".format(pck_tail*100), file=text_file) - print("pck_ears: {:.2f}".format(pck_ears*100), file=text_file) - print("pck_face: {:.2f}".format(pck_face*100), file=text_file) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Evaluate a stacked hourglass model.') - 
parser.add_argument('--model-file-complete', default='', type=str, metavar='PATH', - help='path to saved model weights') - parser.add_argument('--ttopt-result-name', default='', type=str, metavar='PATH', - help='path to saved ttopt results') - parser.add_argument('-cg', '--config', default='barc_cfg_test.yaml', type=str, metavar='PATH', - help='name of config file (default: barc_cfg_test.yaml within src/configs folder)') - parser.add_argument('--save-images', default='True', type=lambda x: bool(strtobool(x)), - help='bool indicating if images should be saved') - parser.add_argument('--workers', default=4, type=int, metavar='N', - help='number of data loading workers') - parser.add_argument('--metrics', '-m', metavar='METRICS', default='all', - choices=['all', None], - help='model architecture') - main(parser.parse_args()) diff --git a/spaces/sasha/find-my-pedro/app.py b/spaces/sasha/find-my-pedro/app.py deleted file mode 100644 index 541426178e3e21d6aa80c9f4bfffa925437b14db..0000000000000000000000000000000000000000 --- a/spaces/sasha/find-my-pedro/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -from datasets import load_dataset -from sentence_transformers import SentenceTransformer - -import os -import requests -os.environ['NO_PROXY'] = 'huggingface.co' - -model = SentenceTransformer('clip-ViT-B-32') - -# Candidate images. -dataset = load_dataset("sasha/pedro-embeddings-new") -ds = dataset["train"] -ds.add_faiss_index(column='embeddings') - - -def query(image, number_to_retrieve=1): - input_image = model.encode(image) - scores, retrieved_examples = ds.get_nearest_examples('embeddings', input_image, k=number_to_retrieve) - return retrieved_examples['image'][0] - - -with gr.Blocks() as demo: - gr.Markdown("# Find my Pedro Pascal") - gr.Markdown("## Use this Space to find the Pedro Pascal most similar to your input image!") - with gr.Row(): - with gr.Column(scale=1, min_width=600): - inputs = gr.Image(type='pil') - btn = gr.Button("Find my Pedro!") - description = gr.Markdown() - - with gr.Column(scale=1, min_width=600): - outputs=gr.Image() - - gr.Markdown("### Image Examples") - gr.Examples( - examples=["elton.jpg", "ken.jpg", "gaga.jpg", "taylor.jpg"], - inputs=inputs, - outputs=[outputs], - fn=query, - cache_examples=True, - ) - btn.click(query, inputs, [outputs]) - -demo.launch() - diff --git a/spaces/sccstandardteam/ChuanhuChatGPT/modules/webui_locale.py b/spaces/sccstandardteam/ChuanhuChatGPT/modules/webui_locale.py deleted file mode 100644 index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000 --- a/spaces/sccstandardteam/ChuanhuChatGPT/modules/webui_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import locale -import commentjson as json - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - lang_config = config.get("language", "auto") - language = os.environ.get("LANGUAGE", lang_config) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. 
zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/scedlatioru/img-to-music/example/Download Pmdg 737 Ngx Full Crack.md b/spaces/scedlatioru/img-to-music/example/Download Pmdg 737 Ngx Full Crack.md deleted file mode 100644 index dfba3259469531092725b37698f6060acf3c1f6a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Download Pmdg 737 Ngx Full Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

      download pmdg 737 ngx full crack


      DOWNLOAD →→→ https://gohhs.com/2uEz2y



- -5- Full throttle on without engine! 6- Boeing 737 PMDG 800/900 addon included. 7- Provided: documentation, manuals in French and English. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Primer Of Biostatistics 7th Edition Pdf.md b/spaces/scedlatioru/img-to-music/example/Primer Of Biostatistics 7th Edition Pdf.md deleted file mode 100644 index 8fc313ae5aac7fd3f9bc6fd4afdee84ab5eca786..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Primer Of Biostatistics 7th Edition Pdf.md +++ /dev/null @@ -1,12 +0,0 @@ -

      primer of biostatistics 7th edition pdf


DOWNLOAD: https://gohhs.com/2uEzr5



- -Primer of Biostatistics, 7th edition: this edition demystifies this complex topic. Once you have purchased this e-book, you can download the PDF file. -Download PDF. -You can purchase the e-book “Primer of Biostatistics, 7th edition” or the printed version. -Buy the book “Primer of Biostatistics, 7th edition” by K. McCarthy, D. Kirk and other works in the Books section of the OZON.ru online store. -Digital, print and audiobooks available. -Download: Primer of Biostatistics, 7th edition - McCarthy K., Kirk J. (pdf - 13Mb). 8a78ff9644
      -
      -
      -

      diff --git a/spaces/shanechin/Linaqruf-pastel-anime-xl-lora/app.py b/spaces/shanechin/Linaqruf-pastel-anime-xl-lora/app.py deleted file mode 100644 index e929b4a1070a639f59ec2c985f13c3de62b9e07a..0000000000000000000000000000000000000000 --- a/spaces/shanechin/Linaqruf-pastel-anime-xl-lora/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Linaqruf/pastel-anime-xl-lora").launch() \ No newline at end of file diff --git a/spaces/sherinsp/openai-reverse-proxy/Dockerfile b/spaces/sherinsp/openai-reverse-proxy/Dockerfile deleted file mode 100644 index 6953fc05439efb70991552cf56f28365b5b6c15b..0000000000000000000000000000000000000000 --- a/spaces/sherinsp/openai-reverse-proxy/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18 - -WORKDIR /app - -RUN npm install express express-http-proxy - -COPY . . - -EXPOSE 7860 - -CMD [ "node", "server.js" ] \ No newline at end of file diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/metrics/__init__.py b/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/metrics/__init__.py deleted file mode 100644 index 19d55cc8321f124c918d78465b053aef67f13a33..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/metrics/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from copy import deepcopy - -from basicsr.utils.registry import METRIC_REGISTRY -from .psnr_ssim import calculate_psnr, calculate_ssim - -__all__ = ['calculate_psnr', 'calculate_ssim'] - - -def calculate_metric(data, opt): - """Calculate metric from data and options. - - Args: - opt (dict): Configuration. It must constain: - type (str): Model type. - """ - opt = deepcopy(opt) - metric_type = opt.pop('type') - metric = METRIC_REGISTRY.get(metric_type)(**data, **opt) - return metric diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Facebook Lite A Faster and Lighter Way to Connect with Friends.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Facebook Lite A Faster and Lighter Way to Connect with Friends.md deleted file mode 100644 index 10cd32fce8be9745c3b8a3984e542bc5605b4aba..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Facebook Lite A Faster and Lighter Way to Connect with Friends.md +++ /dev/null @@ -1,136 +0,0 @@ -
      -

      How to Download Facebook Lite

      -

      Facebook is one of the most popular social media platforms in the world, but it can also be a heavy app that consumes a lot of data, battery, and storage space on your device. If you want to enjoy Facebook without these drawbacks, you might want to try Facebook Lite.

      -

      to download facebook lite


      Download 🗸 https://ssurll.com/2uNXqD



      -

      Facebook Lite is a smaller, faster, and lighter version of the regular Facebook app that works well on low-end phones and slow internet connections. It has all the basic features of Facebook, such as posting, liking, commenting, sharing, messaging, and more, but with less data usage and storage space.

      -

      In this article, we will show you how to download Facebook Lite on your Android, iPhone, or PC device and compare it with the regular Facebook app. We will also tell you some of the benefits of using Facebook Lite over the standard app.

      -

      Facebook Lite vs Facebook App

      -

      Before we get into how to download Facebook Lite, let's see how it differs from the regular Facebook app. Here are some of the key differences between the two apps:

      -

      -
        -
      • App size: The regular Facebook app is about 162 MB on an Android device, while the Facebook Lite app is only 2.51 MB. That's a huge difference in terms of storage space.
      • -
      • Data usage: The regular Facebook app uses more data than the Facebook Lite app because it has higher-quality images and videos, a nicer user interface, and a separate messenger app. The Facebook Lite app uses less data because it has lower-quality images and videos, a basic user interface, and an integrated messenger feature.
      • -
      • Features: The regular Facebook app has more features than the Facebook Lite app because it offers more enhancements, tools, and options for customization. The Facebook Lite app has fewer features than the regular app because it focuses on the core functionality of Facebook.
      • -
      • Availability: The regular Facebook app is available for most Android and iOS devices that have enough storage space and a fast internet connection. The Facebook Lite app is available for older Android devices that have low storage space and a slow internet connection.
      • -
      -

      As you can see, there are pros and cons to both apps. The regular Facebook app is better for users who want the full Facebook experience with all its bells and whistles, while the Facebook Lite app is better for users who want a simple and efficient way to access Facebook.

      -

      How to Download Facebook Lite on Android

      -

      If you have an Android device and you want to download Facebook Lite, you have two options: You can either download it from the Google Play Store or from the official Facebook website. Here's how to do both:

      -

      From the Google Play Store

      -
        -
      1. Open the Google Play Store app on your Android device.
      2. -
      3. Type "Facebook Lite" in the search bar and tap on the first result.
      4. -
      5. Tap on Install and wait for the app to download and install on your device.
      6. -
      7. Tap on Open or find the app icon in your launcher and tap on it.
      8. -
      9. Log in with your Facebook account or create a new one if you don't have one already.
      10. -
      11. Enjoy using Facebook Lite on your Android device.
      12. -
      -

      From the official website

      -
        -
      1. Open your web browser on your Android device and go to https://lite.facebook.com.
      2. -
      3. Tap on the Download button and wait for the app to download on your device.
      4. -
      5. Go to your Downloads folder and tap on the Facebook Lite APK file.
      6. -
      7. If prompted, allow the installation of apps from unknown sources in your settings.
      8. -
      9. Tap on Install and wait for the app to install on your device.
      10. -
      11. Tap on Open or find the app icon in your launcher and tap on it.
      12. -
      13. Log in with your Facebook account or create a new one if you don't have one already.
      14. -
      15. Enjoy using Facebook Lite on your Android device.
      16. -
      -

      How to Download Facebook Lite on iPhone

      -

      If you have an iPhone and you want to download Facebook Lite, you have two options: You can either download it from the App Store or from the official Facebook website. Here's how to do both:

      -

      From the App Store

      -
        -
      1. Open the App Store app on your iPhone.
      2. -
      3. Type "Facebook Lite" in the search bar and tap on the first result.
      4. -
      5. Tap on Get and wait for the app to download and install on your device.
      6. -
      7. Tap on Open or find the app icon on your home screen and tap on it.
      8. -
      9. Log in with your Facebook account or create a new one if you don't have one already.
      10. -
      11. Enjoy using Facebook Lite on your iPhone.
      12. -
      -

      From the official website

      -
        -
      1. Open your web browser on your iPhone and go to https://lite.facebook.com.
      2. -
      3. Tap on the Download button and wait for the app to download on your device.
      4. -
      5. Go to your Files app and tap on the Facebook Lite IPA file.
      6. -
      7. If prompted, allow the installation of apps from unknown sources in your settings.
      8. -
      9. Tap on Install and wait for the app to install on your device.
      10. -
      11. Tap on Open or find the app icon on your home screen and tap on it.
      12. -
      13. Log in with your Facebook account or create a new one if you don't have one already.
      14. -
      15. Enjoy using Facebook Lite on your iPhone.
      16. -
      -

      How to Download Facebook Lite on PC

      -

      If you have a PC and you want to download Facebook Lite, you will need to use an Android emulator. An Android emulator is a software that allows you to run Android apps on your PC. There are many Android emulators available, but we will use BlueStacks as an example. Here's how to do it:

      -
        -
      1. Go to https://www.bluestacks.com and download the BlueStacks installer for your PC.
      2. -
      3. Run the installer and follow the instructions to install BlueStacks on your PC.
      4. -
      5. Launch BlueStacks and sign in with your Google account or create a new one if you don't have one already.
      6. -
7. In BlueStacks, open the Google Play Store app, type "Facebook Lite" in the search bar, and tap on the first result.
      8. -
      9. Tap on Install and wait for the app to download and install in BlueStacks.
      10. -
      11. Tap on Open or find the app icon in BlueStacks and tap on it.
      12. -
      13. Log in with your Facebook account or create a new one if you don't have one already.
      14. -
      15. Enjoy using Facebook Lite on your PC.
      16. -
      -

      Benefits of Using Facebook Lite

      -

      Now that you know how to download Facebook Lite, you might be wondering why you should use it instead of the regular Facebook app. Here are some of the benefits of using Facebook Lite:

      -
  -
• You can save data: Facebook Lite uses less data than the regular Facebook app because it compresses images and videos, loads them only when you tap on them, and reduces background data usage. This means you can use Facebook without worrying about exceeding your data limit or paying extra charges.
• You can save battery: Facebook Lite consumes less battery than the regular Facebook app because it runs faster, uses fewer resources, and has fewer notifications. This means you can use Facebook without draining your battery or having to charge your device frequently.
• You can save storage space: Facebook Lite takes up less storage space than the regular Facebook app because it is smaller, has fewer updates, and has no separate messenger app. This means you can use Facebook without running out of storage space or having to delete other apps or files.
• You can access Facebook anywhere: Facebook Lite works well on low-end phones and slow internet connections because it is optimized for 2G networks and areas with unstable or limited connectivity. This means you can use Facebook even if you have a poor signal, weak Wi-Fi, or no data plan.
• You can enjoy the essential features of Facebook: Facebook Lite has all the basic features of Facebook that you need to stay in touch with your friends and family, such as posting, liking, commenting, sharing, messaging, and more. You can also access other Facebook services, such as Marketplace, Groups, Pages, and Watch, from the app.

        Conclusion

        -

        Facebook Lite is a great alternative to the regular Facebook app if you want to save data, battery, storage space, and access Facebook anywhere. It is easy to download and install on your Android, iPhone, or PC device and it has all the essential features of Facebook that you need.

        -

        If you are looking for a simple and efficient way to use Facebook, you should give Facebook Lite a try. You might be surprised by how much you like it.

        -

        Do you have any questions about Facebook Lite? Check out our FAQs below or leave a comment and we will get back to you as soon as possible.

        -

        FAQs

        -

        Is Facebook Lite safe?

        -

        Yes, Facebook Lite is safe to use. It is developed by Facebook and it follows the same privacy and security policies as the regular Facebook app. You can control your privacy settings, manage your account, and report any issues from the app.

        -

        Is Facebook Lite free?

        -

        Yes, Facebook Lite is free to download and use. However, you may incur data charges from your network provider if you use the app on a mobile data connection. You can reduce your data usage by turning on the Data Saver mode in the app settings.

        -

        Can I use both Facebook Lite and the regular Facebook app on the same device?

        -

        Yes, you can use both apps on the same device if you have enough storage space and a fast internet connection. However, you may experience some conflicts or errors if you try to use both apps at the same time. We recommend that you choose one app that suits your needs and preferences better.

        -

        Can I use Facebook Lite on a tablet?

        -

        Yes, you can use Facebook Lite on a tablet if it runs on Android or iOS operating systems. However, the app may not be optimized for larger screens and may look different from the regular Facebook app.

        -

        Can I play games on Facebook Lite?

        -

        No, you cannot play games on Facebook Lite. The app does not support games or other interactive features that require more data and resources. If you want to play games on Facebook, you will need to use the regular Facebook app or the web browser.

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/simpx/tiiuae-falcon-7b/README.md b/spaces/simpx/tiiuae-falcon-7b/README.md deleted file mode 100644 index 26ee34e7e839d7e8d49d1f94ae26b84ba79903f9..0000000000000000000000000000000000000000 --- a/spaces/simpx/tiiuae-falcon-7b/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Tiiuae Falcon 7b -emoji: 👀 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-base-patch32.md b/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-base-patch32.md deleted file mode 100644 index 650f34ed8dd3b3054d5655900fc007248aeec3cc..0000000000000000000000000000000000000000 --- a/spaces/society-ethics/model-card-regulatory-check/tests/cards/openai___clip-vit-base-patch32.md +++ /dev/null @@ -1,136 +0,0 @@ -# Model Card: CLIP - -Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). - -## Model Details - -The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. - -### Model Date - -January 2021 - -### Model Type - -The model uses a ViT-B/32 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. - -The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer. - - -### Documents - -- [Blog Post](https://openai.com/blog/clip/) -- [CLIP Paper](https://arxiv.org/abs/2103.00020) - - -### Use with Transformers - -```python3 -from PIL import Image -import requests - -from transformers import CLIPProcessor, CLIPModel - -model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") -processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") - -url = "http://images.cocodataset.org/val2017/000000039769.jpg" -image = Image.open(requests.get(url, stream=True).raw) - -inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) - -outputs = model(**inputs) -logits_per_image = outputs.logits_per_image # this is the image-text similarity score -probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities -``` - - -## Model Use - -### Intended Use - -The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. 
- -#### Primary intended uses - -The primary intended users of these models are AI researchers. - -We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. - -### Out-of-Scope Use Cases - -**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. - -Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. - -Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. - - - -## Data - -The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. - -### Data Mission Statement - -Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. - - - -## Performance and Limitations - -### Performance - -We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - -- Food101 -- CIFAR10 -- CIFAR100 -- Birdsnap -- SUN397 -- Stanford Cars -- FGVC Aircraft -- VOC2007 -- DTD -- Oxford-IIIT Pet dataset -- Caltech101 -- Flowers102 -- MNIST -- SVHN -- IIIT5K -- Hateful Memes -- SST-2 -- UCF101 -- Kinetics700 -- Country211 -- CLEVR Counting -- KITTI Distance -- STL-10 -- RareAct -- Flickr30 -- MSCOCO -- ImageNet -- ImageNet-A -- ImageNet-R -- ImageNet Sketch -- ObjectNet (ImageNet Overlap) -- Youtube-BB -- ImageNet-Vid - -## Limitations - -CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. 
CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. - -### Bias and Fairness - -We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). - -We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. - - - -## Feedback - -### Where to send questions or comments about the model - -Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9) \ No newline at end of file diff --git a/spaces/softcatala/comparativa-tts-catala/festival.py b/spaces/softcatala/comparativa-tts-catala/festival.py deleted file mode 100644 index a7f407d6dcb8ce439768e8c12f6f31964cb63b7b..0000000000000000000000000000000000000000 --- a/spaces/softcatala/comparativa-tts-catala/festival.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- -# -# Copyright (c) 2016 Jordi Mas i Hernandez -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program; if not, write to the -# Free Software Foundation, Inc., 59 Temple Place - Suite 330, -# Boston, MA 02111-1307, USA. 
- -import subprocess -import tempfile - -festival_voices = { - "ona": "voice_upc_ca_ona_hts", - "pau": "voice_upc_ca_pau_hts" -} - -def _normalize(result): - mapping = { - '’' : '\'', - 'à' : 'à', - 'í' : 'í', - 'ó' : 'ó', - 'è' : 'è', - 'ò' : 'ò', - 'ú' : 'ú', - } - - for char in mapping.keys(): - result = result.replace(char, mapping[char]) - - return result - - -def festival_synthesize(text, voice): - if voice not in ["ona", "pau"]: - raise Error - - txt2wave = '/usr/bin/text2wave' - - with tempfile.NamedTemporaryFile() as encoded_file,\ - tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as wave_file: - - text = _normalize(text) - f = open(encoded_file.name, 'wb') - f.write(text.encode('ISO-8859-15', 'ignore')) - f.close() - - cmd = '{0} -o {1} {2} -eval "({3})"'.\ - format(txt2wave, wave_file.name, encoded_file.name, festival_voices[voice]) - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) - p.wait() - - return wave_file.name diff --git a/spaces/sophiamyang/Panel_InstructPix2Pix/Dockerfile b/spaces/sophiamyang/Panel_InstructPix2Pix/Dockerfile deleted file mode 100644 index bdf2c76f383c53971d76296317f16d3764007705..0000000000000000000000000000000000000000 --- a/spaces/sophiamyang/Panel_InstructPix2Pix/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# FROM python:3.9 -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -LABEL maintainer="Hugging Face" -LABEL repository="transformers" - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - curl \ - ca-certificates \ - python3 \ - python3-pip && \ - rm -rf /var/lib/apt/lists - - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt -RUN python3 -m pip install --no-cache-dir --upgrade pip -RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "sophiamyang-panel-instructpix2pix.hf.space", "--allow-websocket-origin", "0.0.0.0:7860"] - -RUN mkdir /.cache -RUN chmod 777 /.cache \ No newline at end of file diff --git a/spaces/speechbox/whisper-speaker-diarization/README.md b/spaces/speechbox/whisper-speaker-diarization/README.md deleted file mode 100644 index 7150000886924ed46777cd05844d9432d063c396..0000000000000000000000000000000000000000 --- a/spaces/speechbox/whisper-speaker-diarization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Whisper Speaker Diarization -emoji: 📝 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -duplicated_from: whisper-event/whisper-demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/.github/PULL_REQUEST_TEMPLATE.md b/spaces/sriramelango/Social_Classification_Public/fairseq/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index d005e2df4f717ea4844a8320981d77d96e425a52..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,16 +0,0 @@ -# Before submitting - -- [ ] Was this discussed/approved via a Github issue? (no need for typos, doc improvements) -- [ ] Did you read the [contributor guideline](https://github.com/pytorch/fairseq/blob/main/CONTRIBUTING.md)? -- [ ] Did you make sure to update the docs? -- [ ] Did you write any new necessary tests? - -## What does this PR do? -Fixes # (issue). 
- -## PR review -Anyone in the community is free to review the PR once the tests have passed. -If we didn't discuss your PR in Github issues there's a high chance it will not be merged. - -## Did you have fun? -Make sure you had fun coding 🙃 diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/denoising_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/denoising_dataset.py deleted file mode 100644 index bdb62c8d5db9c8755c72db4d0d8083c936f18dc8..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/denoising_dataset.py +++ /dev/null @@ -1,436 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import numpy as np -import torch - -from . import FairseqDataset, data_utils - - -def collate( - samples, - pad_idx, - eos_idx, - vocab, - left_pad_source=False, - left_pad_target=False, - input_feeding=True, - pad_to_length=None, -): - assert input_feeding - if len(samples) == 0: - return {} - - def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): - return data_utils.collate_tokens( - [s[key] for s in samples], - pad_idx, - eos_idx=None, # use eos_idx of each sample instead of vocab.eos() - left_pad=left_pad, - move_eos_to_beginning=move_eos_to_beginning, - pad_to_length=pad_to_length, - ) - - id = torch.LongTensor([s["id"] for s in samples]) - src_tokens = merge( - "source", - left_pad=left_pad_source, - pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, - ) - # sort by descending source length - src_lengths = torch.LongTensor([s["source"].numel() for s in samples]) - src_lengths, sort_order = src_lengths.sort(descending=True) - id = id.index_select(0, sort_order) - src_tokens = src_tokens.index_select(0, sort_order) - - prev_output_tokens = None - target = None - if samples[0].get("target", None) is not None: - target = merge( - "target", - left_pad=left_pad_target, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - target = target.index_select(0, sort_order) - ntokens = sum(len(s["target"]) for s in samples) - - if input_feeding: - # we create a shifted version of targets for feeding the - # previous output token(s) into the next decoder step - prev_output_tokens = merge( - "target", - left_pad=left_pad_target, - move_eos_to_beginning=True, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - prev_output_tokens = prev_output_tokens.index_select(0, sort_order) - else: - ntokens = sum(len(s["source"]) for s in samples) - - batch = { - "id": id, - "ntokens": ntokens, - "net_input": { - "src_tokens": src_tokens, - "src_lengths": src_lengths, - }, - "target": target, - "nsentences": samples[0]["source"].size(0), - "sort_order": sort_order, - } - if prev_output_tokens is not None: - batch["net_input"]["prev_output_tokens"] = prev_output_tokens - - return batch - - -class DenoisingDataset(FairseqDataset): - """ - A wrapper around TokenBlockDataset for BART dataset. - - Args: - dataset (TokenBlockDataset): dataset to wrap - sizes (List[int]): sentence lengths - vocab (~fairseq.data.Dictionary): vocabulary - mask_idx (int): dictionary index used for masked token - mask_whole_words: only mask whole words. This should be a byte mask - over vocab indices, indicating whether it is the beginning of a - word. 
We will extend any mask to encompass the whole word. - shuffle (bool, optional): shuffle the elements before batching. - Default: ``True`` - seed: Seed for random number generator for reproducibility. - args: argparse arguments. - """ - - def __init__( - self, - dataset, - sizes, - vocab, - mask_idx, - mask_whole_words, - shuffle, - seed, - args, - eos=None, - item_transform_func=None, - ): - self.dataset = dataset - - self.sizes = sizes - - self.vocab = vocab - self.shuffle = shuffle - self.seed = seed - self.mask_idx = mask_idx - self.mask_whole_word = mask_whole_words - self.mask_ratio = args.mask - self.random_ratio = args.mask_random - self.insert_ratio = args.insert - self.rotate_ratio = args.rotate - self.permute_sentence_ratio = args.permute_sentences - self.eos = eos if eos is not None else vocab.eos() - self.item_transform_func = item_transform_func - - if args.bpe != "gpt2": - self.full_stop_index = self.vocab.eos() - else: - assert args.bpe == "gpt2" - self.full_stop_index = self.vocab.index("13") - - self.replace_length = args.replace_length - if self.replace_length not in [-1, 0, 1]: - raise ValueError(f"invalid arg: replace_length={self.replace_length}") - if args.mask_length not in ["subword", "word", "span-poisson"]: - raise ValueError(f"invalid arg: mask-length={args.mask_length}") - if args.mask_length == "subword" and args.replace_length not in [0, 1]: - raise ValueError(f"if using subwords, use replace-length=1 or 0") - - self.mask_span_distribution = None - if args.mask_length == "span-poisson": - _lambda = args.poisson_lambda - - lambda_to_the_k = 1 - e_to_the_minus_lambda = math.exp(-_lambda) - k_factorial = 1 - ps = [] - for k in range(0, 128): - ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial) - lambda_to_the_k *= _lambda - k_factorial *= k + 1 - if ps[-1] < 0.0000001: - break - ps = torch.FloatTensor(ps) - self.mask_span_distribution = torch.distributions.Categorical(ps) - - self.epoch = 0 - - @property - def can_reuse_epoch_itr_across_epochs(self): - return True # only the noise changes, not item sizes - - def set_epoch(self, epoch, **unused): - self.epoch = epoch - - def __getitem__(self, index): - with data_utils.numpy_seed(self.seed, self.epoch, index): - tokens = self.dataset[index] - assert tokens[-1] == self.eos - source, target = tokens, tokens.clone() - - if self.permute_sentence_ratio > 0.0: - source = self.permute_sentences(source, self.permute_sentence_ratio) - - if self.mask_ratio > 0: - source = self.add_whole_word_mask(source, self.mask_ratio) - - if self.insert_ratio > 0: - source = self.add_insertion_noise(source, self.insert_ratio) - - if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio: - source = self.add_rolling_noise(source) - # there can additional changes to make: - if self.item_transform_func is not None: - source, target = self.item_transform_func(source, target) - - assert (source >= 0).all() - assert (source[1:-1] >= 1).all() - assert (source <= len(self.vocab)).all() - assert source[0] == self.vocab.bos() - assert source[-1] == self.eos - return { - "id": index, - "source": source, - "target": target, - } - - def __len__(self): - return len(self.dataset) - - def permute_sentences(self, source, p=1.0): - full_stops = source == self.full_stop_index - # Pretend it ends with a full stop so last span is a sentence - full_stops[-2] = 1 - - # Tokens that are full stops, where the previous token is not - sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 - result = source.clone() 
- - num_sentences = sentence_ends.size(0) - num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0) - substitutions = torch.randperm(num_sentences)[:num_to_permute] - ordering = torch.arange(0, num_sentences) - ordering[substitutions] = substitutions[torch.randperm(num_to_permute)] - - # Ignore at start - index = 1 - for i in ordering: - sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]] - result[index : index + sentence.size(0)] = sentence - index += sentence.size(0) - return result - - def word_starts(self, source): - if self.mask_whole_word is not None: - is_word_start = self.mask_whole_word.gather(0, source) - else: - is_word_start = torch.ones(source.size()) - is_word_start[0] = 0 - is_word_start[-1] = 0 - return is_word_start - - def add_whole_word_mask(self, source, p): - is_word_start = self.word_starts(source) - num_to_mask = int(math.ceil(is_word_start.float().sum() * p)) - num_inserts = 0 - if num_to_mask == 0: - return source - - if self.mask_span_distribution is not None: - lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,)) - - # Make sure we have enough to mask - cum_length = torch.cumsum(lengths, 0) - while cum_length[-1] < num_to_mask: - lengths = torch.cat( - [ - lengths, - self.mask_span_distribution.sample(sample_shape=(num_to_mask,)), - ], - dim=0, - ) - cum_length = torch.cumsum(lengths, 0) - - # Trim to masking budget - i = 0 - while cum_length[i] < num_to_mask: - i += 1 - lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1]) - num_to_mask = i + 1 - lengths = lengths[:num_to_mask] - - # Handle 0-length mask (inserts) separately - lengths = lengths[lengths > 0] - num_inserts = num_to_mask - lengths.size(0) - num_to_mask -= num_inserts - if num_to_mask == 0: - return self.add_insertion_noise(source, num_inserts / source.size(0)) - - assert (lengths > 0).all() - else: - lengths = torch.ones((num_to_mask,)).long() - assert is_word_start[-1] == 0 - word_starts = is_word_start.nonzero(as_tuple=False) - indices = word_starts[ - torch.randperm(word_starts.size(0))[:num_to_mask] - ].squeeze(1) - mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio - - source_length = source.size(0) - assert source_length - 1 not in indices - to_keep = torch.ones(source_length, dtype=torch.bool) - is_word_start[ - -1 - ] = 255 # acts as a long length, so spans don't go over the end of doc - if self.replace_length == 0: - to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - - if self.mask_span_distribution is not None: - assert len(lengths.size()) == 1 - assert lengths.size() == indices.size() - lengths -= 1 - while indices.size(0) > 0: - assert lengths.size() == indices.size() - lengths -= is_word_start[indices + 1].long() - uncompleted = lengths >= 0 - indices = indices[uncompleted] + 1 - mask_random = mask_random[uncompleted] - lengths = lengths[uncompleted] - if self.replace_length != -1: - # delete token - to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - else: - # A bit faster when all lengths are 1 - while indices.size(0) > 0: - uncompleted = is_word_start[indices + 1] == 0 - indices = indices[uncompleted] + 1 - mask_random = mask_random[uncompleted] - if self.replace_length != -1: - # delete token 
- to_keep[indices] = 0 - else: - # keep index, but replace it with [MASK] - source[indices] = self.mask_idx - source[indices[mask_random]] = torch.randint( - 1, len(self.vocab), size=(mask_random.sum(),) - ) - - assert source_length - 1 not in indices - - source = source[to_keep] - - if num_inserts > 0: - source = self.add_insertion_noise(source, num_inserts / source.size(0)) - - return source - - def add_permuted_noise(self, tokens, p): - num_words = len(tokens) - num_to_permute = math.ceil(((num_words * 2) * p) / 2.0) - substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1 - tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]] - return tokens - - def add_rolling_noise(self, tokens): - offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1) - tokens = torch.cat( - (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]), - dim=0, - ) - return tokens - - def add_insertion_noise(self, tokens, p): - if p == 0.0: - return tokens - - num_tokens = len(tokens) - n = int(math.ceil(num_tokens * p)) - - noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1 - noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool) - noise_mask[noise_indices] = 1 - result = torch.LongTensor(n + len(tokens)).fill_(-1) - - num_random = int(math.ceil(n * self.random_ratio)) - result[noise_indices[num_random:]] = self.mask_idx - result[noise_indices[:num_random]] = torch.randint( - low=1, high=len(self.vocab), size=(num_random,) - ) - - result[~noise_mask] = tokens - - assert (result >= 0).all() - return result - - def collater(self, samples, pad_to_length=None): - """Merge a list of samples to form a mini-batch. - Args: - samples (List[dict]): samples to collate - Returns: - dict: a mini-batch of data - """ - return collate( - samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length - ) - - def num_tokens(self, index): - """Return the number of tokens in a sample. This value is used to - enforce ``--max-tokens`` during batching.""" - return self.sizes[index] - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return self.sizes[index] - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - else: - indices = np.arange(len(self)) - return indices[np.argsort(self.sizes[indices], kind="mergesort")] - - def prefetch(self, indices): - self.src.prefetch(indices) - self.tgt.prefetch(indices) - - @property - def supports_prefetch(self): - return ( - hasattr(self.src, "supports_prefetch") - and self.src.supports_prefetch - and hasattr(self.tgt, "supports_prefetch") - and self.tgt.supports_prefetch - ) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Redhat Enterprise Linux 5.8 X64 Iso LINK.md b/spaces/stomexserde/gpt4-ui/Examples/Download Redhat Enterprise Linux 5.8 X64 Iso LINK.md deleted file mode 100644 index e227918ea69a2611f61f4636ec2fb21d4b527ac1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download Redhat Enterprise Linux 5.8 X64 Iso LINK.md +++ /dev/null @@ -1,26 +0,0 @@ - -

        How to Download Red Hat Enterprise Linux 5.8 x64 ISO

        -

        Red Hat Enterprise Linux (RHEL) is a popular Linux distribution for enterprise users. It offers stability, security, and support for a wide range of applications and platforms. If you want to download RHEL 5.8 x64 ISO, you have a few options:

        -
          -
        • You can download it from the Red Hat Developer website, which requires a free registration. You can also access developer tutorials and other resources from this site[^1^].
        • -
        • You can download it from the Red Hat Customer Portal, which requires a subscription. You can also access product support and knowledge from this site[^2^].
        • -
        • You can download it from the Red Hat Enterprise Linux page on the customer portal, which also requires a subscription. You can also access other versions and architectures of RHEL from this page[^3^].
        • -
        -

        Once you have downloaded the ISO file, you can burn it to a DVD or a USB drive and use it to install RHEL 5.8 x64 on your system.
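
        Before you burn it, it is worth verifying the download against the SHA-256 checksum that Red Hat publishes alongside the ISO. Here is a minimal verification sketch in Python; the file name and checksum value below are placeholders for whatever your download page lists, not real values:

```python
import hashlib

# Placeholders: substitute the actual file name and the SHA-256
# checksum listed on the page you downloaded the ISO from.
ISO_PATH = "rhel-server-5.8-x86_64-dvd.iso"
EXPECTED_SHA256 = "0" * 64

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MiB chunks so a multi-GB ISO never sits in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if sha256_of(ISO_PATH) == EXPECTED_SHA256:
    print("Checksum matches - the image is safe to burn.")
else:
    print("Checksum mismatch - re-download the ISO before using it.")
```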

        -

        download redhat enterprise linux 5.8 x64 iso


        Download Zip: https://urlgoal.com/2uI9Rg



        RHEL 5.8 x64 is an older version of RHEL that was released in 2012. Red Hat's Extended Life-cycle Support for RHEL 5 has since ended (in November 2020), so it no longer receives the features and updates that newer versions have. If you want to use RHEL 5.8 x64, you should make sure that your hardware and software are compatible with it.

        -

        Some of the features and improvements that RHEL 5.8 x64 offers are:

        -
          -
        • Enhanced security with SELinux policy updates and OpenSCAP support.
        • -
        • Better performance and scalability with KVM virtualization and NUMA enhancements.
        • -
        • Improved reliability and availability with cluster file system and fencing enhancements.
        • -
        • More hardware support with new drivers and firmware updates.
        • -
        • More software support with updated packages and bug fixes.
        • -
        -

        RHEL 5.8 x64 is a stable and secure Linux distribution that can meet the needs of enterprise users. However, if you want to take advantage of the latest innovations and technologies, you may want to consider upgrading to a newer version of RHEL, such as RHEL 7 or RHEL 8.

        If you want to upgrade to a newer version of RHEL, you have two options:

        -
          -
        1. You can perform an in-place upgrade, which replaces the existing RHEL system with a newer one. This option preserves your data and configuration, but it may not be compatible with some applications and hardware. You should back up your system and test the upgrade before performing it.
        2. -
        3. You can perform a fresh installation, which erases the existing RHEL system and installs a newer one. This option gives you a clean and updated system, but it requires you to back up and restore your data and configuration. You should also check the compatibility of your applications and hardware with the new system.
        4. -
        -

        Both options require a subscription to Red Hat and a valid activation key. You can use the Red Hat Upgrade Center to find the best upgrade path for your system.
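
        Whichever option you pick, both assume you have a working backup before you start. As a rough illustration of the idea, here is a minimal Python sketch that archives a configuration directory into a timestamped tarball; the directory list is illustrative only, not a complete backup plan:

```python
import tarfile
import time

# Illustrative: back up /etc (system configuration) before upgrading.
# A real plan would also cover data directories and a restore test.
SOURCE_DIRS = ["/etc"]
archive_name = "pre-upgrade-backup-%s.tar.gz" % time.strftime("%Y%m%d-%H%M%S")

with tarfile.open(archive_name, "w:gz") as tar:
    for path in SOURCE_DIRS:
        # Store paths relative to / inside the archive.
        tar.add(path, arcname=path.lstrip("/"))

print("Wrote", archive_name)
```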

        -
        -
        \ No newline at end of file diff --git a/spaces/subhc/Guess-What-Moves/mask_former/utils/__init__.py b/spaces/subhc/Guess-What-Moves/mask_former/utils/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/subhc/Guess-What-Moves/mask_former/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Answers To Escience Lab 11 Mitosis.rar ((LINK)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Answers To Escience Lab 11 Mitosis.rar ((LINK)).md deleted file mode 100644 index 90c60135346eeda33117f5a352dba033455be88a..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Answers To Escience Lab 11 Mitosis.rar ((LINK)).md +++ /dev/null @@ -1,6 +0,0 @@ -

        answers to escience lab 11 mitosis.rar


        DOWNLOAD: https://cinurl.com/2uEZeU



        -
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodata 3.39 Hrvatski Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodata 3.39 Hrvatski Download.md deleted file mode 100644 index 3a632e9b2c4c39b2a5aefeaf003116e95154ac0b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Autodata 3.39 Hrvatski Download.md +++ /dev/null @@ -1,11 +0,0 @@ -

        Autodata 3.39 hrvatski download


        DOWNLOAD ✒ ✒ ✒ https://cinurl.com/2uEXOx



        - -Autodata in Serbian, free download ("autodata na srpskom besplatno skidanje"). Dec 22, 2019. ... Download Autodata 2010 Hrvatski free. Autodata 3.39.7 for Mac OS free download. Free download: Autodata Professional in Russian, with support. Languages: English, Russian, German, Spanish, French. -Autodata 3.39.7 for Mac OS free download. -19 Aug 2018: Download for free: Autodata 3.48.2 in Russian, with support. -Download Autodata Professional 3.48.2 in Russian for free. -19 Jan 2018: Download for free: Autodata 3.48.2 in Russian, with support. -Autodata 3.48.2 Free Download.
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Telechargercovadis13aveccracked.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Telechargercovadis13aveccracked.md deleted file mode 100644 index d0967c23dd99294a7567ea9319f8c1d4eb257746..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Telechargercovadis13aveccracked.md +++ /dev/null @@ -1,17 +0,0 @@ -

        telechargercovadis13aveccracked


        Download: https://cinurl.com/2uEXZX



        -
        -PDF download: free PDF, urgently, in Hindi. 3D for Covadis 13, it's urgent, thanks ("3d pour covadis 13 c'est urgent Merci"). Telecharger Covadis 13 Avec Cracked, 1 / 3, weebly. Feb 9, 2014. -Download free PDF Candy Editor for Mac 2.2.1 in Russian. -Covadis 13 download with key, cracked - Duration: 3:01, by Alter Geo. -13:06. -13 Apr 2010. -Covadis PDF Editor v3.0.1.19 download. -Free download: PDF Editor. -Covadis 13 download with key. -Download: Covadis 13 cracked. -Covadis 13 cracked. -Download torrent. -Download
        -
        -
        -

        diff --git a/spaces/suresh-subramanian/bean-classification/app.py b/spaces/suresh-subramanian/bean-classification/app.py deleted file mode 100644 index f1d978e218367db3e8c9488a846eb0e94fe16fea..0000000000000000000000000000000000000000 --- a/spaces/suresh-subramanian/bean-classification/app.py +++ /dev/null @@ -1,29 +0,0 @@ -import transformers -import gradio as gr -import datasets -import torch -from transformers import AutoFeatureExtractor, AutoModelForImageClassification -# from transformers import ViTFeatureExtractor, ViTForImageClassification - -dataset = datasets.load_dataset('beans') - -extractor = AutoFeatureExtractor.from_pretrained("suresh-subramanian/beans-classification") -model = AutoModelForImageClassification.from_pretrained("suresh-subramanian/beans-classification") -# feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224") - -labels = dataset['train'].features['labels'].names - -def classify(im): - features = extractor(im, return_tensors='pt') - with torch.no_grad(): - logits = model(features["pixel_values"])[-1] - probability = torch.nn.functional.softmax(logits, dim=-1) - probs = probability[0].detach().numpy() - confidences = {label: float(probs[i]) for i, label in enumerate(labels)} - return confidences - - # examples = [["powdery mildew.jpg"], ["375010.jpg"]] -# Set gradio interface -gr_interface = gr.Interface(classify, inputs='image', outputs='label', title='Bean Classification', description='Monitor your crops health in easier way') -# Launch gradio -gr_interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Movie In Hindi Dubbed Torrent TOP.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Movie In Hindi Dubbed Torrent TOP.md deleted file mode 100644 index b8abdd0c33e682490fde627bfd72f7a822ee358c..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Aashiqui 2 Movie In Hindi Dubbed Torrent TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Aashiqui 2 Movie In Hindi Dubbed Torrent


        Download Zip: https://urluss.com/2uCEj5



        -
        -Aashiqui 2 is an Indian romantic musical drama film directed by Mohit Suri ... Aashiqui 2 is a remake of the movie Aashiqui (1990). ... Magnet Link · Torrent File ... aashiqui 2 720p hd movie download best bollywood movie ever.
        -
        -
        -

        diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Attarintiki Daredi Full __LINK__ Movie Hd 1080p.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Attarintiki Daredi Full __LINK__ Movie Hd 1080p.md deleted file mode 100644 index d15f82d22d631dd4580cbdd2ce7c8b773df51506..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Attarintiki Daredi Full __LINK__ Movie Hd 1080p.md +++ /dev/null @@ -1,74 +0,0 @@ - -

        How to Watch Attarintiki Daredi Full Movie HD 1080p Online

        -

        Attarintiki Daredi is a Telugu movie that was released in 2013 and became a huge hit at the box office. The movie stars Pawan Kalyan as Gautam Nanda, a wealthy businessman who flies down from Milan to Hyderabad to reunite his grandfather with his estranged daughter Sunanda (Nadhiya). He also falls in love with Sunanda's daughter Sashi (Samantha Akkineni), who is engaged to another man. The movie is a comedy of errors that involves Gautam's cousins (Pranitha Subhash and Brahmanandam), who are jealous of his success and try to sabotage his plans.

        -

        If you are looking for a way to watch Attarintiki Daredi full movie HD 1080p online, you have come to the right place. In this article, we will tell you how you can stream or download the movie legally and enjoy it in high quality.

        -

        attarintiki daredi full movie hd 1080p


        DOWNLOAD: https://urluss.com/2uCH3o



        -

        Where to Stream Attarintiki Daredi Full Movie HD 1080p Online

        -

        One of the easiest and most convenient ways to watch Attarintiki Daredi full movie HD 1080p online is to stream it on Prime Video. Prime Video is the official platform for the movie and offers it in HD quality with subtitles. You can watch the movie on any device that supports Prime Video, such as your laptop, smartphone, tablet, smart TV or gaming console. You can also download the movie on your device and watch it offline.

        -

        To stream or download Attarintiki Daredi full movie HD 1080p online on Prime Video, you need to have a Prime membership or a Prime Video subscription. If you don't have either of them, you can sign up for a free trial and enjoy the movie for free. You can also cancel your subscription anytime before the trial ends and avoid any charges.

        -

        How to Download Attarintiki Daredi Full Movie HD 1080p Online

        -

        If you prefer to download Attarintiki Daredi full movie HD 1080p online instead of streaming it, you have some other options as well. You can download the movie from YouTube, where it has been uploaded by Volga Video with English subtitles. You can use a YouTube downloader software or website to save the movie on your device and watch it offline.

        -

        However, before you download Attarintiki Daredi full movie HD 1080p online from YouTube or any other website, you should be aware of the legal and ethical issues involved. Downloading pirated or unauthorized copies of the movie may violate the copyright laws and harm the filmmakers and artists who worked hard to make the movie. You should also be careful of the malware and viruses that may infect your device from untrusted sources.

        -

        Conclusion

        -

        Attarintiki Daredi is a Telugu movie that you should not miss if you love comedy, romance and family drama. The movie has a great story, a superb cast, a catchy soundtrack and amazing visuals that make it a fun and enjoyable watch. You can watch Attarintiki Daredi full movie HD 1080p online on Prime Video or YouTube legally and safely. You can also download the movie on your device and watch it offline at your convenience.

        -

        -
        What is the Story of Attarintiki Daredi Full Movie HD 1080p
        -

        The story of Attarintiki Daredi full movie HD 1080p revolves around Gautam Nanda, who is the grandson of a wealthy businessman named Raghunandan (Boman Irani). Raghunandan has a daughter named Sunanda, who married a man against his wishes and left him. Raghunandan regrets his mistake and wants to reconcile with Sunanda before his death. He asks Gautam to find Sunanda and bring her back to him.

        -

        Gautam agrees to his grandfather's request and goes to Hyderabad, where Sunanda lives with her husband (Rao Ramesh) and two daughters, Sashi and Prameela (Pranitha Subhash). Gautam poses as a driver named Siddu and enters Sunanda's house. He tries to win the hearts of Sunanda and her family members by helping them in various ways. He also falls in love with Sashi, who is engaged to a politician's son named Raghu (Mukesh Rishi).

        -

        Gautam faces many challenges and obstacles in his mission. He has to deal with his cousins (Brahmanandam and Ali), who are jealous of his wealth and success and try to expose his identity. He also has to face Raghu, who is a corrupt and violent man who wants to marry Sashi for political gains. He also has to convince Sunanda, who is still angry with her father and refuses to forgive him.

        -

        How Gautam overcomes all these difficulties and reunites his family forms the rest of the story of Attarintiki Daredi full movie HD 1080p.

        -
        What are the Highlights of Attarintiki Daredi Full Movie HD 1080p
        -

        Attarintiki Daredi full movie HD 1080p has many highlights that make it a must-watch for Telugu movie lovers. Some of the highlights are:

        -
          -
        • The movie has a brilliant screenplay and dialogues by Trivikram Srinivas, who is known for his witty and humorous writing. The movie has many hilarious scenes and punch lines that will make you laugh out loud.
        • -
        • The movie has a powerful performance by Pawan Kalyan, who is one of the most popular and charismatic actors in Telugu cinema. He showcases his versatility and talent by playing a dual role of Gautam and Siddu. He also impresses with his action sequences and dance moves.
        • -
        • The movie has beautiful chemistry between Pawan Kalyan and Samantha Akkineni, who are one of the most loved on-screen pairs in Telugu cinema. They share a cute and romantic relationship that will make you root for them.
        • -
        • The movie has melodious music by Devi Sri Prasad, who is one of the most successful music composers in Telugu cinema. The movie has six songs that are catchy and memorable. The songs are sung by popular singers like Shreya Ghoshal, K.K., Palak Muchhal and others.
        • -
        • The movie has a message about the importance of family values and relationships. The movie shows how Gautam respects his grandfather's wish and tries to reunite his family. The movie also shows how Sunanda realizes her mistake and reconciles with her father. The movie teaches us to forgive and forget our past grudges and live happily with our loved ones.
        • -
        -
        What are the Reviews of Attarintiki Daredi Full Movie HD 1080p
        -

        Attarintiki Daredi full movie HD 1080p has received positive reviews from critics and audiences alike. The movie has been praised for its story, direction, performances, music and visuals. The movie has also been appreciated for its message and entertainment value.

        -

        Some of the reviews of Attarintiki Daredi full movie HD 1080p are:

        -
        -

        "Attarintiki Daredi is a complete family entertainer that has something for everyone. Pawan Kalyan is in top form and carries the film on his shoulders. Samantha is cute and charming. The comedy is hilarious and the emotions are touching. The music is catchy and the cinematography is splendid. The movie is a perfect example of Trivikram's magic." - Times of India

        -
        -
        -

        "Attarintiki Daredi is a delightful treat for Pawan Kalyan fans and Telugu movie lovers. The movie has a simple but engaging story that is laced with humor and sentiment. The movie has a superb star cast that delivers excellent performances. The movie has a melodious soundtrack that enhances the mood and atmosphere of the movie. The movie has stunning visuals that make it a treat for the eyes." - IndiaGlitz

        -
        -
        -

        "Attarintiki Daredi is a Telugu movie that you should not miss if you love comedy, romance and family drama. The movie has a great story, a superb cast, a catchy soundtrack and amazing visuals that make it a fun and enjoyable watch. The movie also has a message about the importance of family values and relationships. The movie is a must-watch for Pawan Kalyan fans and Trivikram admirers." - IMDb

        -
        What are the Awards of Attarintiki Daredi Full Movie HD 1080p
        -

        Attarintiki Daredi full movie HD 1080p has also won several awards and accolades for its excellence and popularity. The movie has won awards in various categories such as best film, best actor, best actress, best director, best music director, best comedian and others.

        -

        Some of the awards of Attarintiki Daredi full movie HD 1080p are:

        -
          -
        • Filmfare Awards South - Best Film (Telugu), Best Actor (Telugu) - Pawan Kalyan, Best Director (Telugu) - Trivikram Srinivas, Best Music Director (Telugu) - Devi Sri Prasad
        • -
        • Nandi Awards - Best Feature Film (Gold), Best Actor - Pawan Kalyan, Best Dialogue Writer - Trivikram Srinivas, Best Female Comedian - Pranitha Subhash
        • -
        • Santosham Film Awards - Best Film, Best Actor - Pawan Kalyan, Best Actress - Samantha Akkineni, Best Director - Trivikram Srinivas, Best Music Director - Devi Sri Prasad
        • -
        • SIIMA Awards - Best Film (Telugu), Best Actor (Telugu) - Pawan Kalyan, Best Actress (Telugu) - Samantha Akkineni, Best Director (Telugu) - Trivikram Srinivas, Best Music Director (Telugu) - Devi Sri Prasad
        • -
        • CineMAA Awards - Best Actor (Male) - Pawan Kalyan, Best Actor (Female) - Samantha Akkineni, Best Director - Trivikram Srinivas, Best Music Director - Devi Sri Prasad
        • -
        -
        What are the Trivia of Attarintiki Daredi Full Movie HD 1080p
        -

        Attarintiki Daredi full movie HD 1080p has some interesting trivia that you may not know. Here are some of them:

        -
          -
        • The movie was initially titled Saradaa, but was later changed to Attarintiki Daredi, which means "Which Path Leads to Aunt's House?" in Telugu.
        • -
        • The movie was leaked online before its release and caused a huge loss to the makers. However, the movie still managed to break several records and became one of the highest-grossing Telugu movies of all time.
        • -
        • The movie was remade in Tamil as Vanmam, in Kannada as Ranna and in Bengali as Abhimaan.
        • -
        • The movie has a cameo appearance by Bollywood actor Salman Khan, who is a good friend of Pawan Kalyan. He appears in the song "Kaatama Rayuda", which is sung by Pawan Kalyan himself.
        • -
        • The movie has a reference to Pawan Kalyan's previous hit movie Gabbar Singh, where he says "Naku konchem tikkundi, kani daniki oka lekkaundi" (I have a little bit of craziness, but there is a reason for it).
        • -
        -
        What are the Ratings of Attarintiki Daredi Full Movie HD 1080p
        -

        Attarintiki Daredi full movie HD 1080p has received high ratings from various sources. The movie has been rated based on different criteria such as story, direction, performances, music, visuals and overall entertainment value. Here are some of the ratings of Attarintiki Daredi full movie HD 1080p:

        -
        Source | Rating
        IMDb | 7.2/10
        Rotten Tomatoes | 83%
        Times of India | 4/5
        IndiaGlitz | 4/5
        CineMAA Awards | 4/5
        -
        Conclusion
        -

        Attarintiki Daredi full movie HD 1080p is a Telugu movie that you should watch if you are looking for a family entertainer that has comedy, romance and drama. The movie has a strong story, a stellar cast, a catchy soundtrack and stunning visuals that make it a memorable and enjoyable watch. The movie also has a message about the importance of family values and relationships. You can watch Attarintiki Daredi full movie HD 1080p online on Prime Video or YouTube legally and safely. You can also download the movie on your device and watch it offline at your convenience.

        -

        So, what are you waiting for? Grab your popcorn and enjoy Attarintiki Daredi full movie HD 1080p online or offline. You will not regret it.

        -
        -
        \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/eval_hooks.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/eval_hooks.py deleted file mode 100644 index 6fc100c8f96e817a6ed2666f7c9f762af2463b48..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/evaluation/eval_hooks.py +++ /dev/null @@ -1,109 +0,0 @@ -import os.path as osp - -from annotator.uniformer.mmcv.runner import DistEvalHook as _DistEvalHook -from annotator.uniformer.mmcv.runner import EvalHook as _EvalHook - - -class EvalHook(_EvalHook): - """Single GPU EvalHook, with efficient test support. - - Args: - by_epoch (bool): Determine perform evaluation by epoch or by iteration. - If set to True, it will perform by epoch. Otherwise, by iteration. - Default: False. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - Returns: - list: The prediction results. - """ - - greater_keys = ['mIoU', 'mAcc', 'aAcc'] - - def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs): - super().__init__(*args, by_epoch=by_epoch, **kwargs) - self.efficient_test = efficient_test - - def after_train_iter(self, runner): - """After train epoch hook. - - Override default ``single_gpu_test``. - """ - if self.by_epoch or not self.every_n_iters(runner, self.interval): - return - from annotator.uniformer.mmseg.apis import single_gpu_test - runner.log_buffer.clear() - results = single_gpu_test( - runner.model, - self.dataloader, - show=False, - efficient_test=self.efficient_test) - self.evaluate(runner, results) - - def after_train_epoch(self, runner): - """After train epoch hook. - - Override default ``single_gpu_test``. - """ - if not self.by_epoch or not self.every_n_epochs(runner, self.interval): - return - from annotator.uniformer.mmseg.apis import single_gpu_test - runner.log_buffer.clear() - results = single_gpu_test(runner.model, self.dataloader, show=False) - self.evaluate(runner, results) - - -class DistEvalHook(_DistEvalHook): - """Distributed EvalHook, with efficient test support. - - Args: - by_epoch (bool): Determine perform evaluation by epoch or by iteration. - If set to True, it will perform by epoch. Otherwise, by iteration. - Default: False. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - Returns: - list: The prediction results. - """ - - greater_keys = ['mIoU', 'mAcc', 'aAcc'] - - def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs): - super().__init__(*args, by_epoch=by_epoch, **kwargs) - self.efficient_test = efficient_test - - def after_train_iter(self, runner): - """After train epoch hook. - - Override default ``multi_gpu_test``. - """ - if self.by_epoch or not self.every_n_iters(runner, self.interval): - return - from annotator.uniformer.mmseg.apis import multi_gpu_test - runner.log_buffer.clear() - results = multi_gpu_test( - runner.model, - self.dataloader, - tmpdir=osp.join(runner.work_dir, '.eval_hook'), - gpu_collect=self.gpu_collect, - efficient_test=self.efficient_test) - if runner.rank == 0: - print('\n') - self.evaluate(runner, results) - - def after_train_epoch(self, runner): - """After train epoch hook. - - Override default ``multi_gpu_test``. 
- """ - if not self.by_epoch or not self.every_n_epochs(runner, self.interval): - return - from annotator.uniformer.mmseg.apis import multi_gpu_test - runner.log_buffer.clear() - results = multi_gpu_test( - runner.model, - self.dataloader, - tmpdir=osp.join(runner.work_dir, '.eval_hook'), - gpu_collect=self.gpu_collect) - if runner.rank == 0: - print('\n') - self.evaluate(runner, results) diff --git a/spaces/swcrazyfan/DeKingify/README.md b/spaces/swcrazyfan/DeKingify/README.md deleted file mode 100644 index b7e87baf63d2fb71188116ab85ef9890f4318053..0000000000000000000000000000000000000000 --- a/spaces/swcrazyfan/DeKingify/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: DeKingify -emoji: 🤴 -colorFrom: indigo -colorTo: purple -sdk: gradio -app_file: app.py -pinned: true ---- -“Translate” 17th-century English text to modern English. The name comes from the dataset used for fine-tuning. Modern Bible translations and the famous King James Bible. \ No newline at end of file diff --git a/spaces/syedusama5556/Real-ESRGAN-Demo/README.md b/spaces/syedusama5556/Real-ESRGAN-Demo/README.md deleted file mode 100644 index a932c129fef179094705f88e486a00ad80ae3950..0000000000000000000000000000000000000000 --- a/spaces/syedusama5556/Real-ESRGAN-Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Real-ESRGAN Demo for Image Restoration and Upscaling -emoji: 🖼️ -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: true -duplicated_from: havas79/Real-ESRGAN_Demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tbxg34/Satellite-Image-Recognition/README.md b/spaces/tbxg34/Satellite-Image-Recognition/README.md deleted file mode 100644 index f18cd6c65f71883a0a27adf3e838565d074bc383..0000000000000000000000000000000000000000 --- a/spaces/tbxg34/Satellite-Image-Recognition/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Satellite Image Recognition -emoji: 🏃 -colorFrom: red -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/model_worker.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/model_worker.py deleted file mode 100644 index 01863ea583094675bb15024e6bf47e85fa65e063..0000000000000000000000000000000000000000 --- a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/model_worker.py +++ /dev/null @@ -1,227 +0,0 @@ -""" -A model worker executes the model. 
-""" -import argparse -import asyncio -import json -import time -import threading -import uuid - -import requests -import torch -from functools import partial - -from mplug_owl2.constants import WORKER_HEART_BEAT_INTERVAL -from mplug_owl2.utils import (build_logger, server_error_msg, - pretty_print_semaphore) -from mplug_owl2.model.builder import load_pretrained_model -from mplug_owl2.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, KeywordsStoppingCriteria -from mplug_owl2.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN -from transformers import TextIteratorStreamer -from threading import Thread - -GB = 1 << 30 - -worker_id = str(uuid.uuid4())[:6] -logger = build_logger("model_worker", f"model_worker_{worker_id}.log") - -class ModelWorker: - def __init__(self, model_path, model_base, model_name, load_8bit, load_4bit, device): - self.worker_id = worker_id - if model_path.endswith("/"): - model_path = model_path[:-1] - if model_name is None: - model_paths = model_path.split("/") - if model_paths[-1].startswith('checkpoint-'): - self.model_name = model_paths[-2] + "_" + model_paths[-1] - else: - self.model_name = model_paths[-1] - else: - self.model_name = model_name - - self.device = device - logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...") - self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model( - model_path, model_base, self.model_name, load_8bit, load_4bit, device=self.device) - self.is_multimodal = True - - @torch.inference_mode() - def predict_stream(self, params): - tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor - - prompt = params["prompt"] + "The quality of the image is" - ori_prompt = prompt - images = params.get("images", None) - num_image_tokens = 0 - if images is not None and len(images) > 0 and self.is_multimodal: - if len(images) > 0: - if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN): - raise ValueError("Number of images does not match number of <|image|> tokens in prompt") - - images = [load_image_from_base64(image) for image in images] - images = process_images(images, image_processor, model.config) - - if type(images) is list: - images = [image.to(self.model.device, dtype=torch.float16) for image in images] - else: - images = images.to(self.model.device, dtype=torch.float16) - - replace_token = DEFAULT_IMAGE_TOKEN - prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token) - - num_image_tokens = prompt.count(replace_token) * (model.get_model().visual_abstractor.config.num_learnable_queries + 1) - else: - images = None - image_args = {"images": images} - else: - images = None - image_args = {} - - input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) - - logits = model.forward( - input_ids=input_ids, - use_cache=True, - **image_args).logits[0,-1] - - print(logits.shape) - - softmax_logits = torch.softmax(logits[[1781,6588,6460]], 0) - - print(tokenizer(["good", "average", "poor"])) - fake_streamer = [] - for id_, word in enumerate(["good", "average", "poor"]): - stream_ = f"Probability of {word} quality: {softmax_logits[id_].item():.4f};\n" - fake_streamer.append(stream_) - - quality_score = 0.5 * softmax_logits[1] + softmax_logits[0] - stream_ = f"Quality score: {quality_score:.4f} (range [0,1])." 
- fake_streamer.append(stream_) - - generated_text = ori_prompt.replace("The quality of the image is", "") - for new_text in fake_streamer: - generated_text += new_text - yield json.dumps({"text": generated_text, "error_code": 0}).encode() - - @torch.inference_mode() - def generate_stream(self, params): - tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor - - prompt = params["prompt"] - ori_prompt = prompt - images = params.get("images", None) - num_image_tokens = 0 - if images is not None and len(images) > 0 and self.is_multimodal: - if len(images) > 0: - if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN): - raise ValueError("Number of images does not match number of <|image|> tokens in prompt") - - images = [load_image_from_base64(image) for image in images] - images = process_images(images, image_processor, model.config) - - if type(images) is list: - images = [image.to(self.model.device, dtype=torch.float16) for image in images] - else: - images = images.to(self.model.device, dtype=torch.float16) - - replace_token = DEFAULT_IMAGE_TOKEN - prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token) - - num_image_tokens = prompt.count(replace_token) * (model.get_model().visual_abstractor.config.num_learnable_queries + 1) - else: - images = None - image_args = {"images": images} - else: - images = None - image_args = {} - - temperature = float(params.get("temperature", 1.0)) - top_p = float(params.get("top_p", 1.0)) - max_context_length = getattr(model.config, 'max_position_embeddings', 4096) - max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024) - stop_str = params.get("stop", None) - do_sample = True if temperature > 0.001 else False - - input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device) - keywords = [stop_str] - stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) - streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15) - - max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens) - - if max_new_tokens < 1: - yield json.dumps({"text": ori_prompt + "Exceeds max token length. 
Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0" - return - - thread = Thread(target=model.generate, kwargs=dict( - inputs=input_ids, - do_sample=do_sample, - temperature=temperature, - top_p=top_p, - max_new_tokens=max_new_tokens, - streamer=streamer, - stopping_criteria=[stopping_criteria], - use_cache=True, - **image_args - )) - thread.start() - - generated_text = ori_prompt - for new_text in streamer: - generated_text += new_text - if generated_text.endswith(stop_str): - generated_text = generated_text[:-len(stop_str)] - yield json.dumps({"text": generated_text, "error_code": 0}).encode() - - def predict_stream_gate(self, params): - try: - for x in self.predict_stream(params): - yield x - except ValueError as e: - print("Caught ValueError:", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() - except torch.cuda.CudaError as e: - print("Caught torch.cuda.CudaError:", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() - except Exception as e: - print("Caught Unknown Error", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() - - def generate_stream_gate(self, params): - try: - for x in self.generate_stream(params): - yield x - except ValueError as e: - print("Caught ValueError:", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() - except torch.cuda.CudaError as e: - print("Caught torch.cuda.CudaError:", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() - except Exception as e: - print("Caught Unknown Error", e) - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/3dmgame Dll Download LINK Fifa 15 Crack.md b/spaces/terfces0erbo/CollegeProjectV2/3dmgame Dll Download LINK Fifa 15 Crack.md deleted file mode 100644 index 02bbe83a9ceccde6b002e9c74bb7e2d028196d6a..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/3dmgame Dll Download LINK Fifa 15 Crack.md +++ /dev/null @@ -1,60 +0,0 @@ -

        3dmgame dll download fifa 15 crack


        DOWNLOAD ……… https://bytlly.com/2uGkWH



        - -A: - -Your issue is this: - -"The dll files are copied to the root of the directory of the game I have just installed..." - -In order to fix this problem, you can either extract your game to a location that your user account can write to (a folder under your user profile, for example, rather than a protected system folder), or fix the permissions on the game folder by hand. - -Alternatively, you can extract to a different folder in your Documents\My Games\ folder and copy the dll to the desired location by hand.
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Illustrator CS6 Download Free _TOP_.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Illustrator CS6 Download Free _TOP_.md deleted file mode 100644 index fb7b6cc2091674dcedf9b23772ee81375dadc1be..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Illustrator CS6 Download Free _TOP_.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Adobe Illustrator CS6 Download Free


        Download Zip 🆓 https://bytlly.com/2uGj3T



        -
        -Adobe Illustrator CS6 16.0.5. Discover new ways to experiment. Adobe® Illustrator® CS6 software is powered by the new Adobe Mercury Performance System ...
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2015 - Mercedes-Benz Crack __EXCLUSIVE__ File Download.md b/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2015 - Mercedes-Benz Crack __EXCLUSIVE__ File Download.md deleted file mode 100644 index 94b98ccf37399c23e589d3027305a389e37e9ffa..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Car Mechanic Simulator 2015 - Mercedes-Benz Crack __EXCLUSIVE__ File Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Car Mechanic Simulator 2015 - Mercedes-Benz crack file download


        Download File ->>> https://bytlly.com/2uGjAg



        -
        -
        -

        diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Harry Potter All Parts In Hindi 720p Downloadl The Best Way to Watch the Movies in Your Language.md b/spaces/tialenAdioni/chat-gpt-api/logs/Harry Potter All Parts In Hindi 720p Downloadl The Best Way to Watch the Movies in Your Language.md deleted file mode 100644 index 01f085858bd1d32cb35815434b25467cad03830f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Harry Potter All Parts In Hindi 720p Downloadl The Best Way to Watch the Movies in Your Language.md +++ /dev/null @@ -1,162 +0,0 @@ -
        -

        Harry Potter All Parts In Hindi 720p Downloadl

        -

        If you are a fan of fantasy, magic, and adventure, you must have heard of Harry Potter. The famous book series by J.K. Rowling has been adapted into eight blockbuster movies that have captivated millions of viewers around the world. But did you know that you can also watch these movies in Hindi? Yes, you read that right. You can enjoy the thrilling story of Harry Potter and his friends in your own language. In this article, we will tell you what Harry Potter is, why you should watch it in Hindi, and how to download all parts in Hindi 720p. So, let's get started.

        -

        Harry Potter All Parts In Hindi 720p Downloadl


        Download ->>->>->> https://urlcod.com/2uKaZV



        -

        Introduction

        -

        What is Harry Potter?

        -

        Harry Potter is a series of seven novels written by British author J.K. Rowling. The novels chronicle the life of a young wizard, Harry Potter, and his friends Hermione Granger and Ron Weasley, who are students at Hogwarts School of Witchcraft and Wizardry. The main story arc concerns Harry's struggle against Lord Voldemort, a dark wizard who wants to become immortal and conquer the wizarding world.

        -

        The books have been translated into more than 80 languages and have sold over 500 million copies worldwide. They have also inspired a media franchise that includes eight films, a play, a theme park, video games, merchandise, and more.

        -

        Why watch Harry Potter in Hindi?

        -

        There are many reasons why you might want to watch Harry Potter in Hindi. Here are some of them:

        -
          -
        • You can enjoy the movies in your native language and understand the dialogues better.
        • -
        • You can appreciate the cultural references and jokes that might be lost in translation.
        • -
        • You can relate to the characters and their emotions more easily.
        • -
        • You can share the movies with your family and friends who might not be fluent in English.
        • -
        • You can learn some new words and phrases in Hindi that are related to magic and fantasy.
        • -
        -

        How to download Harry Potter all parts in Hindi 720p?

        -

        If you want to watch Harry Potter in Hindi, you might be wondering how to download all parts in Hindi 720p. Well, there are many websites that offer this service, but you have to be careful about their quality and safety. Some websites might have low-quality videos, broken links, or malware that can harm your device. Therefore, we recommend using a trusted and reliable website that has high-quality videos, fast downloads, and no ads or viruses.

        -

        One such website is HindiLinks4u, which has all eight parts of Harry Potter in Hindi 720p. You can easily access this website from any device and download the movies for free. Here are the steps to follow:

        -
          -
        1. Go to HindiLinks4u and select the movie you want to download.
        2. -
        3. Click on the "Download" button below the video player.
        4. -
        5. Choose a server from the list and click on "Download Now".
        6. -
        7. Wait for a few seconds until the download link appears.
        8. -
        9. Click on the link and save the file on your device.
        10. -
        -

        That's it. You can now enjoy watching Harry Potter in Hindi 720p anytime you want.

        -

        -

        Harry Potter Movies List and Overview

        -

        Now that you know how to download Harry Potter in Hindi 720p, let's take a look at each movie and what it is about. Here is a table that summarizes the movies list and their release dates:

        -
        Title | Release Date
        Harry Potter and the Philosopher's Stone | 16 November 2001
        Harry Potter and the Chamber of Secrets | 15 November 2002
        Harry Potter and the Prisoner of Azkaban | 31 May 2004
        Harry Potter and the Goblet of Fire | 18 November 2005
        Harry Potter and the Order of the Phoenix | 11 July 2007
        Harry Potter and the Half-Blood Prince | 15 July 2009
        Harry Potter and the Deathly Hallows – Part 1 | 19 November 2010
        Harry Potter and the Deathly Hallows – Part 2 | 15 July 2011
        -

        And here is a brief overview of each movie:

        -

        Harry Potter and the Philosopher's Stone (2001)

        -

        This is the first movie in the series that introduces us to the magical world of Harry Potter. We learn that Harry is an orphan who lives with his abusive relatives, the Dursleys. On his eleventh birthday, he receives a letter from Hogwarts School of Witchcraft and Wizardry, inviting him to join as a student. He also discovers that he is a famous wizard who survived an attack by Lord Voldemort when he was a baby.

        -

        At Hogwarts, he meets his best friends Hermione Granger and Ron Weasley, as well as his enemies Draco Malfoy and Professor Snape. He also learns about the Philosopher's Stone, a powerful object that can grant eternal life. He suspects that someone is trying to steal it for Voldemort's sake. He decides to stop them with his friends' help.

        -

        Harry Potter and the Chamber of Secrets (2002)

        -

        This is the second movie in the series that follows Harry's second year at Hogwarts. He starts hearing mysterious voices that no one else can hear. He also finds out that there is a hidden chamber inside Hogwarts that contains a terrible secret: a monster that can petrify anyone who looks at it. The chamber was opened by Salazar Slytherin, one of the founders of Hogwarts who hated Muggle-borns (wizards born from non-magical parents).

        -

        The chamber has been opened again by someone who wants to purge Hogwarts of Muggle-borns. Harry is suspected to be behind it because he can speak Parseltongue (the language of snakes). He has to prove his innocence and find out who is really behind it before it's too late.

        -

        Harry Potter and the Prisoner of Azkaban (2004)

        -

        This is the third movie in the series that follows Harry's third year at Hogwarts. He learns that Sirius Black, a notorious criminal who was involved in his parents' death, has escaped from Azkaban prison. He is also told that Black is his godfather, the man who betrayed his parents to Voldemort, and that Black is now after him for revenge.

        -

        Hogwarts is guarded by Dementors, soul-sucking creatures who can make anyone feel their worst memories. Harry has to face them with his new Defense Against the Dark Arts teacher, Professor Lupin, who has a secret of his own. He also has to deal with time travel, werewolves, and animagi (people who can turn into animals).

        -

        Harry Potter and the Goblet of Fire (2005)

        -

        This is the fourth movie in the series that follows Harry's fourth year at Hogwarts. He is surprised to find out that he has been selected to participate in the Triwizard Tournament, a dangerous competition between three wizarding schools. He has to face three deadly tasks that involve dragons, mermaids, and a maze. He also has to deal with a new rival, Viktor Krum, who is a famous Quidditch player and Hermione's date for the Yule Ball.

        -

        Harry suspects that someone is trying to sabotage him and make him lose the tournament. He also learns more about Voldemort's past and his connection to him. He faces a shocking betrayal and a tragic loss that will change his life forever.

        -

        Harry Potter and the Order of the Phoenix (2007)

        -

        This is the fifth movie in the series that follows Harry's fifth year at Hogwarts. He is haunted by nightmares of Voldemort and his return. He also faces a smear campaign by the Ministry of Magic, which denies Voldemort's existence and labels Harry as a liar. He has to endure a new Defense Against the Dark Arts teacher, Dolores Umbridge, who is cruel and oppressive. She bans any form of practical magic and punishes anyone who defies her.

        -

        Harry decides to form a secret group called Dumbledore's Army, where he teaches his fellow students how to defend themselves against the Dark Arts. The group meets in a hidden room in Hogwarts called the Room of Requirement. He also learns of a mysterious prophecy that concerns him and Voldemort, kept in the Ministry's Department of Mysteries, and has to fight against Umbridge, the Ministry, and Voldemort's followers who want to steal it.

        -

        Harry Potter and the Half-Blood Prince (2009)

        -

        This is the sixth movie in the series that follows Harry's sixth year at Hogwarts. He finds an old textbook that belongs to someone called the Half-Blood Prince, who has written helpful notes and spells on it. He uses it to excel in his Potions class and impress his new teacher, Horace Slughorn. He also learns that Slughorn has a crucial memory that can reveal Voldemort's weakness: his Horcruxes.

        -

        Horcruxes are objects that contain fragments of Voldemort's soul, making him immortal. Harry has to persuade Slughorn to give him the memory and find out how many Horcruxes there are and how to destroy them. He also has to deal with his feelings for Ginny Weasley, Ron's sister, who is dating someone else.

        -

        Harry Potter and the Deathly Hallows – Part 1 (2010)

        -

        This is the seventh movie in the series that follows Harry's seventh year at Hogwarts. However, he does not return to school, but goes on a mission with Hermione and Ron to find and destroy Voldemort's Horcruxes. They have to rely on Dumbledore's clues and their own instincts to locate them. They also have to avoid being captured by Voldemort's forces, who have taken over the Ministry of Magic and are hunting them down.

        -

        They also learn about the Deathly Hallows, three legendary objects that can make one master of death: the Elder Wand, the Resurrection Stone, and the Cloak of Invisibility. Harry believes that they might be able to help him defeat Voldemort. He also has visions of Voldemort searching for something that he fears: the Elder Wand.

        -

        Harry Potter and the Deathly Hallows – Part 2 (2011)

        -

        This is the eighth and final movie in the series that follows Harry's final confrontation with Voldemort. He continues his quest to find and destroy the remaining Horcruxes with Hermione and Ron. They also return to Hogwarts, where they join forces with their friends and teachers to defend the school from Voldemort's army. They also discover the true identity of the Half-Blood Prince and the fate of the Deathly Hallows.

        -

        Harry realizes that he has to make a sacrifice to end the war and fulfill the prophecy. He faces Voldemort in a climactic duel that will decide the fate of the wizarding world.

        -

        Conclusion

        -

        Harry Potter is one of the most popular and beloved franchises in history. It has captivated millions of fans with its magical story, characters, and themes. It has also inspired many people to read more books, learn new languages, and explore new cultures.

        -

        If you want to watch Harry Potter in Hindi, you can download all parts in Hindi 720p from HindiLinks4u. You can enjoy watching these movies in your own language and appreciate them better. You can also share them with your family and friends who might not know English well.

        -

        We hope you enjoyed this article and learned something new about Harry Potter. If you have any questions or comments, feel free to leave them below. Thank you for reading.

        -

        FAQs

        -
          -
        • Q: Who is the author of Harry Potter?
        • -
        • A: J.K. Rowling is the author of Harry Potter. She wrote the first book in 1997 and finished the last one in 2007.
        • -
        • Q: Who are the main actors in Harry Potter movies?
        • -
        • A: The main actors in Harry Potter movies are Daniel Radcliffe as Harry Potter, Emma Watson as Hermione Granger, Rupert Grint as Ron Weasley, Ralph Fiennes as Lord Voldemort, Alan Rickman as Severus Snape, Maggie Smith as Minerva McGonagall, Robbie Coltrane as Rubeus Hagrid, Michael Gambon as Albus Dumbledore, Tom Felton as Draco Malfoy, Helena Bonham Carter as Bellatrix Lestrange, Emma Thompson as Sybill Trelawney, and many more.
        • -
        • Q: How many books are there in Harry Potter series?
        • -
        • A: There are seven books in the Harry Potter series: Harry Potter and the Philosopher's Stone, Harry Potter and the Chamber of Secrets, Harry Potter and the Prisoner of Azkaban, Harry Potter and the Goblet of Fire, Harry Potter and the Order of the Phoenix, Harry Potter and the Half-Blood Prince, and Harry Potter and the Deathly Hallows.
        • -
        • Q: How many movies are there in Harry Potter series?
        • -
        • A: There are eight movies in the Harry Potter series: Harry Potter and the Philosopher's Stone, Harry Potter and the Chamber of Secrets, Harry Potter and the Prisoner of Azkaban, Harry Potter and the Goblet of Fire, Harry Potter and the Order of the Phoenix, Harry Potter and the Half-Blood Prince, Harry Potter and the Deathly Hallows – Part 1, and Harry Potter and the Deathly Hallows – Part 2.
        • -
        • Q: What are some other works related to Harry Potter?
        • -
        • A: Some other works related to Harry Potter are Fantastic Beasts and Where to Find Them, a spin-off movie series set before Harry Potter; The Cursed Child, a play that follows Harry's son Albus; Quidditch Through The Ages, a book about the wizarding sport; The Tales Of Beedle The Bard, a collection of fairy tales; Pottermore, an online platform for fans; The Wizarding World of Harry Potter, a theme park; Lego Harry Potter, a video game series; and more.
        • -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/Vst To Rtas Adaptor V2.1 Serial.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/Vst To Rtas Adaptor V2.1 Serial.md deleted file mode 100644 index 002141e41261fa2d7c6fe116c8c7551685e1e3cd..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/Vst To Rtas Adaptor V2.1 Serial.md +++ /dev/null @@ -1,134 +0,0 @@ -## Vst To Rtas Adaptor V2.1 Serial - - - - - - ![Vst To Rtas Adaptor V2.1 Serial](https://i.ebayimg.com/00/s/MTIwMFgxNjAw/z/0yEAAOxy8e9SVzw~/$T2eC16d,!)8E9s4l8hsmBSVzw-ZsZQ~~60_57.JPG) - - - - - -**DOWNLOAD ---> [https://urluso.com/2tBQ0v](https://urluso.com/2tBQ0v)** - - - - - - - - - - - - - -# How to Use VST Plugins in Pro Tools with FXpansion VST to RTAS Adapter v2.1 - - - -If you are a Pro Tools user who wants to use VST plugins in your sessions, you might be frustrated by the lack of native support for this format. However, there is a solution that can help you bridge the gap between VST and RTAS: FXpansion VST to RTAS Adapter v2.1. - - - -This software allows you to enable your VST effect and instruments plugins for use in Pro Tools, without any loss of quality or functionality. The Adapter uses negligible CPU power and provides a simple configuration tool which scans for VST plugins on your system and creates RTAS plugins from them[^1^]. - - - -In this article, we will review the features, benefits and drawbacks of FXpansion VST to RTAS Adapter v2.1, and show you how to install and use it in your Pro Tools sessions. - - - -## Features of FXpansion VST to RTAS Adapter v2.1 - - - -- Compatible with Pro Tools 7.x and above (including Pro Tools 8 and 9). - -- Supports VST 2.3 and 2.4 plugins, both 32-bit and 64-bit. - -- Supports both Mac OS X and Windows platforms. - -- Automatically detects and converts VST plugins on your system. - -- Allows you to customize the plugin name, category and manufacturer. - -- Preserves all plugin parameters, presets and automation data. - -- Supports multiple instances of the same plugin. - -- Supports sidechain inputs for VST plugins that have them. - -- Supports MIDI input and output for VST plugins that have them. - -- Provides a bypass button for each plugin. - -- Provides a latency compensation option for each plugin. - -- Provides a manual rescan option for new or updated plugins. - - - -## Benefits of FXpansion VST to RTAS Adapter v2.1 - - - -- Expands your plugin collection by allowing you to use thousands of VST plugins in Pro Tools. - -- Saves you money by avoiding the need to buy RTAS versions of your favorite VST plugins. - -- Saves you time by avoiding the need to switch between different DAWs or hosts for different plugins. - -- Improves your workflow by integrating seamlessly with Pro Tools' interface and features. - -- Enhances your creativity by giving you access to a wider range of sounds and effects. - - - -## Drawbacks of FXpansion VST to RTAS Adapter v2.1 - - - -- Not compatible with Pro Tools HD systems or TDM plugins. - -- Not compatible with some VST plugins that use proprietary formats or copy protection schemes. - -- Not compatible with some VST plugins that have graphical issues or stability problems. - -- May introduce some latency or CPU load depending on the plugin and the system. - -- May require some trial and error to find the optimal settings for each plugin. - - - -## How to Install FXpansion VST to RTAS Adapter v2.1 - - - -1. 
Purchase FXpansion VST to RTAS Adapter v2.1 from [FXpansion's website](https://www.fxpansion.com/products/vst-rtas-adapter/). - -2. Download the installer file for your platform (Mac or Windows). - -3. Run the installer file and follow the instructions on screen. - -4. Launch Pro Tools and go to Setup > Preferences > Operation tab. - -5. Make sure that "Use Plug-In Header Files" is checked under Plug-In Streaming Buffer Size. - -6. Close Pro Tools and launch the FXpansion VST-RTAS Adapter Configuration Tool from your Applications folder (Mac) or Start menu (Windows). - -7. Select the folders where your VST plugins are located and click Scan Folders. - -8. The tool will scan your system for VST plugins and create corresponding RTAS plugins in a folder called "VstPlugins" inside your Digidesign folder. - -9. You can edit the name, category and manufacturer of each plugin by clicking 145887f19f - - - - - - - - - diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Catch All Creatures in Dynamons World - APK 1.7.88 for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Catch All Creatures in Dynamons World - APK 1.7.88 for Android.md deleted file mode 100644 index 0cf912756cd82987ba3a8f500a096b4a3a25b441..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Catch All Creatures in Dynamons World - APK 1.7.88 for Android.md +++ /dev/null @@ -1,120 +0,0 @@ - -

        Dynamons World APK Old Version: A Review

        -

        If you are a fan of RPG games, you might have heard of Dynamons World, a popular online game that lets you catch, train, and battle with dozens of unique creatures called Dynamons. The game is loved by millions of players worldwide for its exciting features, immersive story, and online multiplayer mode. But did you know that you can also download and play the old version of Dynamons World on your Android device? In this article, we will review the features, pros, and cons of Dynamons World APK old version, and show you how to download and install it on your phone. We will also share some tips and tricks to help you become the best RPG battle master in the Dynamons Kingdom!

        -

        dynamons world apk old version


        Download: https://bltlly.com/2uOm3w



        -

        Features of Dynamons World

        -

        Dynamons World is a game that offers a lot of fun and adventure for RPG lovers. Here are some of the features that make it stand out:

        -

        Online Battle Arena

        -

        One of the most exciting features of Dynamons World is the online battle arena, where you can challenge your friends and other players from around the world in real-time PvP battles. You can show off your skills, strategies, and Dynamons collection, and climb up the leaderboards. You can also join tournaments and events to win prizes and rewards.

        -

        Catch and Train Dozens of Unique Dynamons

        -

        Dynamons are the main characters of the game, and they come in different types, shapes, and sizes, each with its own abilities. You can catch them in various locations on the map, or buy them from the shop. You can also train them to level up their stats, evolve them to unlock new forms, and equip them with items and boosters to enhance their performance in battle.

        -

        Unleash Powerful Skills and Brilliant Tactics

        -

        Each Dynamon has its own set of skills that can be used in battle. Some skills are offensive, some are defensive, some are supportive, and some are special. You can use these skills to attack, defend, heal, or buff your Dynamons. You can also use skill cards, which are an all-new battle mechanic that adds more tactical elements to the game. Skill cards can be collected from battles or bought from the shop, and they can be used to activate powerful effects such as increasing damage, reducing cooldowns, or changing the weather.
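        To make the mechanic concrete, here is a small illustrative sketch in Python. It is purely hypothetical: the card names, fields, and effect values are invented, since the article does not document the game's internals.

```python
from dataclasses import dataclass


@dataclass
class SkillCard:
    """A hypothetical skill card: one effect with a strength and a duration."""

    name: str
    effect: str       # e.g. "damage_boost", "cooldown_cut", "weather"
    magnitude: float  # how strong the effect is
    turns: int        # how many turns the effect stays active


# Invented examples mirroring the effects the article lists.
hand = [
    SkillCard("Power Surge", "damage_boost", 1.5, 2),
    SkillCard("Quick Mind", "cooldown_cut", 0.5, 3),
    SkillCard("Rain Dance", "weather", 1.0, 4),
]

for card in hand:
    print(f"{card.name}: {card.effect} x{card.magnitude} for {card.turns} turns")
```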

        -

        Travel and Explore an Open World

        -

        Dynamons World has a huge map that you can explore freely. You can visit different areas such as forests, deserts, caves, temples, castles, and more. You can find new Dynamons, items, secrets, and challenges along the way. You can also follow an addictive and immersive RPG story that takes you from Dynamons Camp to the Temple Ruins.

        -

        dynamons world apk download old version
        -dynamons world apk mod old version
        -dynamons world apk 1.7.84 old version
        -dynamons world apk 1.7.88 old version
        -dynamons world apk 1.8.14 old version
        -dynamons world apk latest old version
        -dynamons world apk free old version
        -dynamons world apk offline old version
        -dynamons world apk hack old version
        -dynamons world apk unlimited money old version
        -dynamons world apk android old version
        -dynamons world apk update old version
        -dynamons world apk full old version
        -dynamons world apk no ads old version
        -dynamons world apk online old version
        -dynamons world apk for pc old version
        -dynamons world apk revdl old version
        -dynamons world apk rexdl old version
        -dynamons world apk pure old version
        -dynamons world apk uptodown old version
        -dynamons world apk apkpure old version
        -dynamons world apk apkmirror old version
        -dynamons world apk appvn old version
        -dynamons world apk andropalace old version
        -dynamons world apk android 1 old version
        -dynamons world apk android oyun club old version
        -dynamons world apk android republic old version
        -dynamons world apk an1.com old version
        -dynamons world apk blackmod.net old version
        -dynamons world apk by azerion casual old version
        -dynamons world apk by funtomic games ltd. old version
        -dynamons world apk by kizi games old version
        -dynamons world apk by plonga.com old version
        -dynamons world apk by silvergames.com old version
        -dynamons world apk by y8.com old version
        -dynamons world apk crazy games.com old version
        -dynamons world apk cheats codes old version
        -dynamons world apk cheats unlimited coins and gems old version
        -dynamons world apk cheats unlimited money and energy old version
        -dynamons world apk cheats unlock all characters and levels old version
        -dynamons world apk download for android phone and tablet old version
        -dynamons world apk download for ios iphone and ipad old version
        -dynamons world apk download for windows 10 pc and laptop old version
        -dynamons world apk download from google play store and app store old version
        -dynamons world apk download from mediafire and mega.nz links old version
        -dynamons world apk download highly compressed and modded files old version
        -dynamons world apk download latest update and new features added old version
        -dynamons world apk download safe and secure without virus or malware infection old version
        -dynamons world apk download size and requirements for installation and gameplay performance old version

        -

        New Updates and Content

        -

        How to Download and Install Dynamons World APK Old Version

        -

        If you want to play the old version of Dynamons World on your Android device, you will need to download and install the APK file from a reliable source. APK stands for Android Package Kit, and it is a file format that contains all the necessary components to run an app on your device. Here are the steps to download and install Dynamons World APK old version:

        -

        Step 1: Find a Reliable Source

        -

        The first step is to find a website that offers the Dynamons World APK old version for download. You can search for it on Google or Bing, or use a trusted site like APKPure or APKMirror. Make sure that the site is safe and secure, and that the APK file is free from viruses and malware. You can also check the reviews and ratings of other users who have downloaded the file before.
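        If the site you pick publishes a checksum for the file, you can verify your download before installing it. The short Python sketch below computes a SHA-256 digest for comparison; the file name and the expected digest are placeholders, not values from any real site.

```python
import hashlib


def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Placeholders: use your actual download path and the checksum
# published by the site you downloaded from (if it lists one).
apk_path = "dynamons-world-old.apk"
published_sha256 = "0" * 64

if sha256_of(apk_path) == published_sha256.lower():
    print("Checksum matches - the file is intact.")
else:
    print("Checksum mismatch - re-download the file.")
```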

        -

        Step 2: Enable Unknown Sources

        -

        The next step is to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning that installing apps from unknown sources may harm your device; as long as you download the APK file from a reputable site, you should be fine.

        -

        Step 3: Download and Install the APK File

        -

        The third step is to download and install the APK file on your device. To download the file, go to the website where you found it and click the download button. You may see a pop-up window asking you to confirm the download; just tap OK. The file will be saved to your device's storage, usually in the Downloads folder. To install it, open your file manager app, locate the APK file, tap on it, and follow the on-screen instructions. You may see a prompt asking you to grant permissions to the app; just tap Install. The app will be installed on your device and an icon will appear on your home screen or in your app drawer.
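        As a side note for readers comfortable with developer tools, you can also install the file from a computer over USB using Android's adb utility, assuming USB debugging is enabled on the phone and adb is on your PATH. A minimal Python wrapper might look like this; the APK file name is a placeholder.

```python
import subprocess


def adb_install(apk_path: str) -> None:
    """Install an APK on a USB-connected Android device via adb."""
    # "-r" reinstalls the app if it is already present, keeping its data.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True,
        text=True,
        check=False,
    )
    print(result.stdout or result.stderr)


adb_install("dynamons-world-old.apk")  # placeholder file name
```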

        -

        Step 4: Enjoy the Game

        -

        Pros and Cons of Dynamons World APK Old Version

        -

        Now that you know how to download and install Dynamons World APK old version, you might be wondering what are the pros and cons of playing it. Here are some of the advantages and disadvantages of playing the old version of Dynamons World:

        -

        Pros

        -
          -
        • You can enjoy the game without any bugs or glitches that might be present in the new version.
        • -
        • You can play the game offline without any internet connection.
        • -
        • You can save your data and battery by not downloading the updates and patches.
        • -
        • You can experience the original gameplay and graphics that might have changed in the new version.
        • -
        • You can relive the nostalgia and memories of playing the old version.
        • -
        -

        Cons

        -
          -
        • You might miss out on some of the new features and content that are added in the new version.
        • -
        • You might encounter some compatibility issues with your device or operating system.
        • -
        • You might face some security risks by installing apps from unknown sources.
        • -
        • You might not be able to play online with other players who have the new version.
        • -
        • You might get bored of playing the same old version over and over again.
        • -
        -

        Tips and Tricks for Dynamons World

        -

        If you want to master Dynamons World and become the best RPG battle master, you will need some tips and tricks to help you along the way. Here are some of the best tips and tricks for Dynamons World:

        -

        Know Your Dynamons

        -

        The first tip is to know your Dynamons well. You should know their types, strengths, weaknesses, skills, and evolutions. You should also know how to match them against your opponents' Dynamons and turn type advantages in your favor; the sketch below illustrates the idea. For example, water-type Dynamons are strong against fire-type Dynamons, but weak against electric-type Dynamons. You should also know which Dynamons are rare and powerful, and try to catch them whenever you can.
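        These matchups lend themselves to a simple lookup table. The Python sketch below encodes only the two matchups mentioned above as a hypothetical effectiveness chart; the real game's full chart and its exact multipliers are not documented in this article.

```python
# Hypothetical type-effectiveness chart built only from the matchups the
# article mentions; the actual in-game chart is larger.
EFFECTIVENESS = {
    ("water", "fire"): 2.0,      # water is strong against fire
    ("electric", "water"): 2.0,  # electric is strong against water
    ("fire", "water"): 0.5,      # reverse matchups are weak
    ("water", "electric"): 0.5,
}


def multiplier(attacker: str, defender: str) -> float:
    """Damage multiplier for an attacker-vs-defender matchup (1.0 = neutral)."""
    return EFFECTIVENESS.get((attacker, defender), 1.0)


print(multiplier("water", "fire"))     # 2.0 - favorable matchup
print(multiplier("fire", "electric"))  # 1.0 - neutral (not listed)
```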

        -

        Use Skill Cards Wisely

        -

        Switch Your Dynamons During Battle

        -

        The third tip is to switch your Dynamons during battle. You can have up to three Dynamons in your team, and you can switch them anytime during the battle. Switching your Dynamons can give you an edge over your opponent, as you can adapt to their moves and counter them with the best Dynamon for the situation. For example, if your opponent uses a fire-type Dynamon, you can switch to a water-type Dynamon to deal more damage and take less damage. However, you should also be careful not to switch too often, as switching will cost you a turn and leave you vulnerable to attacks.

        -

        Watch Ads for Rewards

        -

        The fourth tip is to watch ads for rewards. Dynamons World is a free-to-play game, but it also has in-app purchases and ads that can help you progress faster and more easily. You can watch ads to get free coins, gems, items, skill cards, and even rare Dynamons. You can also watch ads to double your rewards after winning a battle or completing a quest. However, be mindful of your time and data usage, as watching too many ads can be annoying and costly.

        -

        Join the Community

        -

        The fifth tip is to join the community of Dynamons World. Dynamons World has a large and active community of players who love the game and share their experiences, tips, tricks, feedback, and suggestions. You can join the community by visiting the official website, Facebook page, Twitter account, YouTube channel, or Discord server of Dynamons World. You can also join the forums and chat rooms of other websites that host the game. By joining the community, you can make new friends, learn new things, get help, and have more fun playing Dynamons World.

        -

        Conclusion

        -

        Dynamons World is a fantastic RPG game that lets you catch, train, and battle with amazing creatures called Dynamons. You can play the old version of Dynamons World on your Android device by downloading and installing the APK file from a reliable source. You can enjoy the features of the old version, such as the online battle arena, catching and training Dynamons, unleashing powerful skills and tactics, and traveling across an open world. You can also use some tips and tricks to improve your skills and strategies in the game. If you are looking for a fun and addictive RPG game that will keep you entertained for hours, you should definitely try the Dynamons World APK old version!

        -

        FAQs

        -

        Here are some of the frequently asked questions about Dynamons World APK old version:

        -
          -
        • Q: Is Dynamons World APK old version safe to download and install?
        • -
        • A: Yes, as long as you download and install the APK file from a reputable site that offers it for free. You should also scan the file with an antivirus app before installing it on your device.
        • -
        • Q: What is the difference between Dynamons World APK old version and new version?
        • -
        • A: The main difference is that the old version has some features that are not available in the new version, such as offline mode, original gameplay and graphics, and no bugs or glitches. However, the old version also lacks some features that are added in the new version, such as new Dynamons, quests, battles, areas, items, events, and more.
        • -
        • Q: How can I update Dynamons World APK old version to the new version?
        • -
        • A: You can update Dynamons World APK old version to the new version by downloading and installing the latest APK file from the Google Play Store or any other source that offers it. You can also check for updates within the game by tapping on the settings icon and then on the update button.
        • -
        • Q: Can I play online with other players who have the new version?
        • -
        • A: No, you cannot play online with other players who have the new version. You can only play online with other players who have the same version as you.
        • -
        • Q: Can I transfer my progress from Dynamons World APK old version to the new version?
        • -
        • A: Yes, you can transfer your progress from Dynamons World APK old version to the new version by logging in with your Facebook account or Google Play Games account. You can also sync your progress across multiple devices by using these accounts.
        • -

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Bedam Shah Warsi Kalam In Urdu Pdf Download High Quality.md b/spaces/tioseFevbu/cartoon-converter/scripts/Bedam Shah Warsi Kalam In Urdu Pdf Download High Quality.md deleted file mode 100644 index b808e98fde3f6df8732cee80e93583578ddc0351..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Bedam Shah Warsi Kalam In Urdu Pdf Download High Quality.md +++ /dev/null @@ -1,16 +0,0 @@ -
        -

        Download Kalaam E Bedam by Bedam Shah Warsi in Urdu PDF

        -

        Bedam Shah Warsi was a renowned Urdu poet and Sufi saint who lived in India from 1876 to 1936. He was a disciple of Hazrat Qazi Qadir Baksh Warsi, the founder of the Warsi order of Sufism. Bedam Shah Warsi wrote many poems on love, devotion, mysticism and ethics, which are collected in his book Kalaam E Bedam.

        -

        Kalaam E Bedam is a treasure of Urdu poetry that showcases the spiritual and literary genius of Bedam Shah Warsi. The book contains his famous verses such as "be-KHud kiye dete hain andaz-e-hijabana", "ishq ki ibtida bhi tum husn ki intiha bhi tum", "aah ko chahiye ek umr asar hote tak" and many more. The book also includes his biography, his teachings and his miracles.

        -

        bedam shah warsi kalam in urdu pdf download


        Download Zip ✶✶✶ https://urlcod.com/2uHxd0



        -

        If you are interested in reading Kalaam E Bedam by Bedam Shah Warsi in Urdu PDF format, you can download it for free from the Internet Archive website[^1^]. You can also read his kalaam online on Sufinama[^2^] and Rekhta[^3^] websites, where you can find his poetry in Hindi, Urdu and Roman scripts. You can also watch videos, listen to audios and access ebooks of his poetry on these websites.

        -

        Kalaam E Bedam by Bedam Shah Warsi is a must-read for anyone who loves Urdu poetry and Sufism. It will inspire you with its beauty, wisdom and grace.

        - -

        Some of the themes that Bedam Shah Warsi explored in his poetry are the love of God, the love of the Prophet Muhammad (peace be upon him), the love of the saints, the love of humanity, the unity of existence, the stages of spiritual journey, the secrets of the heart, the mysteries of the soul, the ethics of Sufism and the critique of hypocrisy. He used various poetic forms such as ghazal, rubaiyat, qasida and masnavi to express his thoughts and feelings.

        -

        Bedam Shah Warsi was not only a poet but also a scholar, a teacher and a guide. He had a deep knowledge of Quran, Hadith, Fiqh, Tasawwuf and other Islamic sciences. He taught many students and disciples who became eminent scholars and poets themselves. He also performed many miracles and helped many people with his blessings and prayers. He was respected and loved by people from all walks of life and religions.

        -

        Bedam Shah Warsi passed away in 1936 in Barabanki, Uttar Pradesh, where his shrine is located. His shrine is visited by thousands of devotees every year who seek his intercession and grace. His kalaam is also recited and sung by many singers and qawwals who spread his message of love and peace. Bedam Shah Warsi is one of the most celebrated poets and saints of Urdu literature and Sufism.

        -

        - -

        In conclusion, Kalaam E Bedam by Bedam Shah Warsi is a masterpiece of Urdu poetry and Sufism that deserves to be read and appreciated by everyone. It is a book that will enrich your mind, heart and soul with its profound and beautiful verses. You can download Kalaam E Bedam by Bedam Shah Warsi in Urdu PDF format from the Internet Archive website or read it online on Sufinama and Rekhta websites. You can also enjoy his poetry in various audio and video formats on these websites. Kalaam E Bedam by Bedam Shah Warsi is a gift of love and wisdom that will never lose its charm and relevance.

        e93f5a0c3f
        -
        -
        \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py deleted file mode 100644 index 858a41014169b8f0eb1b905fa3bb69c753a1bda5..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/commands/__init__.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Package containing all pip commands -""" - -import importlib -from collections import namedtuple -from typing import Any, Dict, Optional - -from pip._internal.cli.base_command import Command - -CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary") - -# This dictionary does a bunch of heavy lifting for help output: -# - Enables avoiding additional (costly) imports for presenting `--help`. -# - The ordering matters for help display. -# -# Even though the module path starts with the same "pip._internal.commands" -# prefix, the full path makes testing easier (specifically when modifying -# `commands_dict` in test setup / teardown). -commands_dict: Dict[str, CommandInfo] = { - "install": CommandInfo( - "pip._internal.commands.install", - "InstallCommand", - "Install packages.", - ), - "download": CommandInfo( - "pip._internal.commands.download", - "DownloadCommand", - "Download packages.", - ), - "uninstall": CommandInfo( - "pip._internal.commands.uninstall", - "UninstallCommand", - "Uninstall packages.", - ), - "freeze": CommandInfo( - "pip._internal.commands.freeze", - "FreezeCommand", - "Output installed packages in requirements format.", - ), - "inspect": CommandInfo( - "pip._internal.commands.inspect", - "InspectCommand", - "Inspect the python environment.", - ), - "list": CommandInfo( - "pip._internal.commands.list", - "ListCommand", - "List installed packages.", - ), - "show": CommandInfo( - "pip._internal.commands.show", - "ShowCommand", - "Show information about installed packages.", - ), - "check": CommandInfo( - "pip._internal.commands.check", - "CheckCommand", - "Verify installed packages have compatible dependencies.", - ), - "config": CommandInfo( - "pip._internal.commands.configuration", - "ConfigurationCommand", - "Manage local and global configuration.", - ), - "search": CommandInfo( - "pip._internal.commands.search", - "SearchCommand", - "Search PyPI for packages.", - ), - "cache": CommandInfo( - "pip._internal.commands.cache", - "CacheCommand", - "Inspect and manage pip's wheel cache.", - ), - "index": CommandInfo( - "pip._internal.commands.index", - "IndexCommand", - "Inspect information available from package indexes.", - ), - "wheel": CommandInfo( - "pip._internal.commands.wheel", - "WheelCommand", - "Build wheels from your requirements.", - ), - "hash": CommandInfo( - "pip._internal.commands.hash", - "HashCommand", - "Compute hashes of package archives.", - ), - "completion": CommandInfo( - "pip._internal.commands.completion", - "CompletionCommand", - "A helper command used for command completion.", - ), - "debug": CommandInfo( - "pip._internal.commands.debug", - "DebugCommand", - "Show information useful for debugging.", - ), - "help": CommandInfo( - "pip._internal.commands.help", - "HelpCommand", - "Show help for commands.", - ), -} - - -def create_command(name: str, **kwargs: Any) -> Command: - """ - Create an instance of the Command class with the given name. 
- """ - module_path, class_name, summary = commands_dict[name] - module = importlib.import_module(module_path) - command_class = getattr(module, class_name) - command = command_class(name=name, summary=summary, **kwargs) - - return command - - -def get_similar_commands(name: str) -> Optional[str]: - """Command name auto-correct.""" - from difflib import get_close_matches - - name = name.lower() - - close_commands = get_close_matches(name, commands_dict.keys()) - - if close_commands: - return close_commands[0] - else: - return None diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/vcs/git.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/vcs/git.py deleted file mode 100644 index 8d1d499376744954308bdf96f80e5b5a39a24195..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/vcs/git.py +++ /dev/null @@ -1,526 +0,0 @@ -import logging -import os.path -import pathlib -import re -import urllib.parse -import urllib.request -from typing import List, Optional, Tuple - -from pip._internal.exceptions import BadCommand, InstallationError -from pip._internal.utils.misc import HiddenText, display_path, hide_url -from pip._internal.utils.subprocess import make_command -from pip._internal.vcs.versioncontrol import ( - AuthInfo, - RemoteNotFoundError, - RemoteNotValidError, - RevOptions, - VersionControl, - find_path_to_project_root_from_repo_root, - vcs, -) - -urlsplit = urllib.parse.urlsplit -urlunsplit = urllib.parse.urlunsplit - - -logger = logging.getLogger(__name__) - - -GIT_VERSION_REGEX = re.compile( - r"^git version " # Prefix. - r"(\d+)" # Major. - r"\.(\d+)" # Dot, minor. - r"(?:\.(\d+))?" # Optional dot, patch. - r".*$" # Suffix, including any pre- and post-release segments we don't care about. -) - -HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$") - -# SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git' -SCP_REGEX = re.compile( - r"""^ - # Optional user, e.g. 'git@' - (\w+@)? - # Server, e.g. 'github.com'. - ([^/:]+): - # The server-side path. e.g. 'user/project.git'. Must start with an - # alphanumeric character so as not to be confusable with a Windows paths - # like 'C:/foo/bar' or 'C:\foo\bar'. 
- (\w[^:]*) - $""", - re.VERBOSE, -) - - -def looks_like_hash(sha: str) -> bool: - return bool(HASH_REGEX.match(sha)) - - -class Git(VersionControl): - name = "git" - dirname = ".git" - repo_name = "clone" - schemes = ( - "git+http", - "git+https", - "git+ssh", - "git+git", - "git+file", - ) - # Prevent the user's environment variables from interfering with pip: - # https://github.com/pypa/pip/issues/1130 - unset_environ = ("GIT_DIR", "GIT_WORK_TREE") - default_arg_rev = "HEAD" - - @staticmethod - def get_base_rev_args(rev: str) -> List[str]: - return [rev] - - def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: - _, rev_options = self.get_url_rev_options(hide_url(url)) - if not rev_options.rev: - return False - if not self.is_commit_id_equal(dest, rev_options.rev): - # the current commit is different from rev, - # which means rev was something else than a commit hash - return False - # return False in the rare case rev is both a commit hash - # and a tag or a branch; we don't want to cache in that case - # because that branch/tag could point to something else in the future - is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0]) - return not is_tag_or_branch - - def get_git_version(self) -> Tuple[int, ...]: - version = self.run_command( - ["version"], - command_desc="git version", - show_stdout=False, - stdout_only=True, - ) - match = GIT_VERSION_REGEX.match(version) - if not match: - logger.warning("Can't parse git version: %s", version) - return () - return tuple(int(c) for c in match.groups()) - - @classmethod - def get_current_branch(cls, location: str) -> Optional[str]: - """ - Return the current branch, or None if HEAD isn't at a branch - (e.g. detached HEAD). - """ - # git-symbolic-ref exits with empty stdout if "HEAD" is a detached - # HEAD rather than a symbolic ref. In addition, the -q causes the - # command to exit with status code 1 instead of 128 in this case - # and to suppress the message to stderr. - args = ["symbolic-ref", "-q", "HEAD"] - output = cls.run_command( - args, - extra_ok_returncodes=(1,), - show_stdout=False, - stdout_only=True, - cwd=location, - ) - ref = output.strip() - - if ref.startswith("refs/heads/"): - return ref[len("refs/heads/") :] - - return None - - @classmethod - def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]: - """ - Return (sha_or_none, is_branch), where sha_or_none is a commit hash - if the revision names a remote branch or tag, otherwise None. - - Args: - dest: the repository directory. - rev: the revision name. - """ - # Pass rev to pre-filter the list. - output = cls.run_command( - ["show-ref", rev], - cwd=dest, - show_stdout=False, - stdout_only=True, - on_returncode="ignore", - ) - refs = {} - # NOTE: We do not use splitlines here since that would split on other - # unicode separators, which can be maliciously used to install a - # different revision. - for line in output.strip().split("\n"): - line = line.rstrip("\r") - if not line: - continue - try: - ref_sha, ref_name = line.split(" ", maxsplit=2) - except ValueError: - # Include the offending line to simplify troubleshooting if - # this error ever occurs. 
- raise ValueError(f"unexpected show-ref line: {line!r}") - - refs[ref_name] = ref_sha - - branch_ref = f"refs/remotes/origin/{rev}" - tag_ref = f"refs/tags/{rev}" - - sha = refs.get(branch_ref) - if sha is not None: - return (sha, True) - - sha = refs.get(tag_ref) - - return (sha, False) - - @classmethod - def _should_fetch(cls, dest: str, rev: str) -> bool: - """ - Return true if rev is a ref or is a commit that we don't have locally. - - Branches and tags are not considered in this method because they are - assumed to be always available locally (which is a normal outcome of - ``git clone`` and ``git fetch --tags``). - """ - if rev.startswith("refs/"): - # Always fetch remote refs. - return True - - if not looks_like_hash(rev): - # Git fetch would fail with abbreviated commits. - return False - - if cls.has_commit(dest, rev): - # Don't fetch if we have the commit locally. - return False - - return True - - @classmethod - def resolve_revision( - cls, dest: str, url: HiddenText, rev_options: RevOptions - ) -> RevOptions: - """ - Resolve a revision to a new RevOptions object with the SHA1 of the - branch, tag, or ref if found. - - Args: - rev_options: a RevOptions object. - """ - rev = rev_options.arg_rev - # The arg_rev property's implementation for Git ensures that the - # rev return value is always non-None. - assert rev is not None - - sha, is_branch = cls.get_revision_sha(dest, rev) - - if sha is not None: - rev_options = rev_options.make_new(sha) - rev_options.branch_name = rev if is_branch else None - - return rev_options - - # Do not show a warning for the common case of something that has - # the form of a Git commit hash. - if not looks_like_hash(rev): - logger.warning( - "Did not find branch or tag '%s', assuming revision or ref.", - rev, - ) - - if not cls._should_fetch(dest, rev): - return rev_options - - # fetch the requested revision - cls.run_command( - make_command("fetch", "-q", url, rev_options.to_args()), - cwd=dest, - ) - # Change the revision to the SHA of the ref we fetched - sha = cls.get_revision(dest, rev="FETCH_HEAD") - rev_options = rev_options.make_new(sha) - - return rev_options - - @classmethod - def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: - """ - Return whether the current commit hash equals the given name. - - Args: - dest: the repository directory. - name: a string name. - """ - if not name: - # Then avoid an unnecessary subprocess call. - return False - - return cls.get_revision(dest) == name - - def fetch_new( - self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int - ) -> None: - rev_display = rev_options.to_display() - logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest)) - if verbosity <= 0: - flags: Tuple[str, ...] = ("--quiet",) - elif verbosity == 1: - flags = () - else: - flags = ("--verbose", "--progress") - if self.get_git_version() >= (2, 17): - # Git added support for partial clone in 2.17 - # https://git-scm.com/docs/partial-clone - # Speeds up cloning by functioning without a complete copy of repository - self.run_command( - make_command( - "clone", - "--filter=blob:none", - *flags, - url, - dest, - ) - ) - else: - self.run_command(make_command("clone", *flags, url, dest)) - - if rev_options.rev: - # Then a specific revision was requested. 
- rev_options = self.resolve_revision(dest, url, rev_options) - branch_name = getattr(rev_options, "branch_name", None) - logger.debug("Rev options %s, branch_name %s", rev_options, branch_name) - if branch_name is None: - # Only do a checkout if the current commit id doesn't match - # the requested revision. - if not self.is_commit_id_equal(dest, rev_options.rev): - cmd_args = make_command( - "checkout", - "-q", - rev_options.to_args(), - ) - self.run_command(cmd_args, cwd=dest) - elif self.get_current_branch(dest) != branch_name: - # Then a specific branch was requested, and that branch - # is not yet checked out. - track_branch = f"origin/{branch_name}" - cmd_args = [ - "checkout", - "-b", - branch_name, - "--track", - track_branch, - ] - self.run_command(cmd_args, cwd=dest) - else: - sha = self.get_revision(dest) - rev_options = rev_options.make_new(sha) - - logger.info("Resolved %s to commit %s", url, rev_options.rev) - - #: repo may contain submodules - self.update_submodules(dest) - - def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - self.run_command( - make_command("config", "remote.origin.url", url), - cwd=dest, - ) - cmd_args = make_command("checkout", "-q", rev_options.to_args()) - self.run_command(cmd_args, cwd=dest) - - self.update_submodules(dest) - - def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - # First fetch changes from the default remote - if self.get_git_version() >= (1, 9): - # fetch tags in addition to everything else - self.run_command(["fetch", "-q", "--tags"], cwd=dest) - else: - self.run_command(["fetch", "-q"], cwd=dest) - # Then reset to wanted revision (maybe even origin/master) - rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args()) - self.run_command(cmd_args, cwd=dest) - #: update submodules - self.update_submodules(dest) - - @classmethod - def get_remote_url(cls, location: str) -> str: - """ - Return URL of the first remote encountered. - - Raises RemoteNotFoundError if the repository does not have a remote - url configured. - """ - # We need to pass 1 for extra_ok_returncodes since the command - # exits with return code 1 if there are no matching lines. - stdout = cls.run_command( - ["config", "--get-regexp", r"remote\..*\.url"], - extra_ok_returncodes=(1,), - show_stdout=False, - stdout_only=True, - cwd=location, - ) - remotes = stdout.splitlines() - try: - found_remote = remotes[0] - except IndexError: - raise RemoteNotFoundError - - for remote in remotes: - if remote.startswith("remote.origin.url "): - found_remote = remote - break - url = found_remote.split(" ")[1] - return cls._git_remote_to_pip_url(url.strip()) - - @staticmethod - def _git_remote_to_pip_url(url: str) -> str: - """ - Convert a remote url from what git uses to what pip accepts. - - There are 3 legal forms **url** may take: - - 1. A fully qualified url: ssh://git@example.com/foo/bar.git - 2. A local project.git folder: /path/to/bare/repository.git - 3. SCP shorthand for form 1: git@example.com:foo/bar.git - - Form 1 is output as-is. Form 2 must be converted to URI and form 3 must - be converted to form 1. - - See the corresponding test test_git_remote_url_to_pip() for examples of - sample inputs/outputs. - """ - if re.match(r"\w+://", url): - # This is already valid. Pass it though as-is. - return url - if os.path.exists(url): - # A local bare remote (git clone --mirror). - # Needs a file:// prefix. 
- return pathlib.PurePath(url).as_uri() - scp_match = SCP_REGEX.match(url) - if scp_match: - # Add an ssh:// prefix and replace the ':' with a '/'. - return scp_match.expand(r"ssh://\1\2/\3") - # Otherwise, bail out. - raise RemoteNotValidError(url) - - @classmethod - def has_commit(cls, location: str, rev: str) -> bool: - """ - Check if rev is a commit that is available in the local repository. - """ - try: - cls.run_command( - ["rev-parse", "-q", "--verify", "sha^" + rev], - cwd=location, - log_failed_cmd=False, - ) - except InstallationError: - return False - else: - return True - - @classmethod - def get_revision(cls, location: str, rev: Optional[str] = None) -> str: - if rev is None: - rev = "HEAD" - current_rev = cls.run_command( - ["rev-parse", rev], - show_stdout=False, - stdout_only=True, - cwd=location, - ) - return current_rev.strip() - - @classmethod - def get_subdirectory(cls, location: str) -> Optional[str]: - """ - Return the path to Python project root, relative to the repo root. - Return None if the project root is in the repo root. - """ - # find the repo root - git_dir = cls.run_command( - ["rev-parse", "--git-dir"], - show_stdout=False, - stdout_only=True, - cwd=location, - ).strip() - if not os.path.isabs(git_dir): - git_dir = os.path.join(location, git_dir) - repo_root = os.path.abspath(os.path.join(git_dir, "..")) - return find_path_to_project_root_from_repo_root(location, repo_root) - - @classmethod - def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: - """ - Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. - That's required because although they use SSH they sometimes don't - work with a ssh:// scheme (e.g. GitHub). But we need a scheme for - parsing. Hence we remove it again afterwards and return it as a stub. 
- """ - # Works around an apparent Git bug - # (see https://article.gmane.org/gmane.comp.version-control.git/146500) - scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith("file"): - initial_slashes = path[: -len(path.lstrip("/"))] - newpath = initial_slashes + urllib.request.url2pathname(path).replace( - "\\", "/" - ).lstrip("/") - after_plus = scheme.find("+") + 1 - url = scheme[:after_plus] + urlunsplit( - (scheme[after_plus:], netloc, newpath, query, fragment), - ) - - if "://" not in url: - assert "file:" not in url - url = url.replace("git+", "git+ssh://") - url, rev, user_pass = super().get_url_rev_and_auth(url) - url = url.replace("ssh://", "") - else: - url, rev, user_pass = super().get_url_rev_and_auth(url) - - return url, rev, user_pass - - @classmethod - def update_submodules(cls, location: str) -> None: - if not os.path.exists(os.path.join(location, ".gitmodules")): - return - cls.run_command( - ["submodule", "update", "--init", "--recursive", "-q"], - cwd=location, - ) - - @classmethod - def get_repository_root(cls, location: str) -> Optional[str]: - loc = super().get_repository_root(location) - if loc: - return loc - try: - r = cls.run_command( - ["rev-parse", "--show-toplevel"], - cwd=location, - show_stdout=False, - stdout_only=True, - on_returncode="raise", - log_failed_cmd=False, - ) - except BadCommand: - logger.debug( - "could not determine if %s is under git control " - "because git is not available", - location, - ) - return None - except InstallationError: - return None - return os.path.normpath(r.rstrip("\r\n")) - - @staticmethod - def should_add_vcs_url_prefix(repo_url: str) -> bool: - """In either https or ssh form, requirements must be prefixed with git+.""" - return True - - -vcs.register(Git) diff --git a/spaces/tomofi/ABINet-OCR/modules/resnet.py b/spaces/tomofi/ABINet-OCR/modules/resnet.py deleted file mode 100644 index 5ffb908ff8bf874a496c9f4fad2eb04f49cadf44..0000000000000000000000000000000000000000 --- a/spaces/tomofi/ABINet-OCR/modules/resnet.py +++ /dev/null @@ -1,104 +0,0 @@ -import math - -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.model_zoo as model_zoo - - -def conv1x1(in_planes, out_planes, stride=1): - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv1x1(inplanes, planes) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes, stride) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers): - self.inplanes = 32 - super(ResNet, self).__init__() - self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, - bias=False) - self.bn1 = nn.BatchNorm2d(32) - self.relu = nn.ReLU(inplace=True) - - self.layer1 = self._make_layer(block, 32, layers[0], stride=2) - self.layer2 = 
self._make_layer(block, 64, layers[1], stride=1) - self.layer3 = self._make_layer(block, 128, layers[2], stride=2) - self.layer4 = self._make_layer(block, 256, layers[3], stride=1) - self.layer5 = self._make_layer(block, 512, layers[4], stride=1) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.layer5(x) - return x - - -def resnet45(): - return ResNet(BasicBlock, [3, 4, 6, 6, 3]) diff --git a/spaces/tomofi/MMOCR/tests/test_models/test_label_convertor/test_attn_label_convertor.py b/spaces/tomofi/MMOCR/tests/test_models/test_label_convertor/test_attn_label_convertor.py deleted file mode 100644 index 62c53466a4c2a6c54a12d940df4a0afcd5b01a92..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_models/test_label_convertor/test_attn_label_convertor.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp -import tempfile - -import numpy as np -import pytest -import torch - -from mmocr.models.textrecog.convertors import ABIConvertor, AttnConvertor - - -def _create_dummy_dict_file(dict_file): - characters = list('helowrd') - with open(dict_file, 'w') as fw: - for char in characters: - fw.write(char + '\n') - - -def test_attn_label_convertor(): - tmp_dir = tempfile.TemporaryDirectory() - # create dummy data - dict_file = osp.join(tmp_dir.name, 'fake_dict.txt') - _create_dummy_dict_file(dict_file) - - # test invalid arguments - with pytest.raises(AssertionError): - AttnConvertor(5) - with pytest.raises(AssertionError): - AttnConvertor('DICT90', dict_file, '1') - with pytest.raises(AssertionError): - AttnConvertor('DICT90', dict_file, True, '1') - - label_convertor = AttnConvertor(dict_file=dict_file, max_seq_len=10) - # test init and parse_dict - assert label_convertor.num_classes() == 10 - assert len(label_convertor.idx2char) == 10 - assert label_convertor.idx2char[0] == 'h' - assert label_convertor.idx2char[1] == 'e' - assert label_convertor.idx2char[-3] == '' - assert label_convertor.char2idx['h'] == 0 - assert label_convertor.unknown_idx == 7 - - # test encode str to tensor - strings = ['hell'] - targets_dict = label_convertor.str2tensor(strings) - assert torch.allclose(targets_dict['targets'][0], - torch.LongTensor([0, 1, 2, 2])) - assert torch.allclose(targets_dict['padded_targets'][0], - torch.LongTensor([8, 0, 1, 2, 2, 8, 9, 9, 9, 9])) - - # test decode output to index - dummy_output = torch.Tensor([[[100, 2, 3, 4, 5, 6, 7, 8, 9], - [1, 100, 3, 4, 5, 6, 7, 8, 9], - [1, 2, 100, 4, 5, 6, 7, 8, 9], - [1, 2, 100, 4, 5, 6, 7, 8, 9], - [1, 2, 3, 4, 5, 6, 7, 8, 100], - [1, 2, 3, 4, 5, 6, 7, 100, 9], - [1, 2, 3, 4, 5, 6, 7, 100, 9], - [1, 2, 3, 4, 5, 6, 7, 100, 9], - [1, 2, 3, 4, 5, 6, 7, 100, 9], - [1, 2, 3, 4, 5, 6, 7, 100, 9]]]) - indexes, scores = label_convertor.tensor2idx(dummy_output) - assert np.allclose(indexes, [[0, 1, 2, 2]]) - - # test encode_str_label_to_index - with pytest.raises(AssertionError): - label_convertor.str2idx('hell') - tmp_indexes = label_convertor.str2idx(strings) - assert np.allclose(tmp_indexes, [[0, 1, 2, 2]]) - - # test decode_index to str_label - input_indexes = [[0, 1, 2, 2]] - with pytest.raises(AssertionError): - label_convertor.idx2str('hell') - output_strings = label_convertor.idx2str(input_indexes) - assert output_strings[0] == 'hell' - - tmp_dir.cleanup() - - -def test_abi_label_convertor(): - tmp_dir = tempfile.TemporaryDirectory() - # create dummy data - dict_file = osp.join(tmp_dir.name, 'fake_dict.txt') - _create_dummy_dict_file(dict_file) - - label_convertor = ABIConvertor(dict_file=dict_file, max_seq_len=10) - - label_convertor.end_idx - # test encode str to tensor - strings = ['hell'] - targets_dict = label_convertor.str2tensor(strings) - assert torch.allclose(targets_dict['targets'][0], - torch.LongTensor([0, 1, 2, 2, 8])) - assert torch.allclose(targets_dict['padded_targets'][0], - torch.LongTensor([8, 0, 1, 2, 2, 8, 9, 9, 9, 9])) - - strings = ['hellhellhell'] - targets_dict = label_convertor.str2tensor(strings) - assert torch.allclose(targets_dict['targets'][0], - torch.LongTensor([0, 1, 2, 2, 0, 1, 2, 2, 0, 8])) - assert torch.allclose(targets_dict['padded_targets'][0], - torch.LongTensor([8, 0, 1, 2, 2, 0, 1, 2, 2, 0])) - - tmp_dir.cleanup() diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py 
b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py deleted file mode 100644 index b4b050dda5d2d752c0db3c83c434879c8765a272..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict(bbox_head=dict(num_classes=20)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index 70f89e227ec64b5c7224375aac0cf7ae3a10a29e..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' - -model = dict( - bbox_head=dict( - type='PISARetinaHead', - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd512_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd512_coco.py deleted file mode 100644 index 3219d6d667cb185e6fa4f1954d632ccad9512a48..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pisa/pisa_ssd512_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../ssd/ssd512_coco.py' - -model = dict( - bbox_head=dict(type='PISASSDHead'), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) - -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sparse_rcnn/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sparse_rcnn/README.md deleted file mode 100644 index bd5f1571fd4b9ff8ea83bb567919e8de96975e91..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sparse_rcnn/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Sparse R-CNN: End-to-End Object Detection with Learnable Proposals - -## Introduction - - - -``` -@article{peize2020sparse, - title = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals}, - author = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo}, - journal = {arXiv preprint arXiv:2011.12450}, - year = {2020} -} -``` - -## Results and Models - -| Model | Backbone | Style | Lr schd | Number of Proposals |Multi-Scale| RandomCrop | box AP | Config | Download | -|:------------:|:---------:|:-------:|:-------:|:-------: |:-------: |:---------:|:------:|:------:|:--------:| -| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) | -| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) | -| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) | -| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) | -| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) | - -### Notes - -We observe about 0.3 AP noise especially when using ResNet-101 as the backbone. 
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/README.md deleted file mode 100644 index bef916dc7dbc65b077ea4a48a2a6c010be7255b8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# SSD: Single Shot MultiBox Detector - -## Introduction - - - -```latex -@article{Liu_2016, - title={SSD: Single Shot MultiBox Detector}, - journal={ECCV}, - author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.}, - year={2016}, -} -``` - -## Results and models - -| Backbone | Size | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :---: | :---: | :-----: | :------: | :------------: | :----: | :------: | :--------: | -| VGG16 | 300 | caffe | 120e | 10.2 | 43.7 | 25.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd300_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20200307-a92d2092.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20200307_174216.log.json) | -| VGG16 | 512 | caffe | 120e | 9.3 | 30.7 | 29.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd512_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20200308-038c5591.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20200308_134447.log.json) | diff --git a/spaces/tomzhang1019/ChatGPT/Dockerfile b/spaces/tomzhang1019/ChatGPT/Dockerfile deleted file mode 100644 index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000 --- a/spaces/tomzhang1019/ChatGPT/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -COPY requirements_advanced.txt . -RUN pip install --user -r requirements.txt -# RUN pip install --user -r requirements_advanced.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . /app -WORKDIR /app -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/tomzhang1019/ChatGPT/modules/models.py b/spaces/tomzhang1019/ChatGPT/modules/models.py deleted file mode 100644 index 25b18b1904910e183a997a763008403d960868d6..0000000000000000000000000000000000000000 --- a/spaces/tomzhang1019/ChatGPT/modules/models.py +++ /dev/null @@ -1,625 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import platform -import base64 -from io import BytesIO -from PIL import Image - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum -import uuid - -from .presets import * -from .llama_func import * -from .utils import * -from . 
import shared -from .config import retrieve_proxy -from modules import config -from .base_model import BaseLLMModel, ModelType - - -class OpenAIClient(BaseLLMModel): - def __init__( - self, - model_name, - api_key, - system_prompt=INITIAL_SYSTEM_PROMPT, - temperature=1.0, - top_p=1.0, - ) -> None: - super().__init__( - model_name=model_name, - temperature=temperature, - top_p=top_p, - system_prompt=system_prompt, - ) - self.api_key = api_key - self.need_api_key = True - self._refresh_header() - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def get_answer_at_once(self): - response = self._get_response() - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - total_token_count = response["usage"]["total_tokens"] - return content, total_token_count - - def count_token(self, user_input): - input_token_count = count_token(construct_user(user_input)) - if self.system_prompt is not None and len(self.all_token_counts) == 0: - system_prompt_token_count = count_token( - construct_system(self.system_prompt) - ) - return input_token_count + system_prompt_token_count - return input_token_count - - def billing_info(self): - try: - curr_time = datetime.datetime.now() - last_day_of_month = get_last_day_of_month( - curr_time).strftime("%Y-%m-%d") - first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") - usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" - try: - usage_data = self._get_billing_data(usage_url) - except Exception as e: - logging.error(f"获取API使用情况失败:" + str(e)) - return i18n("**获取API使用情况失败**") - rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) - return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" - except requests.exceptions.ConnectTimeout: - status_text = ( - STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - ) - return status_text - except requests.exceptions.ReadTimeout: - status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - return status_text - except Exception as e: - import traceback - traceback.print_exc() - logging.error(i18n("获取API使用情况失败:") + str(e)) - return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG - - def set_token_upper_limit(self, new_upper_limit): - pass - - @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 - def _get_response(self, stream=False): - openai_api_key = self.api_key - system_prompt = self.system_prompt - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - if system_prompt is not None: - history = [construct_system(system_prompt), *history] - - payload = { - "model": self.model_name, - "messages": history, - "temperature": self.temperature, - "top_p": self.top_p, - "n": self.n_choices, - "stream": stream, - "presence_penalty": self.presence_penalty, - "frequency_penalty": self.frequency_penalty, - } - - if self.max_generation_token is not None: - payload["max_tokens"] = self.max_generation_token - if self.stop_sequence is not None: - payload["stop"] = self.stop_sequence - if self.logit_bias is not None: - payload["logit_bias"] = self.logit_bias - if self.user_identifier is not None: - payload["user"] = 
self.user_identifier - - if stream: - timeout = TIMEOUT_STREAMING - else: - timeout = TIMEOUT_ALL - - # If a custom api-host is configured, send the request there; otherwise use the default endpoint - if shared.state.completion_url != COMPLETION_URL: - logging.info(f"使用自定义API URL: {shared.state.completion_url}") - - with retrieve_proxy(): - try: - response = requests.post( - shared.state.completion_url, - headers=headers, - json=payload, - stream=stream, - timeout=timeout, - ) - except Exception: - return None - return response - - def _refresh_header(self): - self.headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.api_key}", - } - - def _get_billing_data(self, billing_url): - with retrieve_proxy(): - response = requests.get( - billing_url, - headers=self.headers, - timeout=TIMEOUT_ALL, - ) - - if response.status_code == 200: - data = response.json() - return data - else: - raise Exception( - f"API request failed with status code {response.status_code}: {response.text}" - ) - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if chunk["choices"][0]["finish_reason"] == "stop": - break - try: - yield chunk["choices"][0]["delta"]["content"] - except Exception as e: - # logging.error(f"Error: {e}") - continue - if error_msg: - raise Exception(error_msg) - - def set_key(self, new_access_key): - ret = super().set_key(new_access_key) - self._refresh_header() - return ret - - -class ChatGLM_Client(BaseLLMModel): - def __init__(self, model_name) -> None: - super().__init__(model_name=model_name) - from transformers import AutoTokenizer, AutoModel - import torch - global CHATGLM_TOKENIZER, CHATGLM_MODEL - if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None: - system_name = platform.system() - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"THUDM/{model_name}" - CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained( - model_source, trust_remote_code=True - ) - quantified = False - if "int4" in model_name: - quantified = True - model = AutoModel.from_pretrained( - model_source, trust_remote_code=True - ) - if torch.cuda.is_available(): - # run on CUDA - logging.info("CUDA is available, using CUDA") - model = model.half().cuda() - # MPS acceleration still has some issues, so it is not used for now - elif system_name == "Darwin" and model_path is not None and not quantified: - logging.info("Running on macOS, using MPS") - # running on macOS and model already downloaded - model = model.half().to("mps") - else: - logging.info("GPU is not available, using CPU") - model = model.float() - model = model.eval() - CHATGLM_MODEL = model - - def _get_glm_style_input(self): - history = [x["content"] for x in self.history] - query = history.pop() - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - assert ( - len(history) % 2 == 0 - ), f"History should be even length. 
current history is: {history}" - history = [[history[i], history[i + 1]] - for i in range(0, len(history), 2)] - return history, query - - def get_answer_at_once(self): - history, query = self._get_glm_style_input() - response, _ = CHATGLM_MODEL.chat( - CHATGLM_TOKENIZER, query, history=history) - return response, len(response) - - def get_answer_stream_iter(self): - history, query = self._get_glm_style_input() - for response, history in CHATGLM_MODEL.stream_chat( - CHATGLM_TOKENIZER, - query, - history, - max_length=self.token_upper_limit, - top_p=self.top_p, - temperature=self.temperature, - ): - yield response - - -class LLaMA_Client(BaseLLMModel): - def __init__( - self, - model_name, - lora_path=None, - ) -> None: - super().__init__(model_name=model_name) - from lmflow.datasets.dataset import Dataset - from lmflow.pipeline.auto_pipeline import AutoPipeline - from lmflow.models.auto_model import AutoModel - from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments - - self.max_generation_token = 1000 - self.end_string = "\n\n" - # We don't need input data - data_args = DatasetArguments(dataset_path=None) - self.dataset = Dataset(data_args) - self.system_prompt = "" - - global LLAMA_MODEL, LLAMA_INFERENCER - if LLAMA_MODEL is None or LLAMA_INFERENCER is None: - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"decapoda-research/{model_name}" - # raise Exception(f"models目录下没有这个模型: {model_name}") - if lora_path is not None: - lora_path = f"lora/{lora_path}" - model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None, - use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True) - pipeline_args = InferencerArguments( - local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16') - - with open(pipeline_args.deepspeed, "r") as f: - ds_config = json.load(f) - LLAMA_MODEL = AutoModel.get_model( - model_args, - tune_strategy="none", - ds_config=ds_config, - ) - LLAMA_INFERENCER = AutoPipeline.get_pipeline( - pipeline_name="inferencer", - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, - ) - - def _get_llama_style_input(self): - history = [] - instruction = "" - if self.system_prompt: - instruction = (f"Instruction: {self.system_prompt}\n") - for x in self.history: - if x["role"] == "user": - history.append(f"{instruction}Input: {x['content']}") - else: - history.append(f"Output: {x['content']}") - context = "\n\n".join(history) - context += "\n\nOutput: " - return context - - def get_answer_at_once(self): - context = self._get_llama_style_input() - - input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [{"text": context}]} - ) - - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=self.max_generation_token, - temperature=self.temperature, - ) - - response = output_dataset.to_dict()["instances"][0]["text"] - return response, len(response) - - def get_answer_stream_iter(self): - context = self._get_llama_style_input() - partial_text = "" - step = 1 - for _ in range(0, self.max_generation_token, step): - 
input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [ - {"text": context + partial_text}]} - ) - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=step, - temperature=self.temperature, - ) - response = output_dataset.to_dict()["instances"][0]["text"] - if response == "" or response == self.end_string: - break - partial_text += response - yield partial_text - - -class XMChat(BaseLLMModel): - def __init__(self, api_key): - super().__init__(model_name="xmchat") - self.api_key = api_key - self.session_id = None - self.reset() - self.image_bytes = None - self.image_path = None - self.xm_history = [] - self.url = "https://xmbot.net/web" - self.last_conv_id = None - - def reset(self): - self.session_id = str(uuid.uuid4()) - self.last_conv_id = None - return [], "已重置" - - def image_to_base64(self, image_path): - # Open and load the image - img = Image.open(image_path) - - # Get the image width and height - width, height = img.size - - # Compute the scale ratio so the longest side stays within max_dimension pixels - max_dimension = 2048 - scale_ratio = min(max_dimension / width, max_dimension / height) - - if scale_ratio < 1: - # Downscale the image by the computed ratio - new_width = int(width * scale_ratio) - new_height = int(height * scale_ratio) - img = img.resize((new_width, new_height), Image.LANCZOS) - - # Convert the image to JPEG binary data - buffer = BytesIO() - if img.mode == "RGBA": - img = img.convert("RGB") - img.save(buffer, format='JPEG') - binary_image = buffer.getvalue() - - # Base64-encode the binary data - base64_image = base64.b64encode(binary_image).decode('utf-8') - - return base64_image - - def try_read_image(self, filepath): - def is_image_file(filepath): - # Check whether the file is an image - valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] - file_extension = os.path.splitext(filepath)[1].lower() - return file_extension in valid_image_extensions - - if is_image_file(filepath): - logging.info(f"读取图片文件: {filepath}") - self.image_bytes = self.image_to_base64(filepath) - self.image_path = filepath - else: - self.image_bytes = None - self.image_path = None - - def like(self): - if self.last_conv_id is None: - return "点赞失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "good" - } - response = requests.post(self.url, json=data) - return "👍点赞成功,感谢反馈~" - - def dislike(self): - if self.last_conv_id is None: - return "点踩失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "bad" - } - response = requests.post(self.url, json=data) - return "👎点踩成功,感谢反馈~" - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = real_inputs - display_append = "" - limited_context = False - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def handle_file_upload(self, files, chatbot): - """If the model accepts multimodal input, implement this function.""" - if files: - for file in files: - if file.name: - logging.info(f"尝试读取图像: {file.name}") - self.try_read_image(file.name) - if self.image_path is not None: - chatbot = chatbot + [((self.image_path,), None)] - if self.image_bytes is not None: - logging.info("使用图片作为输入") - # An XMChat conversation turn can actually only handle one image - self.reset() - conv_id = str(uuid.uuid4()) - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "imgbase64", - "data": self.image_bytes - } - response = requests.post(self.url, json=data) - response = json.loads(response.text) - logging.info(f"图片回复: {response['data']}") - return None, chatbot, None - - def get_answer_at_once(self): - question = 
self.history[-1]["content"] - conv_id = str(uuid.uuid4()) - self.last_conv_id = conv_id - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "text", - "data": question - } - response = requests.post(self.url, json=data) - try: - response = json.loads(response.text) - return response["data"], len(response["data"]) - except Exception: - return response.text, len(response.text) - - - - -def get_model( - model_name, - lora_model_path=None, - access_key=None, - temperature=None, - top_p=None, - system_prompt=None, -) -> BaseLLMModel: - msg = i18n("模型设置为了:") + f" {model_name}" - model_type = ModelType.get_type(model_name) - lora_selector_visibility = False - lora_choices = [] - dont_change_lora_selector = False - if model_type != ModelType.OpenAI: - config.local_embedding = True - # del current_model.model - model = None - try: - if model_type == ModelType.OpenAI: - logging.info(f"正在加载OpenAI模型: {model_name}") - model = OpenAIClient( - model_name=model_name, - api_key=access_key, - system_prompt=system_prompt, - temperature=temperature, - top_p=top_p, - ) - elif model_type == ModelType.ChatGLM: - logging.info(f"正在加载ChatGLM模型: {model_name}") - model = ChatGLM_Client(model_name) - elif model_type == ModelType.LLaMA and lora_model_path == "": - msg = f"现在请为 {model_name} 选择LoRA模型" - logging.info(msg) - lora_selector_visibility = True - if os.path.isdir("lora"): - lora_choices = get_file_names( - "lora", plain=True, filetypes=[""]) - lora_choices = ["No LoRA"] + lora_choices - elif model_type == ModelType.LLaMA and lora_model_path != "": - logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}") - dont_change_lora_selector = True - if lora_model_path == "No LoRA": - lora_model_path = None - msg += " + No LoRA" - else: - msg += f" + {lora_model_path}" - model = LLaMA_Client(model_name, lora_model_path) - elif model_type == ModelType.XMChat: - if os.environ.get("XMCHAT_API_KEY"): - access_key = os.environ.get("XMCHAT_API_KEY") - model = XMChat(api_key=access_key) - elif model_type == ModelType.Unknown: - raise ValueError(f"未知模型: {model_name}") - logging.info(msg) - except Exception as e: - logging.error(e) - msg = f"{STANDARD_ERROR_MSG}: {e}" - if dont_change_lora_selector: - return model, msg - else: - return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility) - - -if __name__ == "__main__": - with open("config.json", "r") as f: - openai_api_key = cjson.load(f)["openai_api_key"] - # set logging level to debug - logging.basicConfig(level=logging.DEBUG) - # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key) - client = get_model(model_name="chatglm-6b-int4")[0] - chatbot = [] - stream = False - # Test the billing feature - logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET) - logging.info(client.billing_info()) - # Test question answering - logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET) - question = "巴黎是中国的首都吗?" - for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试问答后history : {client.history}") - # Test conversational memory - logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET) - question = "我刚刚问了你什么问题?" 
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试记忆力后history : {client.history}") - # 测试重试功能 - logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET) - for i in client.retry(chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"重试后history : {client.history}") - # # 测试总结功能 - # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET) - # chatbot, msg = client.reduce_token_size(chatbot=chatbot) - # print(chatbot, msg) - # print(f"总结后history: {client.history}") diff --git a/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/build/precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js b/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/build/precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js deleted file mode 100644 index 96597718109aca07aede00d7fc6e28a5a11aff01..0000000000000000000000000000000000000000 --- a/spaces/training-transformers-together/Dashboard/streamlit_observable/frontend/build/precache-manifest.2e1db2924cb1e112608cee049b0d33cc.js +++ /dev/null @@ -1,26 +0,0 @@ -self.__precacheManifest = (self.__precacheManifest || []).concat([ - { - "revision": "1c6ba26604bc12847ab74fcdb45b2542", - "url": "./index.html" - }, - { - "revision": "5a67f673dcdf30bf693d", - "url": "./static/js/2.b1c975ff.chunk.js" - }, - { - "revision": "9b318b6fb13190fe82c0677e9264b3c7", - "url": "./static/js/2.b1c975ff.chunk.js.LICENSE.txt" - }, - { - "revision": "3301eac1eaca974776ae", - "url": "./static/js/main.fc603b94.chunk.js" - }, - { - "revision": "6515c66d2a8747a146d578e1c038a822", - "url": "./static/js/main.fc603b94.chunk.js.LICENSE.txt" - }, - { - "revision": "7c26bca7e16783d14d15", - "url": "./static/js/runtime-main.11ec9aca.js" - } -]); \ No newline at end of file diff --git a/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/_explorers.py b/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/_explorers.py deleted file mode 100644 index 334836b72559a120feb8a15eef3fe96ce88a4edb..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/grids/musicgen/_explorers.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import typing as tp - -import treetable as tt - -from .._base_explorers import BaseExplorer - - -class LMExplorer(BaseExplorer): - eval_metrics: tp.List[str] = [] - - def stages(self) -> tp.List[str]: - return ['train', 'valid'] - - def get_grid_metrics(self): - """Return the metrics that should be displayed in the tracking table.""" - return [ - tt.group( - 'train', - [ - tt.leaf('epoch'), - tt.leaf('duration', '.1f'), # duration in minutes - tt.leaf('ping'), - tt.leaf('ce', '.4f'), # cross entropy - tt.leaf("ppl", '.3f'), # perplexity - ], - align='>', - ), - tt.group( - 'valid', - [ - tt.leaf('ce', '.4f'), - tt.leaf('ppl', '.3f'), - tt.leaf('best_ppl', '.3f'), - ], - align='>', - ), - ] - - def process_sheep(self, sheep, history): - parts = super().process_sheep(sheep, history) - - track_by = {'ppl': 'lower'} # values should be in ['lower', 'higher'] - best_metrics = {k: (1 if v == 'lower' else -1) * float('inf') for k, v in track_by.items()} - - def comparator(mode, a, b): - return a < b if mode == 'lower' else a > b - - for metrics in history: - for key, sub in metrics.items(): - for metric in track_by: - # for the validation set, keep track of best metrics (ppl in this example) - # this is so we can conveniently compare metrics between runs in the grid - if key == 'valid' and metric in sub and comparator( - track_by[metric], sub[metric], best_metrics[metric] - ): - best_metrics[metric] = sub[metric] - - if 'valid' in parts: - parts['valid'].update({f'best_{k}': v for k, v in best_metrics.items()}) - return parts - - -class GenerationEvalExplorer(BaseExplorer): - eval_metrics: tp.List[str] = [] - - def stages(self) -> tp.List[str]: - return ['evaluate'] - - def get_grid_metrics(self): - """Return the metrics that should be displayed in the tracking table.""" - return [ - tt.group( - 'evaluate', - [ - tt.leaf('epoch', '.3f'), - tt.leaf('duration', '.1f'), - tt.leaf('ping'), - tt.leaf('ce', '.4f'), - tt.leaf('ppl', '.3f'), - tt.leaf('fad', '.3f'), - tt.leaf('kld', '.3f'), - tt.leaf('text_consistency', '.3f'), - tt.leaf('chroma_cosine', '.3f'), - ], - align='>', - ), - ] diff --git a/spaces/tuan2010/DocumentGPT/retriever/bm25.py b/spaces/tuan2010/DocumentGPT/retriever/bm25.py deleted file mode 100644 index 163d0c58b33b3234373a3da7b0d4075b59b2f418..0000000000000000000000000000000000000000 --- a/spaces/tuan2010/DocumentGPT/retriever/bm25.py +++ /dev/null @@ -1,14 +0,0 @@ -from rank_bm25 import BM25Okapi - -class SemanticSearchBM25: - - def __init__(self, data): - self.data = data - tokenized_corpus = [chunk.split(" ") for chunk in data] - self.bm25_model = BM25Okapi(tokenized_corpus) - - def __call__(self, query: str, top_k): - tokenized_query = query.split(" ") - result = self.bm25_model.get_top_n(tokenized_query, self.data, n=top_k) - return result - diff --git a/spaces/tykimos/TarotGPT/README.md b/spaces/tykimos/TarotGPT/README.md deleted file mode 100644 index 5c76742b0f256614d3947693a6e57b604bc18b1a..0000000000000000000000000000000000000000 --- a/spaces/tykimos/TarotGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TarotGPT -emoji: 📉 -colorFrom: blue -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ulysses115/ulysses115-pmvoice/README.md b/spaces/ulysses115/ulysses115-pmvoice/README.md deleted file mode 100644 index de8822789de5909037c5305d01cb8e2aae552c44..0000000000000000000000000000000000000000 
--- a/spaces/ulysses115/ulysses115-pmvoice/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Ulysses115 Pmvoice -emoji: 🌍 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Autofotos Caseras De Pendejas Wachiturras.md b/spaces/usbethFlerru/sovits-modelsV2/example/Autofotos Caseras De Pendejas Wachiturras.md deleted file mode 100644 index 267179839391375cbf04036362c13c0bd547b157..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Autofotos Caseras De Pendejas Wachiturras.md +++ /dev/null @@ -1,6 +0,0 @@ -

Autofotos Caseras De Pendejas Wachiturras
-
-Download: https://urlcod.com/2uyWgp
-
- aaccfb2cb3
-

        diff --git a/spaces/ussrcccp/White-box-Cartoonization/app.py b/spaces/ussrcccp/White-box-Cartoonization/app.py deleted file mode 100644 index c55ced56bd87a85f59d1c8ef84b7eca87422720f..0000000000000000000000000000000000000000 --- a/spaces/ussrcccp/White-box-Cartoonization/app.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations -import argparse -import functools -import os -import pathlib -import sys -from typing import Callable -import uuid - -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image - -from io import BytesIO -from wbc.cartoonize import Cartoonize - -ORIGINAL_REPO_URL = 'https://github.com/SystemErrorWang/White-box-Cartoonization' -TITLE = 'SystemErrorWang/White-box-Cartoonization' -DESCRIPTION = f"""This is a demo for {ORIGINAL_REPO_URL}. - -""" -ARTICLE = """ - -""" - -SAFEHASH = [x for x in "0123456789-abcdefghijklmnopqrstuvwxyz_ABCDEFGHIJKLMNOPQRSTUVWXYZ"] -def compress_UUID(): - ''' - 根据http://www.ietf.org/rfc/rfc1738.txt,由uuid编码扩bai大字符域生成du串 - 包括:[0-9a-zA-Z\-_]共64个 - 长度:(32-2)/3*2=20 - 备注:可在地球上人zhi人都用,使用100年不重复(2^120) - :return:String - ''' - row = str(uuid.uuid4()).replace('-', '') - safe_code = '' - for i in range(10): - enbin = "%012d" % int(bin(int(row[i * 3] + row[i * 3 + 1] + row[i * 3 + 2], 16))[2:], 10) - safe_code += (SAFEHASH[int(enbin[0:6], 2)] + SAFEHASH[int(enbin[6:12], 2)]) - safe_code = safe_code.replace('-', '') - return safe_code - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - parser.add_argument('--allow-screenshot', action='store_true') - return parser.parse_args() - -def run( - image, - cartoonize : Cartoonize -) -> tuple[PIL.Image.Image]: - - out_path = compress_UUID()+'.png' - cartoonize.run_sigle(image.name, out_path) - - return PIL.Image.open(out_path) - - -def main(): - gr.close_all() - - args = parse_args() - - cartoonize = Cartoonize(os.path.join(os.path.dirname(os.path.abspath(__file__)),'wbc/saved_models/')) - - func = functools.partial(run, cartoonize=cartoonize) - func = functools.update_wrapper(func, run) - - gr.Interface( - func, - [ - gr.inputs.Image(type='file', label='Input Image'), - ], - [ - gr.outputs.Image( - type='pil', - label='Result'), - ], - # examples=examples, - theme=args.theme, - title=TITLE, - description=DESCRIPTION, - article=ARTICLE, - allow_screenshot=args.allow_screenshot, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/neptune.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/neptune.md deleted file mode 100644 index cadbfb515e2dd0aed903d526e5936f66f26402ff..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/callbacks/neptune.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -description: Improve YOLOv5 training with 
Neptune, a powerful logging tool. Track metrics like images, plots, and epochs for better model performance. -keywords: Ultralytics, YOLO, Neptune, Callbacks, log scalars, log images, log plots, training, validation ---- - -## _log_scalars ---- -### ::: ultralytics.yolo.utils.callbacks.neptune._log_scalars -

        - -## _log_images ---- -### ::: ultralytics.yolo.utils.callbacks.neptune._log_images -

        - -## _log_plot ---- -### ::: ultralytics.yolo.utils.callbacks.neptune._log_plot -

        - -## on_pretrain_routine_start ---- -### ::: ultralytics.yolo.utils.callbacks.neptune.on_pretrain_routine_start -

        - -## on_train_epoch_end ---- -### ::: ultralytics.yolo.utils.callbacks.neptune.on_train_epoch_end -

        - -## on_fit_epoch_end ---- -### ::: ultralytics.yolo.utils.callbacks.neptune.on_fit_epoch_end -

        - -## on_val_end ---- -### ::: ultralytics.yolo.utils.callbacks.neptune.on_val_end -

        - -## on_train_end ---- -### ::: ultralytics.yolo.utils.callbacks.neptune.on_train_end -

        diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/ops.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/ops.md deleted file mode 100644 index f35584a07ec4b17e1f5244c4f8f369a504543741..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/utils/ops.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -description: Learn about various utility functions in Ultralytics YOLO, including x, y, width, height conversions, non-max suppression, and more. -keywords: Ultralytics, YOLO, Utils Ops, Functions, coco80_to_coco91_class, scale_boxes, non_max_suppression, clip_coords, xyxy2xywh, xywhn2xyxy, xyn2xy, xyxy2ltwh, ltwh2xyxy, resample_segments, process_mask_upsample, process_mask_native, masks2segments, clean_str ---- - -## Profile ---- -### ::: ultralytics.yolo.utils.ops.Profile -

        - -## coco80_to_coco91_class ---- -### ::: ultralytics.yolo.utils.ops.coco80_to_coco91_class -

        - -## segment2box ---- -### ::: ultralytics.yolo.utils.ops.segment2box -

        - -## scale_boxes ---- -### ::: ultralytics.yolo.utils.ops.scale_boxes -

        - -## make_divisible ---- -### ::: ultralytics.yolo.utils.ops.make_divisible -

        - -## non_max_suppression ---- -### ::: ultralytics.yolo.utils.ops.non_max_suppression -

        - -## clip_boxes ---- -### ::: ultralytics.yolo.utils.ops.clip_boxes -

        - -## clip_coords ---- -### ::: ultralytics.yolo.utils.ops.clip_coords -

        - -## scale_image ---- -### ::: ultralytics.yolo.utils.ops.scale_image -

        - -## xyxy2xywh ---- -### ::: ultralytics.yolo.utils.ops.xyxy2xywh -

        - -## xywh2xyxy ---- -### ::: ultralytics.yolo.utils.ops.xywh2xyxy -

        - -## xywhn2xyxy ---- -### ::: ultralytics.yolo.utils.ops.xywhn2xyxy -

        - -## xyxy2xywhn ---- -### ::: ultralytics.yolo.utils.ops.xyxy2xywhn -

        - -## xyn2xy ---- -### ::: ultralytics.yolo.utils.ops.xyn2xy -

        - -## xywh2ltwh ---- -### ::: ultralytics.yolo.utils.ops.xywh2ltwh -

        - -## xyxy2ltwh ---- -### ::: ultralytics.yolo.utils.ops.xyxy2ltwh -

        - -## ltwh2xywh ---- -### ::: ultralytics.yolo.utils.ops.ltwh2xywh -

        - -## ltwh2xyxy ---- -### ::: ultralytics.yolo.utils.ops.ltwh2xyxy -

        - -## segments2boxes ---- -### ::: ultralytics.yolo.utils.ops.segments2boxes -

        - -## resample_segments ---- -### ::: ultralytics.yolo.utils.ops.resample_segments -

        - -## crop_mask ---- -### ::: ultralytics.yolo.utils.ops.crop_mask -

        - -## process_mask_upsample ---- -### ::: ultralytics.yolo.utils.ops.process_mask_upsample -

        - -## process_mask ---- -### ::: ultralytics.yolo.utils.ops.process_mask -

        - -## process_mask_native ---- -### ::: ultralytics.yolo.utils.ops.process_mask_native -

        - -## scale_coords ---- -### ::: ultralytics.yolo.utils.ops.scale_coords -

        - -## masks2segments ---- -### ::: ultralytics.yolo.utils.ops.masks2segments -

        - -## clean_str ---- -### ::: ultralytics.yolo.utils.ops.clean_str -
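These pages only index the symbols. As an illustration, a small sketch like the following (assuming the ultralytics 8.0-era package layout that these docs describe) exercises the box-format converters listed above:

```python
# Round-trip between the two box formats documented above:
# xyxy = (x1, y1, x2, y2) corner coordinates; xywh = (center-x, center-y, width, height).
import torch
from ultralytics.yolo.utils import ops

boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
boxes_xywh = ops.xyxy2xywh(boxes_xyxy)  # -> tensor([[30., 50., 40., 60.]])
assert torch.allclose(ops.xywh2xyxy(boxes_xywh), boxes_xyxy)  # inverse mapping restores the corners
```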

        diff --git a/spaces/venkat-natchi/yolov3_obj_detector/README.md b/spaces/venkat-natchi/yolov3_obj_detector/README.md deleted file mode 100644 index fa4e84e629d3d95549b4ffcff6f0f54bfb82035f..0000000000000000000000000000000000000000 --- a/spaces/venkat-natchi/yolov3_obj_detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Yolov3 Obj Detector -emoji: 🦀 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vumichien/Generate_human_motion/VQ-Trans/utils/losses.py b/spaces/vumichien/Generate_human_motion/VQ-Trans/utils/losses.py deleted file mode 100644 index 1998161032731fc2c3edae701700679c00fd00d0..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/VQ-Trans/utils/losses.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch -import torch.nn as nn - -class ReConsLoss(nn.Module): - def __init__(self, recons_loss, nb_joints): - super(ReConsLoss, self).__init__() - - if recons_loss == 'l1': - self.Loss = torch.nn.L1Loss() - elif recons_loss == 'l2' : - self.Loss = torch.nn.MSELoss() - elif recons_loss == 'l1_smooth' : - self.Loss = torch.nn.SmoothL1Loss() - - # 4 global motion associated to root - # 12 local motion (3 local xyz, 3 vel xyz, 6 rot6d) - # 3 global vel xyz - # 4 foot contact - self.nb_joints = nb_joints - self.motion_dim = (nb_joints - 1) * 12 + 4 + 3 + 4 - - def forward(self, motion_pred, motion_gt) : - loss = self.Loss(motion_pred[..., : self.motion_dim], motion_gt[..., :self.motion_dim]) - return loss - - def forward_vel(self, motion_pred, motion_gt) : - loss = self.Loss(motion_pred[..., 4 : (self.nb_joints - 1) * 3 + 4], motion_gt[..., 4 : (self.nb_joints - 1) * 3 + 4]) - return loss - - \ No newline at end of file diff --git a/spaces/w1zrd/MusicGen/audiocraft/data/__init__.py b/spaces/w1zrd/MusicGen/audiocraft/data/__init__.py deleted file mode 100644 index 708a3dcead8dda89374a021177481dacae9f7fe9..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/audiocraft/data/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from . 
import audio, audio_dataset diff --git a/spaces/weibinke/vits-simple-api/bert_vits2/text/chinese.py b/spaces/weibinke/vits-simple-api/bert_vits2/text/chinese.py deleted file mode 100644 index 989a378a4d93663934e3b37de13bf2bfd2fd38fd..0000000000000000000000000000000000000000 --- a/spaces/weibinke/vits-simple-api/bert_vits2/text/chinese.py +++ /dev/null @@ -1,194 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from bert_vits2.text.symbols import punctuation -from bert_vits2.text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in - open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()} - -import jieba.posseg as psg - -rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - '$': '.', - '“': "'", - '”': "'", - '‘': "'", - '’': "'", - '(': "'", - ')': "'", - '(': "'", - ')': "'", - '《': "'", - '》': "'", - '【': "'", - '】': "'", - '[': "'", - ']': "'", - '—': "-", - '~': "-", - '~': "-", - '「': "'", - '」': "'", - -} - -tone_modifier = ToneSandhi() - - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣", "母") - pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub(r'[^\u4e00-\u9fa5' + "".join(punctuation) + r']+', '', replaced_text) - - return replaced_text - - -def g2p(text): - pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip() != ''] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) # Sometimes it will crash,you can add a try-catch. 
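- # Pad with begin/end-of-sequence markers: phones get '_' and tones get 0 at both ends, and word2ph gets matching 1s so the per-character alignment with the input text still holds.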
- phones = ['_'] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - pinyins = [] - # Replace all English words in the sentence - seg = re.sub('[a-zA-Z]+', '', seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == 'eng': - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, - sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c + v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = '0' - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c + v_without_tone - assert tone in '12345' - - if c: - # 多音节 - v_rep_map = { - "uei": 'ui', - 'iou': 'iu', - 'uen': 'un', - } - if v_without_tone in v_rep_map.keys(): - pinyin = c + v_rep_map[v_without_tone] - else: - # 单音节 - pinyin_rep_map = { - 'ing': 'ying', - 'i': 'yi', - 'in': 'yin', - 'u': 'wu', - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - 'v': 'yu', - 'e': 'e', - 'i': 'y', - 'u': 'w', - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]] + pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(' ') - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - -def text_normalize(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - - -def get_bert_feature(text, word2ph): - from bert_vits2.text import chinese_bert - return chinese_bert.get_bert_feature(text, word2ph) - - -if __name__ == '__main__': - from bert_vits2.text import get_bert_feature - - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - -# # 示例用法 -# text = "这是一个示例文本:,你好!这是一个测试...." 
-# print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试 diff --git a/spaces/weibinke/vits-simple-api/vits/modules.py b/spaces/weibinke/vits-simple-api/vits/modules.py deleted file mode 100644 index 4713777a9e1ee6ba533813e0ff8f44aa50d7e3a4..0000000000000000000000000000000000000000 --- a/spaces/weibinke/vits-simple-api/vits/modules.py +++ /dev/null @@ -1,387 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -from vits import commons -from vits.commons import init_weights, get_padding -from vits.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def 
__init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if 
x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, 
in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/weizmannscience/multidiffusion-region-based/region_control.py b/spaces/weizmannscience/multidiffusion-region-based/region_control.py deleted file mode 100644 index fee5878eaef9eca2ac198ac5d10eed968df96239..0000000000000000000000000000000000000000 --- a/spaces/weizmannscience/multidiffusion-region-based/region_control.py +++ /dev/null @@ -1,209 +0,0 @@ -from transformers import CLIPTextModel, CLIPTokenizer, logging -from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler - -# suppress partial model loading warning -logging.set_verbosity_error() - -import torch -import torch.nn as nn -import torchvision.transforms as T -import argparse -import numpy as np -from PIL import Image - - -def seed_everything(seed): - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - # torch.backends.cudnn.deterministic = True - # torch.backends.cudnn.benchmark = True - - -def get_views(panorama_height, panorama_width, window_size=64, stride=8): - panorama_height /= 8 - panorama_width /= 8 - num_blocks_height = (panorama_height - window_size) // stride + 1 - num_blocks_width = (panorama_width - window_size) // stride + 1 - total_num_blocks = int(num_blocks_height * num_blocks_width) - views = [] - for i in range(total_num_blocks): - h_start = int((i // num_blocks_width) * stride) - h_end = h_start + window_size - w_start = int((i % num_blocks_width) * stride) - w_end = w_start + window_size - views.append((h_start, h_end, w_start, w_end)) - return views - - -class MultiDiffusion(nn.Module): - def __init__(self, device, sd_version='2.0', hf_key=None): - super().__init__() - - self.device = device - self.sd_version = sd_version - - print(f'[INFO] loading stable diffusion...') - if hf_key is not None: - print(f'[INFO] using hugging face custom model key: {hf_key}') - model_key = hf_key - elif self.sd_version == '2.1': - model_key = "stabilityai/stable-diffusion-2-1-base" - elif self.sd_version == '2.0': - model_key = "stabilityai/stable-diffusion-2-base" - elif self.sd_version == '1.5': - model_key 
= "runwayml/stable-diffusion-v1-5" - else: - model_key = self.sd_version #For custom models or fine-tunes, allow people to use arbitrary versions - #raise ValueError(f'Stable-diffusion version {self.sd_version} not supported.') - - # Create model - self.vae = AutoencoderKL.from_pretrained(model_key, subfolder="vae").to(self.device) - self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder="tokenizer") - self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder="text_encoder").to(self.device) - self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder="unet").to(self.device) - - self.scheduler = DDIMScheduler.from_pretrained(model_key, subfolder="scheduler") - - print(f'[INFO] loaded stable diffusion!') - - @torch.no_grad() - def get_random_background(self, n_samples): - # sample random background with a constant rgb value - backgrounds = torch.rand(n_samples, 3, device=self.device)[:, :, None, None].repeat(1, 1, 512, 512) - return torch.cat([self.encode_imgs(bg.unsqueeze(0)) for bg in backgrounds]) - - @torch.no_grad() - def get_text_embeds(self, prompt, negative_prompt): - # Tokenize text and get embeddings - text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, - truncation=True, return_tensors='pt') - text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] - - # Do the same for unconditional embeddings - uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, - return_tensors='pt') - - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] - - # Cat for final embeddings - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - return text_embeddings - - @torch.no_grad() - def encode_imgs(self, imgs): - imgs = 2 * imgs - 1 - posterior = self.vae.encode(imgs).latent_dist - latents = posterior.sample() * 0.18215 - return latents - - @torch.no_grad() - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - imgs = self.vae.decode(latents).sample - imgs = (imgs / 2 + 0.5).clamp(0, 1) - return imgs - - @torch.no_grad() - def generate(self, masks, prompts, negative_prompts='', height=512, width=2048, num_inference_steps=50, - guidance_scale=7.5, bootstrapping=20): - - # get bootstrapping backgrounds - # can move this outside of the function to speed up generation. 
i.e., calculate in init - bootstrapping_backgrounds = self.get_random_background(bootstrapping) - - # Prompts -> text embeds - text_embeds = self.get_text_embeds(prompts, negative_prompts) # [2 * len(prompts), 77, 768] - - # Define panorama grid and get views - latent = torch.randn((1, self.unet.in_channels, height // 8, width // 8), device=self.device) - noise = latent.clone().repeat(len(prompts) - 1, 1, 1, 1) - views = get_views(height, width) - count = torch.zeros_like(latent) - value = torch.zeros_like(latent) - - self.scheduler.set_timesteps(num_inference_steps) - - with torch.autocast('cuda'): - for i, t in enumerate(self.scheduler.timesteps): - count.zero_() - value.zero_() - - for h_start, h_end, w_start, w_end in views: - masks_view = masks[:, :, h_start:h_end, w_start:w_end] - latent_view = latent[:, :, h_start:h_end, w_start:w_end].repeat(len(prompts), 1, 1, 1) - if i < bootstrapping: - bg = bootstrapping_backgrounds[torch.randint(0, bootstrapping, (len(prompts) - 1,))] - bg = self.scheduler.add_noise(bg, noise[:, :, h_start:h_end, w_start:w_end], t) - latent_view[1:] = latent_view[1:] * masks_view[1:] + bg * (1 - masks_view[1:]) - - # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. - latent_model_input = torch.cat([latent_view] * 2) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds)['sample'] - - # perform guidance - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the denoising step with the reference model - latents_view_denoised = self.scheduler.step(noise_pred, t, latent_view)['prev_sample'] - - value[:, :, h_start:h_end, w_start:w_end] += (latents_view_denoised * masks_view).sum(dim=0, - keepdims=True) - count[:, :, h_start:h_end, w_start:w_end] += masks_view.sum(dim=0, keepdims=True) - - # take the MultiDiffusion step - latent = torch.where(count > 0, value / count, value) - - # Img latents -> imgs - imgs = self.decode_latents(latent) # [1, 3, 512, 512] - img = T.ToPILImage()(imgs[0].cpu()) - return img - - -def preprocess_mask(mask_path, h, w, device): - mask = np.array(Image.open(mask_path).convert("L")) - mask = mask.astype(np.float32) / 255.0 - mask = mask[None, None] - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - mask = torch.from_numpy(mask).to(device) - mask = torch.nn.functional.interpolate(mask, size=(h, w), mode='nearest') - return mask - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--mask_paths', type=list) - parser.add_argument('--bg_prompt', type=str) - parser.add_argument('--bg_negative', type=str) # 'artifacts, blurry, smooth texture, bad quality, distortions, unrealistic, distorted image' - parser.add_argument('--fg_prompts', type=list) - parser.add_argument('--fg_negative', type=list) # 'artifacts, blurry, smooth texture, bad quality, distortions, unrealistic, distorted image' - parser.add_argument('--sd_version', type=str, default='2.0', choices=['1.5', '2.0'], - help="stable diffusion version") - parser.add_argument('--H', type=int, default=768) - parser.add_argument('--W', type=int, default=512) - parser.add_argument('--seed', type=int, default=0) - parser.add_argument('--steps', type=int, default=50) - parser.add_argument('--bootstrapping', type=int, default=20) - opt = parser.parse_args() - - seed_everything(opt.seed) - - device = torch.device('cuda') - - sd = MultiDiffusion(device, 
diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py
deleted file mode 100644
index 258b618cd338322365dfa25bec468a0a3f70ccd1..0000000000000000000000000000000000000000
--- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_inference.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import IPython.display as ipd
-import torch
-import commons
-import utils
-import ONNXVITS_infer
-from text import text_to_sequence
-
-def get_text(text, hps):
-    text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
-    if hps.data.add_blank:
-        text_norm = commons.intersperse(text_norm, 0)
-    text_norm = torch.LongTensor(text_norm)
-    return text_norm
-
-hps = utils.get_hparams_from_file("../vits/pretrained_models/uma87.json")
-
-net_g = ONNXVITS_infer.SynthesizerTrn(
-    len(hps.symbols),
-    hps.data.filter_length // 2 + 1,
-    hps.train.segment_size // hps.data.hop_length,
-    n_speakers=hps.data.n_speakers,
-    **hps.model)
-_ = net_g.eval()
-
-_ = utils.load_checkpoint("../vits/pretrained_models/uma_1153000.pth", net_g)
-
-text1 = get_text("おはようございます。", hps)  # "Good morning."
-stn_tst = text1
-with torch.no_grad():
-    x_tst = stn_tst.unsqueeze(0)
-    x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
-    sid = torch.LongTensor([0])
-    audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
-# ipd is imported above for playback, e.g. ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate))
-print(audio)
\ No newline at end of file
diff --git a/spaces/xly66624/Brayton-cycle/README.md b/spaces/xly66624/Brayton-cycle/README.md
deleted file mode 100644
index a537460a4112417bfa1793e158742d588b3cb7f2..0000000000000000000000000000000000000000
--- a/spaces/xly66624/Brayton-cycle/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 111
-emoji: 💻
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/yangogo/bingo/Dockerfile b/spaces/yangogo/bingo/Dockerfile
deleted file mode 100644
index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000
--- a/spaces/yangogo/bingo/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-FROM node:18
-
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-ENV BING_HEADER ""
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
-	PATH=/home/user/.local/bin:$PATH
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
-
-# Switch to the "user" user
-USER user
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Install app dependencies
-# A wildcard is used to ensure both package.json AND package-lock.json are copied
-# where available (npm@5+)
-COPY --chown=user package*.json $HOME/app/
-
-RUN npm install
-
-# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
-COPY --chown=user . $HOME/app/
-
-RUN npm run build
-
-ENV PORT 7860
-EXPOSE 7860
-
-CMD npm start
diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackFactory.ts b/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackFactory.ts
deleted file mode 100644
index 3b6c61c1fbe6353180a934f0870136903efb332b..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackFactory.ts
+++ /dev/null
@@ -1,58 +0,0 @@
-import { toTrackEvents } from "../helpers/toTrackEvents"
-import {
-  endOfTrackMidiEvent,
-  expressionMidiEvent,
-  masterCoarceTuningEvents,
-  masterFineTuningEvents,
-  modulationMidiEvent,
-  panMidiEvent,
-  pitchBendMidiEvent,
-  pitchbendSensitivityEvents,
-  programChangeMidiEvent,
-  resetAllMidiEvent,
-  setTempoMidiEvent,
-  timeSignatureMidiEvent,
-  trackNameMidiEvent,
-  volumeMidiEvent,
-} from "../midi/MidiEvent"
-import Track from "./Track"
-
-export function conductorTrack(name = "") {
-  const track = new Track()
-  const events = toTrackEvents([
-    trackNameMidiEvent(0, name),
-    timeSignatureMidiEvent(0),
-    setTempoMidiEvent(0, 60000000 / 120), // 60,000,000 µs per minute / 120 BPM = 500,000 µs per quarter note
-    endOfTrackMidiEvent(0),
-  ])
-  track.addEvents(events)
-  return track
-}
-
-export const resetTrackMIDIEvents = (channel: number) => [
-  resetAllMidiEvent(0, channel),
-  trackNameMidiEvent(0, ""),
-  panMidiEvent(0, channel, 64),
-  volumeMidiEvent(0, channel, 100),
-  expressionMidiEvent(0, channel, 127),
-  ...masterCoarceTuningEvents(0, channel),
-  ...masterFineTuningEvents(0, channel),
-  ...pitchbendSensitivityEvents(0, channel, 12),
-  pitchBendMidiEvent(0, channel, 0x2000),
-  modulationMidiEvent(0, channel, 0),
-  programChangeMidiEvent(0, channel, 0),
-]
-
-export function emptyTrack(channel: number) {
-  if (!Number.isInteger(channel)) {
-    throw new Error("channel must be an integer")
-  }
-  const track = new Track()
-  track.channel = channel
-  const events = toTrackEvents([
-    ...resetTrackMIDIEvents(channel),
-    endOfTrackMidiEvent(1),
-  ])
-  track.addEvents(events)
-  return track
-}
diff --git a/spaces/ygangang/CodeFormer/CodeFormer/inference_codeformer.py b/spaces/ygangang/CodeFormer/CodeFormer/inference_codeformer.py
deleted file mode 100644
index fdfe8b301cc7c20c2fb653618e379d243603a108..0000000000000000000000000000000000000000
--- a/spaces/ygangang/CodeFormer/CodeFormer/inference_codeformer.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Modified by Shangchen Zhou from: https://github.com/TencentARC/GFPGAN/blob/master/inference_gfpgan.py
-import os
-import cv2
-import argparse
-import glob
-import torch
-from torchvision.transforms.functional import normalize
-from basicsr.utils import imwrite, img2tensor, tensor2img
-from basicsr.utils.download_util import load_file_from_url
-from facelib.utils.face_restoration_helper import FaceRestoreHelper
-import torch.nn.functional as F
-
-from basicsr.utils.registry import ARCH_REGISTRY
-
-pretrain_model_url = {
-    'restoration': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',
-}
-
-def set_realesrgan():
-    if not torch.cuda.is_available():  # CPU
-        import warnings
-        warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. '
-                      'If you really want to use it, please modify the corresponding code.',
-                      category=RuntimeWarning)
-        bg_upsampler = None
-    else:
-        from basicsr.archs.rrdbnet_arch import RRDBNet
-        from basicsr.utils.realesrgan_utils import RealESRGANer
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-        bg_upsampler = RealESRGANer(
-            scale=2,
-            model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
-            model=model,
-            tile=args.bg_tile,
-            tile_pad=40,
-            pre_pad=0,
-            half=True)  # need to set False in CPU mode
-    return bg_upsampler
-
-if __name__ == '__main__':
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('--w', type=float, default=0.5, help='Balance the quality and fidelity')
-    parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image. Default: 2')
-    parser.add_argument('--test_path', type=str, default='./inputs/cropped_faces')
-    parser.add_argument('--has_aligned', action='store_true', help='Inputs are cropped and aligned faces')
-    parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face')
-    # large det_model: 'YOLOv5l', 'retinaface_resnet50'
-    # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'
-    parser.add_argument('--detection_model', type=str, default='retinaface_resnet50')
-    parser.add_argument('--draw_box', action='store_true')
-    parser.add_argument('--bg_upsampler', type=str, default='None', help='background upsampler. Optional: realesrgan')
-    parser.add_argument('--face_upsample', action='store_true', help='face upsampler after enhancement.')
-    parser.add_argument('--bg_tile', type=int, default=400, help='Tile size for background upsampler. Default: 400')
-
-    args = parser.parse_args()
-
-    # ------------------------ input & output ------------------------
-    if args.test_path.endswith('/'):  # strip a trailing slash
-        args.test_path = args.test_path[:-1]
-
-    w = args.w
-    result_root = f'results/{os.path.basename(args.test_path)}_{w}'
-
-    # ------------------ set up background upsampler ------------------
-    if args.bg_upsampler == 'realesrgan':
-        bg_upsampler = set_realesrgan()
-    else:
-        bg_upsampler = None
-
-    # ------------------ set up face upsampler ------------------
-    if args.face_upsample:
-        if bg_upsampler is not None:
-            face_upsampler = bg_upsampler
-        else:
-            face_upsampler = set_realesrgan()
-    else:
-        face_upsampler = None
-
-    # ------------------ set up CodeFormer restorer -------------------
-    net = ARCH_REGISTRY.get('CodeFormer')(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,
-                                          connect_list=['32', '64', '128', '256']).to(device)
-
-    # ckpt_path = 'weights/CodeFormer/codeformer.pth'
-    ckpt_path = load_file_from_url(url=pretrain_model_url['restoration'],
-                                   model_dir='weights/CodeFormer', progress=True, file_name=None)
-    checkpoint = torch.load(ckpt_path)['params_ema']
-    net.load_state_dict(checkpoint)
-    net.eval()
-
-    # ------------------ set up FaceRestoreHelper -------------------
-    # large det_model: 'YOLOv5l', 'retinaface_resnet50'
-    # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'
-    if not args.has_aligned:
-        print(f'Face detection model: {args.detection_model}')
-    if bg_upsampler is not None:
-        print(f'Background upsampling: True, Face upsampling: {args.face_upsample}')
-    else:
-        print(f'Background upsampling: False, Face upsampling: {args.face_upsample}')
-
-    face_helper = FaceRestoreHelper(
-        args.upscale,
-        face_size=512,
-        crop_ratio=(1, 1),
-        det_model = args.detection_model,
-        save_ext='png',
-        use_parse=True,
-        device=device)
-
-    # -------------------- start processing ---------------------
-    # scan all the jpg and png images
-    for img_path in sorted(glob.glob(os.path.join(args.test_path, '*.[jp][pn]g'))):
-        # clean all the intermediate results to process the next image
-        face_helper.clean_all()
-
-        img_name = os.path.basename(img_path)
-        print(f'Processing: {img_name}')
-        basename, ext = os.path.splitext(img_name)
-        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-
-        if args.has_aligned:
-            # the input faces are already cropped and aligned
-            img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
-            face_helper.cropped_faces = [img]
-        else:
-            face_helper.read_image(img)
-            # get face landmarks for each face
-            num_det_faces = face_helper.get_face_landmarks_5(
-                only_center_face=args.only_center_face, resize=640, eye_dist_threshold=5)
-            print(f'\tDetected {num_det_faces} faces')
-            # align and warp each face
-            face_helper.align_warp_face()
-
-        # face restoration for each cropped face
-        for idx, cropped_face in enumerate(face_helper.cropped_faces):
-            # prepare data
-            cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
-            normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
-            cropped_face_t = cropped_face_t.unsqueeze(0).to(device)
-
-            try:
-                with torch.no_grad():
-                    output = net(cropped_face_t, w=w, adain=True)[0]
-                    restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
-                del output
-                torch.cuda.empty_cache()
-            except Exception as error:
-                print(f'\tFailed inference for CodeFormer: {error}')
-                restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
-
-            restored_face = restored_face.astype('uint8')
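# A note on the fidelity weight used in the net(...) call above: CodeFormer's w in [0, 1]
# balances quality against fidelity (w=0 leans fully on the learned codebook and holds up
# best under heavy degradation; w=1 stays closest to the input identity). A small sweep is
# a cheap way to pick it; a sketch, with a hypothetical output-naming scheme:
#
#   with torch.no_grad():
#       for w_try in (0.3, 0.5, 0.7):
#           out = net(cropped_face_t, w=w_try, adain=True)[0]
#           imwrite(tensor2img(out, rgb2bgr=True, min_max=(-1, 1)).astype('uint8'),
#                   os.path.join(result_root, f'{basename}_w{w_try}.png'))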
-            face_helper.add_restored_face(restored_face)
-
-        # paste_back
-        if not args.has_aligned:
-            # upsample the background
-            if bg_upsampler is not None:
-                # only RealESRGAN is currently supported for background upsampling
-                bg_img = bg_upsampler.enhance(img, outscale=args.upscale)[0]
-            else:
-                bg_img = None
-            face_helper.get_inverse_affine(None)
-            # paste each restored face back into the input image
-            if args.face_upsample and face_upsampler is not None:
-                restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box, face_upsampler=face_upsampler)
-            else:
-                restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box)
-
-        # save faces
-        for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)):
-            # save cropped face
-            if not args.has_aligned:
-                save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
-                imwrite(cropped_face, save_crop_path)
-            # save restored face
-            if args.has_aligned:
-                save_face_name = f'{basename}.png'
-            else:
-                save_face_name = f'{basename}_{idx:02d}.png'
-            save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name)
-            imwrite(restored_face, save_restore_path)
-
-        # save restored img
-        if not args.has_aligned and restored_img is not None:
-            save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png')
-            imwrite(restored_img, save_restore_path)
-
-    print(f'\nAll results are saved in {result_root}')
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/barthez/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/barthez/__init__.py
deleted file mode 100644
index 084cd22bdf1d888efd46b759b91ccf95ee53c656..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/barthez/__init__.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
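# The scaffold below is transformers' lazy-import pattern: _import_structure registers the
# public names behind availability guards, and _LazyModule swaps itself into sys.modules so
# the heavy tokenizer modules are imported only on first attribute access. Illustrative
# behavior, assuming sentencepiece is installed (paths as in upstream transformers; this
# vendored copy lives under transformers_4_35_0):
#
#   from transformers.models import barthez  # cheap: no tokenizer code imported yet
#   tok_cls = barthez.BarthezTokenizer       # the real import happens here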
- -from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_barthez"] = ["BarthezTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"] - - -if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_barthez import BarthezTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_barthez_fast import BarthezTokenizerFast - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deformable_detr/load_custom.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deformable_detr/load_custom.py deleted file mode 100644 index c3a822e2764170c24c7098956e81788856385451..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deformable_detr/load_custom.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
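# Context for the module below: it JIT-compiles the multi-scale deformable attention CUDA
# kernels with torch.utils.cpp_extension.load on first call. A typical guarded call site
# looks like this sketch (the fallback behavior is an assumption; callers are expected to
# degrade to the pure-PyTorch attention path when compilation fails):
#
#   try:
#       MultiScaleDeformableAttention = load_cuda_kernels()
#   except Exception:
#       MultiScaleDeformableAttention = None  # fall back to the native implementation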
-""" Loading of Deformable DETR's CUDA kernels""" -import os -from pathlib import Path - - -def load_cuda_kernels(): - from torch.utils.cpp_extension import load - - root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr" - src_files = [ - root / filename - for filename in [ - "vision.cpp", - os.path.join("cpu", "ms_deform_attn_cpu.cpp"), - os.path.join("cuda", "ms_deform_attn_cuda.cu"), - ] - ] - - load( - "MultiScaleDeformableAttention", - src_files, - with_cuda=True, - extra_include_paths=[str(root)], - extra_cflags=["-DWITH_CUDA=1"], - extra_cuda_cflags=[ - "-DCUDA_HAS_FP16=1", - "-D__CUDA_NO_HALF_OPERATORS__", - "-D__CUDA_NO_HALF_CONVERSIONS__", - "-D__CUDA_NO_HALF2_OPERATORS__", - ], - ) - - import MultiScaleDeformableAttention as MSDA - - return MSDA diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv3/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv3/__init__.py deleted file mode 100644 index ca1c31091e8b6e210e3da32fcfc766ac6a69f05f..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/layoutlmv3/__init__.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_layoutlmv3": [ - "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP", - "LayoutLMv3Config", - "LayoutLMv3OnnxConfig", - ], - "processing_layoutlmv3": ["LayoutLMv3Processor"], - "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlmv3"] = [ - "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", - "LayoutLMv3ForQuestionAnswering", - "LayoutLMv3ForSequenceClassification", - "LayoutLMv3ForTokenClassification", - "LayoutLMv3Model", - "LayoutLMv3PreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_layoutlmv3"] = [ - "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", - "TFLayoutLMv3ForQuestionAnswering", - "TFLayoutLMv3ForSequenceClassification", - "TFLayoutLMv3ForTokenClassification", - "TFLayoutLMv3Model", - "TFLayoutLMv3PreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"] - _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"] - - -if TYPE_CHECKING: - from .configuration_layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, - LayoutLMv3Config, - LayoutLMv3OnnxConfig, - ) - from .processing_layoutlmv3 import LayoutLMv3Processor - from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlmv3 import ( - LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, - LayoutLMv3ForQuestionAnswering, - LayoutLMv3ForSequenceClassification, - LayoutLMv3ForTokenClassification, - LayoutLMv3Model, - LayoutLMv3PreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_layoutlmv3 import ( - TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, - TFLayoutLMv3ForQuestionAnswering, - TFLayoutLMv3ForSequenceClassification, - TFLayoutLMv3ForTokenClassification, - TFLayoutLMv3Model, - TFLayoutLMv3PreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor - from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git 
a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/preprocess_hubert_f0.py b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/preprocess_hubert_f0.py
deleted file mode 100644
index 763fb0d65540ed4d62b269914e81c740f3ff6bba..0000000000000000000000000000000000000000
--- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/preprocess_hubert_f0.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import math
-import multiprocessing
-import os
-import argparse
-from random import shuffle
-
-import torch
-from glob import glob
-from tqdm import tqdm
-from modules.mel_processing import spectrogram_torch
-
-import utils
-import logging
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-import librosa
-import numpy as np
-
-hps = utils.get_hparams_from_file("configs/config.json")
-sampling_rate = hps.data.sampling_rate
-hop_length = hps.data.hop_length
-
-
-def process_one(filename, hmodel):
-    # print(filename)
-    wav, sr = librosa.load(filename, sr=sampling_rate)
-    soft_path = filename + ".soft.pt"
-    if not os.path.exists(soft_path):
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000)
-        wav16k = torch.from_numpy(wav16k).to(device)
-        c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k)
-        torch.save(c.cpu(), soft_path)
-
-    f0_path = filename + ".f0.npy"
-    if not os.path.exists(f0_path):
-        f0 = utils.compute_f0_dio(
-            wav, sampling_rate=sampling_rate, hop_length=hop_length
-        )
-        np.save(f0_path, f0)
-
-    spec_path = filename.replace(".wav", ".spec.pt")
-    if not os.path.exists(spec_path):
-        # Process spectrogram
-        # The following code can't be replaced by torch.FloatTensor(wav)
-        # because load_wav_to_torch returns a tensor that needs to be normalized
-
-        audio, sr = utils.load_wav_to_torch(filename)
-        if sr != hps.data.sampling_rate:
-            raise ValueError(
-                "{} SR doesn't match target {} SR".format(
-                    sr, hps.data.sampling_rate
-                )
-            )
-
-        audio_norm = audio / hps.data.max_wav_value
-        audio_norm = audio_norm.unsqueeze(0)
-
-        spec = spectrogram_torch(
-            audio_norm,
-            hps.data.filter_length,
-            hps.data.sampling_rate,
-            hps.data.hop_length,
-            hps.data.win_length,
-            center=False,
-        )
-        spec = torch.squeeze(spec, 0)
-        torch.save(spec, spec_path)
-
-
-def process_batch(filenames):
-    print("Loading hubert for content...")
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    hmodel = utils.get_hubert_model().to(device)
-    print("Loaded hubert.")
-    for filename in tqdm(filenames):
-        process_one(filename, hmodel)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--in_dir", type=str, default="dataset/44k", help="path to input dir"
-    )
-
-    args = parser.parse_args()
-    filenames = glob(f"{args.in_dir}/*/*.wav", recursive=True)  # [:10]  (recursive=True only matters for '**' patterns)
-    shuffle(filenames)
-    multiprocessing.set_start_method("spawn", force=True)
-
-    num_processes = 1
-    chunk_size = int(math.ceil(len(filenames) / num_processes))
-    chunks = [
-        filenames[i : i + chunk_size] for i in range(0, len(filenames), chunk_size)
-    ]
-    print([len(c) for c in chunks])
-    processes = [
-        multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks
-    ]
-    for p in processes:
-        p.start()
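The chunking above rounds up, so the last chunk can come up short, and with the default
num_processes = 1 everything lands in a single chunk, making the pool effectively serial.
A quick check of the arithmetic (values illustrative):

    import math
    filenames = [f"{i}.wav" for i in range(10)]
    num_processes = 4
    chunk_size = int(math.ceil(len(filenames) / num_processes))
    chunks = [filenames[i : i + chunk_size] for i in range(0, len(filenames), chunk_size)]
    print([len(c) for c in chunks])  # prints [3, 3, 3, 1]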
diff --git a/spaces/zhaoys/wfms-kuiwenc/src/components/welcome-screen.tsx b/spaces/zhaoys/wfms-kuiwenc/src/components/welcome-screen.tsx
deleted file mode 100644
index fe6d4e025bfe53817450e9f3e948bf8fa65f8ff2..0000000000000000000000000000000000000000
--- a/spaces/zhaoys/wfms-kuiwenc/src/components/welcome-screen.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-import { BingReturnType } from '@/lib/hooks/use-bing'
-
-const exampleMessages = [
-  {
-    heading: '🧐 提出复杂问题', // "Ask complex questions"
-    message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?` // "What meals can I make for my picky child who only eats orange food?"
-  },
-  {
-    heading: '🙌 获取更好的答案', // "Get better answers"
-    message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?' // "What are the pros and cons of the 3 best-selling pet vacuums?"
-  },
-  {
-    heading: '🎨 获得创意灵感', // "Get creative inspiration"
-    message: `以海盗的口吻写一首关于外太空鳄鱼的俳句` // "Write a haiku about crocodiles in outer space, in a pirate's voice"
-  }
-]
-
-export function WelcomeScreen({ setInput }: Pick<BingReturnType, 'setInput'>) {
-  return (
-    <div>
-      {exampleMessages.map(example => (
-        <button key={example.heading} onClick={() => setInput(example.message)}>
-          <span>{example.heading}</span>
-          <span>{example.message}</span>
-        </button>
-      ))}
-    </div>
-  )
-}
diff --git a/spaces/zixian/Zhenhuan-VITS/download_model.py b/spaces/zixian/Zhenhuan-VITS/download_model.py
deleted file mode 100644
index 9f1ab59aa549afdf107bf2ff97d48149a87da6f4..0000000000000000000000000000000000000000
--- a/spaces/zixian/Zhenhuan-VITS/download_model.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from google.colab import files
-files.download("./G_latest.pth")
-files.download("./finetune_speaker.json")
-files.download("./moegoe_config.json")
\ No newline at end of file
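google.colab is only importable inside a Colab runtime, so the download script above fails
anywhere else. A minimal guard, assuming a printed hint is an acceptable fallback outside
Colab:

    try:
        from google.colab import files
    except ImportError:
        files = None  # not running inside Colab

    for path in ("./G_latest.pth", "./finetune_speaker.json", "./moegoe_config.json"):
        if files is not None:
            files.download(path)
        else:
            print(f"Not running in Colab; copy {path} out of the workspace manually.")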