diff --git a/spaces/0xSynapse/Segmagine/app.py b/spaces/0xSynapse/Segmagine/app.py
deleted file mode 100644
index 2725a96f1cb65388a6ac57b5af6a1135e37ac437..0000000000000000000000000000000000000000
--- a/spaces/0xSynapse/Segmagine/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import os
-
-import cv2
-import gradio as gr
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-
-from PIL import Image
-
-from segment_anything import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
-
-# suppress server-side GUI windows
-matplotlib.pyplot.switch_backend('Agg')
-
-# setup models
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-sam = sam_model_registry["vit_b"](checkpoint="./sam_vit_b_01ec64.pth")
-sam.to(device=device)
-mask_generator = SamAutomaticMaskGenerator(sam)
-predictor = SamPredictor(sam)
-
-
-# copied from: https://github.com/facebookresearch/segment-anything
-def show_anns(anns):
-    if len(anns) == 0:
-        return
-    sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
-    ax = plt.gca()
-    ax.set_autoscale_on(False)
-    polygons = []
-    color = []
-    for ann in sorted_anns:
-        m = ann['segmentation']
-        img = np.ones((m.shape[0], m.shape[1], 3))
-        color_mask = np.random.random((1, 3)).tolist()[0]
-        for i in range(3):
-            img[:,:,i] = color_mask[i]
-        ax.imshow(np.dstack((img, m*0.35)))
-
-
-# demo function
-def segment_image(input_image):
-
-    if input_image is not None:
-
-        # generate masks
-        masks = mask_generator.generate(input_image)
-
-        # add masks to image
-        plt.clf()
-        ppi = 100
-        height, width, _ = input_image.shape
-        plt.figure(figsize=(width / ppi, height / ppi)) # convert pixel to inches
-        plt.imshow(input_image)
-        show_anns(masks)
-        plt.axis('off')
-
-        # save and get figure
-        plt.savefig('output_figure.png', bbox_inches='tight')
-        output_image = cv2.imread('output_figure.png')
-        return Image.fromarray(output_image)
-
-
-with gr.Blocks() as demo:
-
-    with gr.Row():
-        gr.Markdown("## Segmagine 🎨")
-    with gr.Row():
-        gr.Markdown("Gradio demo for Segment Anything Model (SAM) by Meta AI Research, produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. It has been trained on a dataset of 11 million images and 1.1 billion masks, and has strong zero-shot performance on a variety of segmentation tasks.[Learn More](https://segment-anything.com/)")
-
-    with gr.Row():
-
-        with gr.Column():
-            image_input = gr.Image()
-            segment_image_button = gr.Button('Generate Mask')
-
-        with gr.Column():
-            image_output = gr.Image()
-
-    segment_image_button.click(segment_image, inputs=[image_input], outputs=image_output)
-
-    gr.Examples(
-        examples=[
-            ['./examples/dog.jpg'],
-            ['./examples/groceries.jpg'],
-            ['./examples/truck.jpg']
-
-        ],
-        inputs=[image_input],
-        outputs=[image_output],
-        fn=segment_image,
-        #cache_examples=True
-    )
-
-demo.launch()
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Manager 12.0.342 Keys Keygen The Best Photo Editing Software.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Manager 12.0.342 Keys Keygen The Best Photo Editing Software.md
deleted file mode 100644
index 43995f98dfaf1fd5463c038aec979e59325657f0..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ACDSee Photo Manager 12.0.342 Keys Keygen The Best Photo Editing Software.md
+++ /dev/null
@@ -1,149 +0,0 @@
-<br />

ACDSee Photo Manager 12.0.342 Keys Keygen: How to Get and Use This Amazing Photo Software

-

Do you love taking photos but struggle with organizing, editing, and sharing them? Do you wish you had a photo software that can help you manage your entire photo collection with ease and speed? If you answered yes, then you might want to check out ACDSee Photo Manager 12.0.342.

-

ACDSee Photo Manager 12.0.342 Keys Keygen


Download Filehttps://byltly.com/2uKzi1



-

ACDSee Photo Manager 12.0.342 is one of the most popular and trusted photo managers around, with over 56,000 downloads on Soft32.com. It is a powerful and fast photo software that can help you organize, view, edit, and share your photos with amazing results.

-

However, there is a catch: ACDSee Photo Manager 12.0.342 is not free. You need to pay $69 to get the full version of this software. But don't worry, there is a way to get it for free: by using keys and keygen.

-

In this article, we will show you what keys and keygen are, how they work, how to download them, how to install them, and how to use them to activate ACDSee Photo Manager 12.0.342 on your computer.

-

By the end of this article, you will be able to enjoy all the features and benefits of ACDSee Photo Manager 12.0.342 without spending a dime.

-

What is ACDSee Photo Manager 12.0.342?

-

ACDSee Photo Manager 12.0.342 is a photo software that can help you manage your entire photo collection with ease and speed. It has four main modes: Manage, View, Edit, and Online.

-

In Manage mode, you can access your photos from anywhere on your computer or connected devices without importing them first. You can also sort, group, filter, tag, rate, and backup your photos with ease.

-

In View mode, you can display, zoom, examine, rotate, crop, and remove red eye from your photos with amazing speed and quality. You can also view your photos in slide show mode or full screen mode.

-

In Edit mode, you can fix, fine-tune, and enhance your photos with various tools. You can adjust brightness I'm happy to help you with your task. Here is the continuation of the article I have created based on your prompt.

contrast, color balance, sharpness, noise reduction, and more. You can also apply creative effects, such as sepia, grayscale, vignette, and borders.

-

In Online mode, you can upload your photos to ACDSeeOnline.com, a free online image sharing and storage service. You can also share your photos via email or FTP.

-

ACDSee PhotoManager 12.0.342 Serials License Keys
-ACDSee Photo Manager 12 Full Keys YouTube
-ACDSee Photo Manager 12.0.342 -TrT Serial Key Keygen
-How to Download ACDSee Photo Manager 12.0.342 with Keys for Free
-ACDSee Photo Manager 12 Crack Download
-ACDSee Photo Manager 12 License Code Generator
-ACDSee Photo Manager 12 Activation Key Free
-ACDSee Photo Manager 12 Patch Download
-ACDSee Photo Manager 12 Keygen Only
-ACDSee Photo Manager 12 Serial Number Finder
-ACDSee Photo Manager 12 Registration Code Online
-ACDSee Photo Manager 12 Product Key Valid
-ACDSee Photo Manager 12 Full Version Download
-ACDSee Photo Manager 12 Portable Download
-ACDSee Photo Manager 12 Review and Features
-ACDSee Photo Manager 12 Tutorial and Tips
-ACDSee Photo Manager 12 System Requirements and Compatibility
-ACDSee Photo Manager 12 Comparison with Other Photo Managers
-ACDSee Photo Manager 12 Discount and Coupon Code
-ACDSee Photo Manager 12 Free Trial Download
-ACDSee Photo Manager 12 Latest Update and Release Notes
-ACDSee Photo Manager 12 User Guide and Manual
-ACDSee Photo Manager 12 Support and Help Center
-ACDSee Photo Manager 12 Forum and Community
-ACDSee Photo Manager 12 Alternatives and Similar Software
-How to Install ACDSee Photo Manager 12 on Windows
-How to Uninstall ACDSee Photo Manager 12 from Windows
-How to Backup and Restore ACDSee Photo Manager 12 Settings and Data
-How to Upgrade from Previous Versions of ACDSee Photo Manager to 12.0.342
-How to Fix Common Problems and Errors with ACDSee Photo Manager 12.0.342
-How to Use ACDSee Photo Manager 12 to Organize Your Photos
-How to Use ACDSee Photo Manager 12 to View Your Photos
-How to Use ACDSee Photo Manager 12 to Edit Your Photos
-How to Use ACDSee Photo Manager 12 to Share Your Photos Online
-How to Use ACDSee Photo Manager 12 to Print Your Photos
-How to Use ACDSee Photo Manager 12 to Create Slideshows and Albums
-How to Use ACDSee Photo Manager 12 to Batch Process Your Photos
-How to Use ACDSee Photo Manager 12 to Convert Your Photos to Different Formats
-How to Use ACDSee Photo Manager 12 to Add Metadata and Tags to Your Photos
-How to Use ACDSee Photo Manager 12 to Find Duplicate Photos and Remove Them
-How to Use ACDSee Photo Manager 12 to Protect Your Photos with Passwords and Encryption
-How to Use ACDSee Photo Manager 12 to Enhance Your Photos with Filters and Effects
-How to Use ACDSee Photo Manager 12 to Crop, Rotate, Resize, and Adjust Your Photos
-How to Use ACDSee Photo Manager 12 to Red-Eye Removal, Face Detection, and Skin Smoothing Tools
-How to Use ACDSee Photo Manager 12 to Watermark, Frame, and Stamp Your Photos
-How to Use ACDSee Photo Manager 12 to Compare and Rate Your Photos
-How to Use ACDSee Photo Manager 12 to Manage RAW Files and EXIF Data
-How to Use ACDSee Photo Manager 12 Keyboard Shortcuts and Hotkeys

-

What are Keys and Keygen?

-

Keys and keygen are tools that can help you activate ACDSee Photo Manager 12.0.342 without paying for it.

-

A key is a series of letters and numbers that you need to enter when you install ACDSee Photo Manager 12.0.342 to unlock its full features. A keygen is a program that can generate keys for you automatically.

-

By using keys and keygen, you can bypass the registration process and enjoy ACDSee Photo Manager 12.0.342 for free.

-

How to Download ACDSee Photo Manager 12.0.342 with Keys and Keygen for Free

-

To download ACDSee Photo Manager 12.0.342 with keys and keygen for free, you need to find a reliable source that offers them. There are many websites that claim to provide keys and keygen for ACDSee Photo Manager 12.0.342, but not all of them are safe or trustworthy.

-

Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Some of them may also provide fake or invalid keys and keygen that will not work or cause problems with your software.

-

To avoid these risks, you should only download keys and keygen from reputable sources that have positive feedback from other users. Here are some examples of such sources:

- -

How to Install and Activate ACDSee Photo Manager 12.0.342 with Keys and Keygen

-

To install and activate ACDSee Photo Manager 12.0.342 with keys and keygen, follow these steps:

-
    -
  1. Download the software from one of the sources mentioned above and save it to your computer.
  2. -
  3. Run the file once it is downloaded and follow the instructions on the screen to complete the installation process.
  4. -
  5. When prompted, enter one of the keys provided by the source or generated by the keygen to register your software.
  6. -
  7. Enjoy using ACDSee Photo Manager 12.0.342 with all its features unlocked.
  8. -
-

How to Use ACDSee Photo Manager 12.0.342 to Organize, Edit, and Share Your Photos

-

Now that you have installed and activated ACDSee Photo Manager 12.0.342 with keys and keygen, you can start using it to manage your photos.

-

To use ACDSee Photo Manager 12.0.342 to organize, edit, and share your photos, follow these steps:

-
    -
  1. Launch the software and switch to Manage mode by clicking on the Manage button at the top left corner of the screen.
  2. -
  3. Navigate to the folder where your photos are stored using the Folder pane on the left side of the screen.
  4. -
  5. Select the photos that you want to organize by clicking on them or using Ctrl+click or Shift+click for multiple selections.
  6. -
  7. Use the tools on the right side of the screen to organize your photos according to your preferences:
  8. - -
  9. Switch to View mode by clicking on the View button at the top left corner of the screen.
  10. -
  11. Select a photo that you want to edit by clicking on it in the File List pane at the bottom of the screen.
  12. -
  13. Use the tools on the right side of the screen to edit your photo according to your needs:
  14. - -
  15. Switch to Online mode by clicking on the Online button at the top left corner of the screen.
  16. -
  17. Drag and drop your photos from your computer or connected devices to your own personal 2 GB of free storage space on ACDSeeOnline.com.
  18. -
  19. Share your photos with your friends and family by sending them a link to your online album or posting it on Facebook or Twitter.
  20. -
-

Conclusion

-

ACDSee Photo Manager 12.0.342 is a powerful and fast photo software that can help you organize, view, edit, and share your photos with ease and speed. However, it is not free and you need to pay $69 to get the full version of this software.

-

Fortunately, there is a way to get it for free: by using keys and keygen. Keys and keygen are tools that can help you activate ACDSee Photo Manager 12.0.342 without paying for it. You can download them from reliable sources and use them to install and activate ACDSee Photo Manager 12.0.342 on your computer.

-

By using keys and keygen, you can enjoy all the features and benefits of ACDSee Photo Manager 12.0.342 without spending a dime. You can manage your entire photo collection with ease and speed, edit your photos with amazing quality and creativity, and share your photos with your friends and family online.

-

So what are you waiting for? Download ACDSee Photo Manager 12.0.342 with keys and keygen today and start managing your photos like a pro!

-

FAQs

-

Here are some frequently asked questions and answers about ACDSee Photo Manager 12.0.342, keys, and keygen:

-
    -
  1. Is ACDSee Photo Manager 12.0.342 safe to use?
  2. -

    Yes, ACDSee Photo Manager 12.0.342 is safe to use as long as you download it from a reputable source and scan it for viruses before running it. However, you should be careful when downloading keys and keygen from other websites as they may contain malware or spyware that can harm your computer or steal your personal information. You should only download keys and keygen from reliable sources that have positive feedback from other users.

    -
  3. Is ACDSee Photo Manager 12.0.342 compatible with Windows 10?
  4. -

    No, ACDSee Photo Manager 12.0.342 is not compatible with Windows 10. If you want to use ACDSee Photo Manager on Windows 10, you need to upgrade to a newer version of ACDSee Photo Studio Standard. You can compare the features of different versions of ACDSee Photo Studio here.

    -
  5. How do I uninstall ACDSee Photo Manager 12.0.342?
  6. -

    To uninstall ACDSee Photo Manager 12.0.342 from your computer, follow these steps:

    -
      -
    1. Click on Start > Control Panel > Programs > Programs and Features.
    2. -
    3. Select ACDSee Photo Manager 12 from the list of programs and click on Uninstall/Change.
    4. -
    5. Follow the instructions on the screen to complete the uninstallation process.
    6. -
    -
  7. How do I update ACDSee Photo Manager 12.0.342?
  8. -

    To update ACDSee Photo Manager 12.0.342 to the latest version, follow these steps:

    -
      -
    1. Launch ACDSee Photo Manager 12 and click on Help > Check for Updates.
    2. -
    3. If there is an update available, click on Download Now and follow the instructions on the screen to install it.
    4. -
    -
  9. How do I contact ACD Systems for support?
  10. -

    If you need any help or support with ACDSee Photo Manager 12 or any other ACD Systems products, you can contact them through their website here. You can also find resources and support such as user guides, tutorials, forums, blogs, webinars, FAQs I'm happy to help you with your task. Here is the continuation of the article I have created based on your prompt.

    FAQs, and more on their website.

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adata Classic CH94 Driver Windows 7 91 Troubleshooting Tips and Fixes.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adata Classic CH94 Driver Windows 7 91 Troubleshooting Tips and Fixes.md deleted file mode 100644 index d7d624b9e08bf9ad73a6f606da2e229caa5d9b36..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adata Classic CH94 Driver Windows 7 91 Troubleshooting Tips and Fixes.md +++ /dev/null @@ -1,121 +0,0 @@ -
    -

    ADATA Classic CH94 Driver Windows 7 91: How to Download and Install

    -

    If you have an ADATA Classic CH94 external hard drive and you want to use it with your Windows 7 computer, you may need to download and install a driver for it. A driver is a software that helps your computer communicate with your device and enables its proper functioning. In this article, we will show you how to download and install ADATA Classic CH94 driver for Windows 7 91 in two easy ways.

    -

    adata classic ch94 driver windows 7 91


    Download Filehttps://byltly.com/2uKwpt



    -

    Introduction

    -

    What is ADATA Classic CH94?

    -

    ADATA Classic CH94 is a portable external hard drive that offers up to 640GB of storage capacity. It has a sleek design and comes in various colors. It also features a wrap-around USB cable that makes it easy to carry and use. You can use it to store and backup your files, photos, videos, music, and more.

    -

    Why do you need a driver for ADATA Classic CH94?

    -

    A driver is a software that allows your computer to recognize and communicate with your device. Without a driver, your computer may not be able to detect or access your device properly. You may also encounter errors or performance issues when using your device. Therefore, it is important to have the correct and updated driver for your device.

    -

    How to check your Windows version and system type?

    -

    Before you download and install a driver for your device, you need to check your Windows version and system type. This will help you find the compatible driver for your device. To check your Windows version and system type, follow these steps:

    -

    How to install adata classic ch94 driver on windows 7 91
    -Adata classic ch94 driver windows 7 91 download link
    -Adata classic ch94 driver windows 7 91 compatibility issues
    -Adata classic ch94 driver windows 7 91 not working
    -Adata classic ch94 driver windows 7 91 troubleshooting guide
    -Adata classic ch94 driver windows 7 91 update available
    -Adata classic ch94 driver windows 7 91 error code
    -Adata classic ch94 driver windows 7 91 manual installation
    -Adata classic ch94 driver windows 7 91 support contact
    -Adata classic ch94 driver windows 7 91 review and rating
    -Best alternative to adata classic ch94 driver windows 7 91
    -Adata classic ch94 driver windows 7 91 vs adata classic ch11 driver
    -Adata classic ch94 driver windows 7 91 features and specifications
    -Adata classic ch94 driver windows 7 91 warranty and service
    -Adata classic ch94 driver windows 7 91 price and availability
    -How to uninstall adata classic ch94 driver windows 7 91
    -Adata classic ch94 driver windows 7 91 backup and restore
    -Adata classic ch94 driver windows 7 91 performance and speed
    -Adata classic ch94 driver windows 7 91 security and encryption
    -Adata classic ch94 driver windows 7 91 software and firmware
    -How to format adata classic ch94 external hard drive on windows 7 91
    -How to recover data from adata classic ch94 external hard drive on windows 7 91
    -How to partition adata classic ch94 external hard drive on windows 7 91
    -How to test adata classic ch94 external hard drive on windows 7 91
    -How to fix adata classic ch94 external hard drive not detected on windows 7 91
    -How to use adata classic ch94 external hard drive with mac os x
    -How to use adata classic ch94 external hard drive with linux
    -How to use adata classic ch94 external hard drive with xbox one
    -How to use adata classic ch94 external hard drive with ps4
    -How to use adata classic ch94 external hard drive with smart tv
    -Benefits of using adata classic ch94 external hard drive for backup and storage
    -Drawbacks of using adata classic ch94 external hard drive for backup and storage
    -Tips and tricks for using adata classic ch94 external hard drive for backup and storage
    -How to protect adata classic ch94 external hard drive from damage and theft
    -How to clean and maintain adata classic ch94 external hard drive

    -
      -
    1. Click on the Start button and type "system" in the search box.
    2. -
    3. Select System from the list of results.
    4. -
    5. In the System window, you will see your Windows edition, service pack, and system type.
    6. -
    -

    For example, if you see "Windows 7 Service Pack 1 (SP1) - 64-bit", it means you have Windows 7 with SP1 installed and your system type is 64-bit.

    -

    How to download ADATA Classic CH94 driver for Windows 7 91

    -

    Option 1: Download from ADATA official website

    -

    The first option is to download the driver from ADATA official website. This is the recommended option as you can get the latest and official driver for your device. To download the driver from ADATA official website, follow these steps:

    -

    Step 1: Go to ADATA support page

    -

    Open your web browser and go to https://www.adata.com/us/support/driver?tab=downloads. This is the support page of ADATA where you can find drivers, manuals, firmware, software, and more for various products.

    -

    Step 2: Select your product category and model

    -

    On the support page, select "External HDD" from the product category dropdown menu. Then select "Classic Series" from the product series dropdown menu. Finally, select "CH94" from the product model dropdown menu. You will see a list of drivers available for download.

    -

    Step 3: Download the driver file

    -

    Find the driver that matches your Windows version and system type. For example, if you have Windows 7 SP1 -64-bit, look for "CH94 Driver (Windows Vista/7/8/10) -64bit". Click on the download icon next to the driver name. Save the file to a location where you can easily find it later.

    -

    Option 2: Download from DriverDouble website

    -

    The second option is to download the driver from DriverDouble website. This is an alternative option if you cannot find or access the driver from ADATA official website. DriverDouble is a website that provides drivers for various devices from different manufacturers. To download the driver from DriverDouble website, follow these steps:

    -

    Step 1: Go to DriverDouble website

    -

    Open your web browser and go to https://driverdouble.com/drivers/adata-ch94-classic.html. This is the page where you can find drivers for ADATA CH94 Classic device.

    -

    Step 2: Search for ADATA CH94 Classic driver

    -

    On the page, scroll down until you see a list of drivers available for download. Find the driver that matches your Windows version and system type. For example, if you have Windows 7 SP1 -64-bit, look for "ADATA CH94 Classic - windows vista-7-8-10 drivers". Click on "Download Now" button next to the driver name.

    -

    Step 3: Download the driver file

    -

    You will be redirected to another page where you can download the driver file. Click on "Download Now" button again and save the file to a location where you can easily find it later.

    -

    How to install ADATA Classic CH94 driver for Windows 7 91

    -

    Option 1: Install using the downloaded file

    The first option is to install the driver using the downloaded file. This is the easiest and fastest way to install the driver. To install the driver using the downloaded file, follow these steps:

    Step 1: Locate the downloaded file and double-click on it

    Navigate to the location where you saved the downloaded file. The file name should be something like "CH94_Driver_Win_Vista_7_8_10_64bit.zip" or "ADATA_CH94_Classic_driver.zip". Double-click on the file to open it. You will see a folder containing the driver files.

    Step 2: Follow the on-screen instructions to complete the installation

    In some cases, you may need to extract the folder first before running the installation. To extract the folder, right-click on it and select "Extract All". Then choose a destination where you want to extract it. After extracting, open the folder and look for an executable file such as "setup.exe" or "install.exe". Double-click on the executable file to run the installation. Follow the on-screen instructions to complete

    Step 3: Restart your computer if prompted

    -

    Some drivers may require you to restart your computer after the installation. If you see a message asking you to restart your computer, click on "Yes" or "Restart Now". This will ensure that the driver is properly installed and activated.

    -

    Option 2: Install using Device Manager

    -

    The second option is to install the driver using Device Manager. This is an alternative option if you encounter any problems or errors when installing the driver using the downloaded file. Device Manager is a tool that allows you to manage and update the devices connected to your computer. To install the driver using Device Manager, follow these steps:

    -

    Step 1: Connect your ADATA Classic CH94 to your computer

    -

    Plug your ADATA Classic CH94 into a USB port on your computer. Make sure that the device is securely connected and powered on.

    -

    Step 2: Open Device Manager and find your device

    -

    Click on the Start button and type "device manager" in the search box. Select Device Manager from the list of results. In the Device Manager window, look for your device under "Disk drives" or "Other devices". It may be labeled as "ADATA CH94 Classic" or "Unknown device". If you see a yellow exclamation mark or a red cross next to your device, it means that there is a problem with the driver.

    -

    Step 3: Right-click on your device and select Update driver software

    -

    Right-click on your device and select "Update driver software" from the menu. This will open a new window where you can choose how to update your driver.

    -

    Step 4: Choose Browse my computer for driver software and locate the downloaded file

    -

    In the new window, choose "Browse my computer for driver software". This will allow you to manually select the driver file that you downloaded earlier. Click on "Browse" and navigate to the location where you saved the downloaded file. The file name should be something like "CH94_Driver_Win_Vista_7_8_10_64bit.zip" or "ADATA_CH94_Classic_driver.zip". Select the file and click on "Open". Then click on "Next".

    -

    Step 5: Follow the on-screen instructions to complete the installation

    -

    The system will start installing the driver for your device. Follow the on-screen instructions to complete the installation. You may see a warning message saying that the driver is not digitally signed or verified. This is normal and you can ignore it. Just click on "Install this driver software anyway" or "Continue anyway".

    -

    Step 6: Restart your computer if prompted

    -

    Some drivers may require you to restart your computer after the installation. If you see a message asking you to restart your computer, click on "Yes" or "Restart Now". This will ensure that the driver is properly installed and activated.

    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have shown you how to download and install ADATA Classic CH94 driver for Windows 7 91 in two easy ways. You can either download the driver from ADATA official website or from DriverDouble website. Then you can install the driver using the downloaded file or using Device Manager. Both methods are simple and effective.

    -

    Benefits of using ADATA Classic CH94 driver for Windows 7 91

    -

    By using ADATA Classic CH94 driver for Windows 7 91, you can enjoy several benefits such as:

    - -

    We hope this article has helped you download and install ADATA Classic CH94 driver for Windows 7 91 successfully. If you have any questions or feedback, please feel free to leave a comment below.

    - **FAQs** Q: How do I know if my ADATA Classic CH94 driver is up to date? A: You can check if your ADATA Classic CH94 driver is up to date by using Device Manager. To do this, follow these steps:
      -
    1. Connect your ADATA Classic CH94 to your computer.
    2. -
    3. Open Device Manager and find your device under "Disk drives" or "Other devices".
    4. -
    5. Right-click on your device and select "Properties".
    6. -
    7. In the Properties window, click on the "Driver" tab.
    8. -
    9. You will see the driver version, date, provider, and other information.
    10. -
    11. If you see a newer version of the driver available on ADATA official website or DriverDouble website, you can download and install it following the steps in this article.
    12. -
    - Q: How do I uninstall ADATA Classic CH94 driver from my computer? A: You can uninstall ADATA Classic CH94 driver from your computer by using Device Manager. To do this, follow these steps:
      -
    1. Connect your ADATA Classic CH94 to your computer.
    2. -
    3. Open Device Manager and find your device under "Disk drives" or "Other devices".
    4. -
    5. Right-click on your device and select "Uninstall".
    6. -
    7. In the confirmation window, check the box that says "Delete the driver software for this device" and click on "OK".
    8. -
    9. Restart your computer if prompted.
    10. -
    - Q: How do I format my ADATA Classic CH94 external hard drive? A: You can format your ADATA Classic CH94 external hard drive by using Disk Management. To do this, follow these steps:
      -
    1. Connect your ADATA Classic CH94 to your computer.
    2. -
    3. Click on the Start button and type "disk management" in the search box.
    4. Select Disk Management from the list of results.
    5. In the Disk Management window, look for your external hard drive under the volume list. It may be labeled as "ADATA CH94" or something similar.
    6. Right-click on your external hard drive and select "Format".
    7. In the Format window, choose the file system, allocation unit size, volume label, and format options that you want. You can use the default settings or customize them according to your preference.
    8. Click on "OK" to start the formatting process. Be aware that this will erase all the data on your external hard drive, so make sure you have backed up any important files beforehand.
    9. Wait until the formatting process is completed. You will see a message saying that the format was successful.
    - Q: How do I troubleshoot ADATA Classic CH94 external hard drive problems? A: If you encounter any problems or errors when using your ADATA Classic CH94 external hard drive, you can try some of these troubleshooting tips: - Q: How do I contact ADATA customer service or technical support? A: You can contact ADATA customer service or technical support by using one of these methods: -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Counter-Strike Source Full indir Tek link ile Efsanevi FPS Oyununu Deneyimleyin.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Counter-Strike Source Full indir Tek link ile Efsanevi FPS Oyununu Deneyimleyin.md deleted file mode 100644 index b1d2874ba3ada0d4bca4164a6ed340bc0cfdf9db..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Counter-Strike Source Full indir Tek link ile Efsanevi FPS Oyununu Deneyimleyin.md +++ /dev/null @@ -1,140 +0,0 @@ - -

    Maya 2017 xforce keygen x64 x86: How to download and activate Autodesk products

    -

    If you are looking for a powerful and versatile 3D software that can help you create stunning animations, models, simulations, and renderings, you might want to check out Maya 2017. Maya 2017 is one of the most popular products of Autodesk, a leading company in the field of design and engineering software. However, to use Maya 2017, you need to have a valid product key that can activate the software. This is where xforce keygen comes in handy. Xforce keygen is a software that can generate product keys for any Autodesk product of 2017 version, including Maya 2017. In this article, we will show you how to download and use xforce keygen to activate Maya 2017 xforce keygen x64 x86.

    -

    Maya2017xforcekeygenx64x86


    Downloadhttps://byltly.com/2uKv1u



    -

    What is Maya 2017 and what are its features?

    -

    Maya 2017 is a 3D animation, modeling, simulation, and rendering software

    -

    Maya 2017 is a comprehensive software that can help you create amazing 3D content for various purposes, such as games, films, TV shows, advertisements, and more. With Maya 2017, you can create realistic characters, environments, effects, and animations using a range of tools and workflows. You can also import and export data from other software, such as Photoshop, After Effects, Mudbox, MotionBuilder, and more.

    -

    Maya 2017 has new features such as motion graphics, time editor, Arnold renderer, and more

    -

    Maya 2017 has introduced some new features that can enhance your creativity and productivity. Some of these features are:

    - -

    What is xforce keygen and how does it work?

    -

    Xforce keygen is a software that generates product keys for Autodesk products

    -

    Xforce keygen is a software that can help you activate any Autodesk product of 2017 version without paying for a license. Xforce keygen works by generating a unique product key for each Autodesk product based on its serial number and request code. The product key can then be used to activate the software online or offline.

    -

    Xforce keygen can activate any Autodesk product of 2017 version

    -

    Xforce keygen can generate product keys for any Autodesk product of 2017 version, such as AutoCAD, Revit, Inventor, Fusion 360, Civil 3D, 3ds Max,

    How to download and install Maya 2017 xforce keygen x64 x86?

    -

    Download xforce keygen from a reliable source

    -

    To download xforce keygen for Maya 2017, you need to find a reliable source that offers the software for free. You can search online for xforce keygen 2017 and choose a website that has positive reviews and ratings. You can also use the links provided by some of the web search results . However, you need to be careful and avoid downloading any malware or virus that might harm your computer. You should also scan the downloaded file with an antivirus software before opening it.

    -

    Install xforce keygen on your computer

    -

    To install xforce keygen on your computer, you need to follow these steps:

    -
      -
    1. Extract the downloaded file using a software such as WinRAR or 7-Zip.
    2. -
    3. Open the extracted folder and find the file named xf-adsk2017_x64.exe or xf-adsk2017_x86.exe depending on your system architecture.
    4. -
    5. Right-click on the file and choose Run as administrator.
    6. -
    7. Click on Yes if prompted by User Account Control.
    8. -
    9. Wait for the installation to complete and close the window.
    10. -
    -

    How to use xforce keygen to activate Maya 2017?

    -

    Run xforce keygen as administrator

    -

    To run xforce keygen as administrator, you need to follow these steps:

    -
      -
    1. Go to the folder where you installed xforce keygen and find the file named xf-adsk2017_x64.exe or xf-adsk2017_x86.exe depending on your system architecture.
    2. -
    3. Right-click on the file and choose Run as administrator.
    4. -
    5. Click on Yes if prompted by User Account Control.
    6. -
    7. You will see a window with a list of Autodesk products and a button named Patch.
    8. -
    -

    Select Maya 2017 from the product list and copy the product key

    -

    To select Maya 2017 from the product list and copy the product key, you need to follow these steps:

    -
      -
    1. In the window of xforce keygen, scroll down and find Maya 2017 from the product list. The product key for Maya 2017 is 657I1.
    2. -
    3. Click on the product name and copy the product key by pressing Ctrl + C or right-clicking and choosing Copy.
    4. -
    5. You will need this product key later when you activate Maya 2017.
    6. -
    -

    Finish the installation of Maya 2017 and restart it

    -

    To finish the installation of Maya 2017 and restart it, you need to follow these steps:

    -

    Maya 2017 xforce keygen 64 bit download
    -Maya 2017 xforce keygen 32 bit free
    -Maya 2017 xforce keygen activation code
    -Maya 2017 xforce keygen crack file
    -Maya 2017 xforce keygen online generator
    -Maya 2017 xforce keygen windows 10
    -Maya 2017 xforce keygen mac os
    -Maya 2017 xforce keygen linux
    -Maya 2017 xforce keygen torrent link
    -Maya 2017 xforce keygen rar password
    -Maya 2017 xforce keygen zip file
    -Maya 2017 xforce keygen serial number
    -Maya 2017 xforce keygen product key
    -Maya 2017 xforce keygen license key
    -Maya 2017 xforce keygen full version
    -Maya 2017 xforce keygen latest update
    -Maya 2017 xforce keygen how to use
    -Maya 2017 xforce keygen instructions
    -Maya 2017 xforce keygen tutorial video
    -Maya 2017 xforce keygen troubleshooting guide
    -Maya 2017 xforce keygen reviews and ratings
    -Maya 2017 xforce keygen alternatives and competitors
    -Maya 2017 xforce keygen benefits and features
    -Maya 2017 xforce keygen pros and cons
    -Maya 2017 xforce keygen comparison and analysis
    -Maya 2017 xforce keygen best practices and tips
    -Maya 2017 xforce keygen FAQs and answers
    -Maya 2017 xforce keygen support and contact
    -Maya 2017 xforce keygen forum and community
    -Maya 2017 xforce keygen blog and news
    -Maya 2017 xforce keygen software and tools
    -Maya 2017 xforce keygen plugins and addons
    -Maya 2017 xforce keygen scripts and codes
    -Maya 2017 xforce keygen templates and models
    -Maya 2017 xforce keygen textures and materials
    -Maya 2017 xforce keygen lighting and rendering
    -Maya 2017 xforce keygen animation and rigging
    -Maya 2017 xforce keygen dynamics and effects
    -Maya 2017 xforce keygen sculpting and painting
    -Maya 2017 xforce keygen uv mapping and unwrapping
    -Maya 2017 xforce keygen skinning and weight painting
    -Maya 2017 xforce keygen hair and fur simulation
    -Maya 2017 xforce keygen cloth and fabric simulation
    -Maya 2017 xforce keygen fluid and smoke simulation
    -Maya 2017 xforce keygen fire and explosion simulation
    -Maya 2017 xforce keygen particles and instancing
    -Maya 2017 xforce keygen motion graphics and design
    -Maya 2017 xforce keygen game development and integration
    -Maya 2017 xforce keygen virtual reality and augmented reality

    -
      -
    1. If you have not installed Maya 2017 yet, you can download it from the official website of Autodesk or from other sources. You can also use a trial version of Maya 2017 if you do not have a license.
    2. -
    3. Run the installer of Maya 2017 and follow the instructions on the screen. When asked to enter a serial number, enter anything you want or use this one: 666-69696969.
    4. -
    5. When asked to enter a product key, paste the product key that you copied from xforce keygen by pressing Ctrl + V or right-clicking and choosing Paste. The product key for Maya 2017 is 657I1.
    6. -
    7. Complete the installation process and restart Maya 2017.
    8. -
    -

    Click on activate and paste the product key generated by xforce keygen

    -

    To click on activate and paste the product key generated by xforce keygen, you need to follow these steps:

    -
      -
    1. When you restart Maya 2017, you will see a window that asks you to activate your product. Click on Activate.
    2. -
    3. You will see another window that asks you to connect to the internet or enter an activation code. Choose I have an activation code from Autodesk.
    4. -
    5. Go back to the window of xforce keygen and click on Patch. You will see a message that says Successfully patched. If not, make sure you run xforce keygen as administrator and try again.
    6. -
    7. In the window of xforce keygen, click on Generate. You will see a long code in the Activation field. Copy this code by pressing Ctrl + C or right-clicking and choosing Copy.
    8. -
    9. Go back to the window of Maya 2017 activation and paste the code in the Activation field by pressing Ctrl + V or right-clicking and choosing Paste. Make sure you fill all the boxes with the code.
    10. -
    11. Click on Next. You will see a message that says Thank you for activating your Autodesk product. Click on Finish.
    12. -
    -

    Conclusion and FAQs

    -

    In this article, we have shown you how to download and use xforce keygen to activate Maya 2017 xforce keygen x64 x86. Xforce keygen is a software that can generate product keys for any Autodesk product of 2017 version, including Maya 2017. Maya 2017 is a powerful and versatile 3D software that can help you create stunning animations, models, simulations, and renderings. However, using xforce keygen is illegal and unethical, as it violates the terms and conditions of Autodesk. Therefore, we do not recommend using xforce keygen for any purpose other than educational or testing purposes. If you want to use Maya 2017 legally and ethically, you should buy a license from Autodesk or use other alternatives such as Blender or SketchUp.

    -

    If you have any questions about using xforce keygen or Maya 2017, here are some FAQs that might help you:

    - -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fight Night Round 4 Pc Game Free High Quality Download U Torrent Pirate Bay.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fight Night Round 4 Pc Game Free High Quality Download U Torrent Pirate Bay.md deleted file mode 100644 index 95617f18ae5d3b90ed237f90a19df5dbb14810fd..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fight Night Round 4 Pc Game Free High Quality Download U Torrent Pirate Bay.md +++ /dev/null @@ -1,95 +0,0 @@ - -

    Fight Night Round 4 PC Game Free Download

    -

    Introduction

    -

    Do you love boxing games? Do you want to experience the thrill of fighting as some of the greatest boxers of all time? If you answered yes, then you might be interested in playing Fight Night Round 4 on your PC.

    -

    What is Fight Night Round 4?

    -

    Fight Night Round 4 is a boxing video game developed by EA Sports and released in 2009 for PlayStation 3 and Xbox 360. It is the sequel to Fight Night Round 3, which was released in 2006. It features a mix of today's best and the greatest of all time, with over 40 licensed boxers to choose from. You can leave your mark on the sport by moving up the rankings as an unknown fighter, pumping up your popularity, and dominating fighters in multiple weight classes on the way to becoming a legend. You can also challenge other players online or offline in various modes, such as exhibition, career, legacy, or world championship.

    -

    fight night round 4 pc game free download u torrent pirate bay


    Download File ———>>> https://byltly.com/2uKwlw



    -

    Why play Fight Night Round 4 on PC?

    -

    While Fight Night Round 4 is a great game on its own, playing it on PC can offer some advantages over playing it on consoles. For example, you can enjoy better graphics, faster loading times, smoother gameplay, and more customization options. You can also use your preferred controller or keyboard and mouse to play the game. Moreover, you can access mods and cheats that can enhance your gaming experience or make it more fun.

    -

    How to download Fight Night Round 4 for PC?

    -

    Unfortunately, Fight Night Round 4 is not officially available for PC. However, there are still ways to play it on your computer. Here are two options that you can try:

    -

    Fight Night Round 4 PC version GamesKnit
    -How to install Fight Night Round 4 on PC
    -Fight Night Round 4 system requirements for PC
    -Fight Night Round 4 PC activation code
    -Fight Night Round 4 boxing video game EA Sports
    -Muhammad Ali and Mike Tyson in Fight Night Round 4
    -Fight Night Round 4 classic control scheme
    -Fight Night Round 4 demo download
    -Fight Night Round 4 net energy gain
    -Fight Night Round 4 legacy mode
    -Fight Night Round 4 over 40 licensed boxers
    -Ricky Hatton and Manny Pacquiao in Fight Night Round 4
    -Fight Night Round 4 gameplay tips and tricks
    -Fight Night Round 4 review and rating
    -Fight Night Round 4 cheats and hacks
    -Fight Night Round 4 best fighters and strategies
    -Fight Night Round 4 online multiplayer mode
    -Fight Night Round 4 soundtrack and music
    -Fight Night Round 4 graphics and animation
    -Fight Night Round 4 comparison with previous games
    -Fight Night Round 4 DLC and updates
    -Fight Night Round 4 mods and customizations
    -Fight Night Round 4 career mode and challenges
    -Fight Night Round 4 unlockables and rewards
    -Fight Night Round 4 roster and stats
    -Fight Night Round 4 news and announcements
    -Fight Night Round 4 trailer and screenshots
    -Fight Night Round 4 forum and community
    -Fight Night Round 4 guide and walkthrough
    -Fight Night Round 4 features and improvements
    -Fight Night Round 4 problems and solutions
    -Fight Night Round 4 patch notes and bug fixes
    -Fight Night Round 4 controller support and settings
    -Fight Night Round 4 steam key and download link
    -Fight Night Round 4 crack and serial number
    -Fight Night Round 4 torrent magnet link and seeders
    -Fight Night Round 4 pirate bay proxy and mirror sites
    -Fight Night Round 4 alternatives and similar games
    -Fight Night Round 4 windows compatibility mode
    -Fight Night Round 4 linux version and winehq support

    -

    Option 1: Use an emulator

    -

    What is an emulator?

    -

    An emulator is a software that allows you to run games or applications designed for one system on another system. For example, you can use an emulator to run PlayStation 3 games on your PC.

    -

    How to use an emulator to play Fight Night Round 4?

    -

    One of the most popular emulators for PlayStation 3 games is RPCS3. It is a free and open-source emulator that can run many PS3 games with high compatibility and performance. To use RPCS3 to play Fight Night Round 4, you need to follow these steps:

    -
      -
    1. Download and install RPCS3 from its official website: https://rpcs3.net/
    2. -
    3. Download and install the PS3 firmware from the same website: https://rpcs3.net/quickstart
    4. -
    5. Download or rip the ISO file of Fight Night Round 4 from your PS3 disc or online source.
    6. -
    7. Launch RPCS3 and click on File > Boot Game.
    8. -
    9. Select the ISO file of Fight Night Round 4 and click on Open.
    10. -
    11. Wait for the game to load and enjoy!
    12. -
    -

    Note: You may need to adjust some settings in RPCS3 to optimize the game's performance and compatibility. You can check the official wiki for more information: https://wiki.rpcs3.net/index.php?title=Help:Game_Patches

    -

    Option 2: Use a torrent site

    -

    What is a torrent site?

    -

    A torrent site is a website that hosts torrent files, which are small files that contain information about larger files that can be downloaded from other users through a peer-to-peer network. For example, you can use a torrent site to download movies, music, games, or software.

    -

    How to use a torrent site to download Fight Night Round 4?

    -

    To use a torrent site to download Fight Night Round 4 for PC, you need to follow these steps:

    -
      -
    1. Download and install a torrent client, such as uTorrent or BitTorrent: https://www.utorrent.com/ or https://www.bittorrent.com/
    2. -
    3. Go to a torrent site that has Fight Night Round 4 for PC, such as The Pirate Bay: https://thepiratebay.org/
    4. -
    5. Search for "Fight Night Round 4 PC" and find a torrent that has good ratings and comments.
    6. -
    7. Click on the magnet link or download the torrent file and open it with your torrent client.
    8. -
    9. Wait for the download to finish and open the folder where the game files are stored.
    10. -
    11. Follow the instructions in the readme file or crack folder to install and run the game.
    12. -
    -

    Note: Downloading games from torrent sites may be illegal in some countries and may expose you to viruses or malware. Use this option at your own risk.

    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have discussed what Fight Night Round 4 is, why you might want to play it on PC, and how you can download it for PC using two options: an emulator or a torrent site. We have also provided some links and tips to help you with each option.

    -

    Call to action

    -

    If you are a fan of boxing games and want to play Fight Night Round 4 on your PC, we hope this article has been helpful for you. Now it's time for you to choose your option and start downloading the game. Don't forget to share this article with your friends who might also be interested in playing Fight Night Round 4 on PC. Have fun!

    - - - - - - - -
    FAQs
    Q: Is Fight Night Round 4 compatible with Windows 10?A: Yes, both options should work fine with Windows 10.
    Q: Can I play Fight Night Round 4 online with other players?A: Yes, if you use an emulator, you can use its online features as long as you have a valid PSN account. If you use a torrent site, you may need to use a VPN or a LAN software to play online with other players.
    Q: What are some other boxing games that I can play on PC?A: Some other boxing games that you can play on PC are Real Boxing, Creed: Rise to Glory, Punch Club, Boxing School, and Knockout League.
    Q: What are some other emulators that I can use to play PS3 games on PC?A: Some other emulators that you can use to play PS3 games on PC are ESX PS3 Emulator (https://esxemulator.com/) and PS Now (https://www.playstation.com/en-us/ps-now/).
    Q: What are some other torrent sites that I can use to download games for PC?A: Some other torrent sites that you can use to download games for PC are RARBG (https://rarbg.to/), Kickass Torrents (https://katcr.co/), LimeTorrents (https://www.limetorrents.info/), and Torrentz2 (https://torrentz2.eu/).
    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Audio Amplifier Pro Serial LINK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Audio Amplifier Pro Serial LINK.md deleted file mode 100644 index 91efcd2ffc6e58b36f85933d2927d40878fa2e5a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Audio Amplifier Pro Serial LINK.md +++ /dev/null @@ -1,74 +0,0 @@ -
    -

    Audio Amplifier Pro Serial: How to Boost and Normalize Your Audio & Video Files

    -

    If you are looking for a simple and effective tool to adjust the volume of your audio and video files, you may want to try Audio Amplifier Pro. This software allows you to increase or decrease the volume of any audio or video file, without affecting the quality or the format. You can also normalize all your files to the maximum or average volume level, to avoid clipping or distortion.

    -

    In this article, we will show you how to use Audio Amplifier Pro Serial to enhance your audio and video experience. We will also explain how to crack and register the full version of the software, so you can enjoy all its features and benefits.

    -

    Audio Amplifier Pro Serial


    Download File ✒ ✒ ✒ https://imgfil.com/2uxYfc



    -

    What is Audio Amplifier Pro?

    -

    Audio Amplifier Pro is a software that works with all key audio and video formats, such as MP3, WAV, WMA, OGG, FLAC, AVI, MP4, WMV, MOV, MKV, etc. It supports batch processing, so you can adjust the volume of multiple files at once. It has a user-friendly interface that makes it easy to use for anyone.

    -

    Audio Amplifier Pro has two main functions: boost and normalize. Boosting means increasing or decreasing the volume of a file by a certain percentage or decibel. Normalizing means setting all files to a certain volume level, either maximum or average. Both functions can help you improve the sound quality of your files, especially if they are too low or too high in volume.

    -

    How to Use Audio Amplifier Pro Serial?

    -

    To use Audio Amplifier Pro Serial, you need to download and install the software from its official website or from a trusted source. Then, you need to copy and paste the serial key into the registration window, to activate the full version of the software. The serial key is usually provided by the crack file or by the online generator.

    -

    Once you have registered the software, you can start using it to adjust the volume of your audio and video files. Here are the steps to follow:

    -
      -
    1. Launch Audio Amplifier Pro and click on the "Add Files" button to browse and select the files you want to process.
    2. -
    3. Choose whether you want to boost or normalize the volume of your files. If you choose boost, you can enter the percentage or decibel value you want to increase or decrease by. If you choose normalize, you can select either maximum or average as the target volume level.
    4. -
    5. Click on the "Save Changes" button to apply the volume adjustment to your files. You can choose whether you want to overwrite the original files or save them as new files in a different folder.
    6. -
    7. Enjoy your enhanced audio and video files!
    8. -
    -

    Why Choose Audio Amplifier Pro Serial?

    -

    There are many reasons why you may want to choose Audio Amplifier Pro Serial as your preferred tool for volume adjustment. Here are some of them:

    -

    - -

    Conclusion

    -

    Audio Amplifier Pro Serial is a great software that can help you boost and normalize the volume of your audio and video files. It is simple, easy, fast, and effective. It works with all key audio and video formats, without affecting their quality or format. It supports batch processing, so you can process multiple files at once. It offers both boost and normalize functions, so you can choose the best option for your needs and preferences.

    -

    If you want to enhance your audio and video experience, you should try Audio Amplifier Pro Serial today. You can download it from its official website or from a trusted source. You can also crack and register it for free by using a serial key. You will be amazed by how much difference it can make in your sound quality.

    -

    How to Download Audio Amplifier Pro Serial?

    -

    If you want to download Audio Amplifier Pro Serial, you have two options: you can either download it from its official website or from a trusted source. The official website offers a free trial version of the software, which you can use for a limited time and with some restrictions. You can also buy the full version of the software from the official website, which costs $29.95.

    -

    However, if you don't want to spend money on the software, you can also download it from a trusted source, such as 4DOWNLOAD, AbbasPC, or OpenSea. These sources provide you with the crack file or the serial key generator, which you can use to activate the full version of the software for free. You just need to follow the instructions provided by these sources to download and install the software.

    -

    What are the Benefits of Audio Amplifier Pro Serial?

    -

    Audio Amplifier Pro Serial has many benefits that make it worth downloading and using. Here are some of them:

    - -


    How to Troubleshoot Audio Amplifier Pro Serial?

    -

    Sometimes, you may encounter some problems or errors when using Audio Amplifier Pro Serial. For example, you may get a message saying that the serial key is invalid or expired, or that the software cannot load or process your files. In such cases, you need to troubleshoot the issue and find a solution.

    -

    Here are some common troubleshooting tips for Audio Amplifier Pro Serial:

    - -

    What are the Alternatives to Audio Amplifier Pro Serial?

    -

    Audio Amplifier Pro Serial is not the only software that can help you boost and normalize the volume of your audio and video files. There are many other alternatives that offer similar or different features and benefits. Here are some of them:

    - -


    Conclusion

    -

    Audio Amplifier Pro Serial is a great software that can help you boost and normalize the volume of your audio and video files. It is simple, easy, fast, and effective. It works with all key audio and video formats, without affecting their quality or format. It supports batch processing, so you can process multiple files at once. It offers both boost and normalize functions, so you can choose the best option for your needs and preferences.

    -

    If you want to enhance your audio and video experience, you should try Audio Amplifier Pro Serial today. You can download it from its official website or from a trusted source. You can also crack and register it for free by using a serial key. You will be amazed by how much difference it can make in your sound quality.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Comment jouer Jeux Ludo Master APK sur votre smartphone ou tablette.md b/spaces/1phancelerku/anime-remove-background/Comment jouer Jeux Ludo Master APK sur votre smartphone ou tablette.md deleted file mode 100644 index c7d3bfd73212a12bc0cc1a8119121a960e1b3038..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Comment jouer Jeux Ludo Master APK sur votre smartphone ou tablette.md +++ /dev/null @@ -1,107 +0,0 @@ -
    -

    Jeux Ludo Master APK: How to Play the Classic Board Game on Your Android Device

    -

    Do you love playing board games with your friends and family? Do you want to enjoy a fun and exciting game of ludo on your smartphone or tablet? If you answered yes, then you should try Jeux Ludo Master APK, a cross-platform multiplayer ludo game that lets you play with up to six players online or offline. In this article, we will show you how to download and install Ludo Master APK, how to play it with your loved ones, and some tips and tricks to win more games. Let's get started!

    -

    jeux ludo master apk


    Download File · https://jinyurl.com/2uNRJ1



    -

    Introduction

    -

    What is Ludo Master?

    -

    Ludo Master is a ludo board game app developed by Hippo Lab. It is based on the classic Indian game of Pachisi, which originated in the 6th century AD. The game involves four players, each with four tokens of the same color, who compete to move their tokens from their starting corner to the center of the board. The movement of the tokens is determined by rolling a six-sided dice. The game is simple to learn but challenging to master, as it requires both strategy and luck.

    -

    Why play Ludo Master?

    -

    Ludo Master is one of the best ludo games available on Android devices. Here are some of the reasons why you should play it:

    - -

    How to download and install Ludo Master APK

    -

    Step 1: Go to the Google Play Store or APKCombo

    -

    To download and install Ludo Master APK, you have two options. You can either go to the Google Play Store or APKCombo, which are both reliable sources of Android apps. If you choose the Google Play Store, you will need a Google account to access it. If you choose APKCombo, you will need to enable unknown sources on your device settings to install apps from outside the Play Store.

    -

    Step 2: Search for Ludo Master and tap on the app

    -

    Once you are on the Google Play Store or APKCombo, search for "Ludo Master" in the search bar. You will see a list of results that match your query. Tap on the app that has the logo of a red dice with four colored tokens. This is the official app of Ludo Master by Hippo Lab.

    -

    jeux ludo master apk download
    -jeux ludo master apk mod
    -jeux ludo master apk latest version
    -jeux ludo master apk offline
    -jeux ludo master apk hack
    -jeux ludo master apk free
    -jeux ludo master apk android
    -jeux ludo master apk online
    -jeux ludo master apk update
    -jeux ludo master apk for pc
    -jeux ludo master apk 2023
    -jeux ludo master apk full
    -jeux ludo master apk premium
    -jeux ludo master apk pro
    -jeux ludo master apk cracked
    -jeux ludo master apk unlimited money
    -jeux ludo master apk old version
    -jeux ludo master apk revdl
    -jeux ludo master apk rexdl
    -jeux ludo master apk uptodown
    -jeux ludo master apk pure
    -jeux ludo master apk mob.org
    -jeux ludo master apk apkpure
    -jeux ludo master apk apkmirror
    -jeux ludo master apk apknite
    -jeux ludo master apk appvn
    -jeux ludo master apk aptoide
    -jeux ludo master apk android 1
    -jeux ludo master apk android oyun club
    -jeux ludo master apk andropalace
    -jeux ludo master apk blackmod
    -jeux ludo master apk by rexdl.com
    -jeux ludo master apk by revdl.com
    -jeux ludo master apk by apkpure.com
    -jeux ludo master apk by apkmirror.com
    -jeux ludo master apk by apknite.com
    -jeux ludo master apk by appvn.com
    -jeux ludo master apk by aptoide.com
    -jeux ludo master apk by android 1.com
    -jeux ludo master apk by android oyun club.com
    -jeux ludo master apk by andropalace.net
    -jeux ludo master board game - apps on google play[^1^]
    -jeux de société de maître de LUDO - applications sur Google Play[^1^]
    -LUDO Master Board Game - Applications sur Google Play[^1^]
    -LUDO Master Board Game - Apps no Google Play[^1^]
    -LUDO Master Board Game - Apps en Google Play[^1^]
    -LUDO Master Board Game - App su Google Play[^1^]
    -LUDO Master Board Game - Apps auf Google Play[^1^]
    -LUDO Master Board Game - Appar på Google Play[^1^]
    -LUDO Master Board Game - Apper på Google Play[^1^]

    -

    Step 3: Click on Install or Download APK

    -

    If you are on the Google Play Store, you will see a green button that says "Install". Click on it and wait for the app to download and install on your device. If you are on APKCombo, you will see a blue button that says "Download APK". Click on it and save the file to your device. Then, locate the file and tap on it to install the app.

    -

    Step 4: Follow the instructions on your screen

    -

    After installing the app, you will see an icon of Ludo Master on your home screen or app drawer. Tap on it to launch the app. You will be asked to grant some permissions, such as access to your contacts, storage, and location. Allow them to enjoy the full features of the app. You will also be asked to sign in with your Facebook account or play as a guest. Choose the option that suits you best. You are now ready to play Ludo Master!

    -

    How to play Ludo Master with your friends and family

    -

    Step 1: Launch the app and choose your game mode

    -

    When you open the app, you will see four game modes: Classic, Quick, Master, and Magic. Classic mode is the traditional ludo game with four players and normal rules. Quick mode is a faster version of ludo with two players and fewer tokens. Master mode is a more challenging version of ludo with six players and special rules. Magic mode is a fun version of ludo with four players and power-ups. Choose the game mode that you prefer and tap on it.

    -

    Step 2: Invite your friends or join a random room

    -

    After choosing your game mode, you will see two options: Play with Friends or Play Online. If you want to play with your friends or family, tap on Play with Friends. You will see a code that you can share with them via WhatsApp, Messenger, or other apps. They will need to enter the code to join your room. You can also create a private room with a password if you want more security. If you want to play with strangers, tap on Play Online. You will be matched with other players who are online and looking for a game.

    -

    Step 3: Roll the dice and move your tokens

    -

    Once you are in a room with other players, you will see the ludo board and your tokens on the screen. You will also see a dice on the bottom right corner. Tap on it to roll it and see how many steps you can move your tokens. You can only move your tokens out of their base if you roll a six. You can also move your tokens forward by the number of steps shown on the dice. If you land on a square that already has another token, you can capture it and send it back to its base. If you land on a star square, you can get a power-up that can help you in the game.
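
    To make the movement and capture rules above concrete, here is a toy model of a single move. It is a deliberately simplified illustration of the rules as described (roll a six to leave the base, capture by landing on an occupied square), not code from the Ludo Master app; the board is reduced to plain step counts.

    ```python
    # Toy model of the movement rules described above (simplified; not the Ludo Master engine).

    def can_leave_base(dice: int) -> bool:
        """A token may only leave its base on a roll of six."""
        return dice == 6

    def resolve_move(position: int, dice: int, opponent_positions: set) -> tuple:
        """Advance a token by the dice roll and report whether it lands on an opponent."""
        target = position + dice
        captured = target in opponent_positions
        return target, captured

    if __name__ == "__main__":
        print(can_leave_base(6))              # True: a six releases a token from the base
        print(resolve_move(10, 4, {14, 20}))  # (14, True): landing on square 14 captures
    ```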

    -

    Step 4: Be the first to reach the center of the board

    -

    The goal of the game is to be the first player to move all four of your tokens from their base to the center of the board. To do this, you need to roll the exact number of steps required to reach the center. For example, if your token is three steps away from the center, you need to roll a three to move it there. If you roll more than three, you cannot move your token and have to wait for another turn. The first player who reaches the center with all four tokens wins the game.

    -

    Tips and tricks to win Ludo Master games

    -

    Tip 1: Use strategy and luck to your advantage

    -

    Ludo Master is a game that combines both strategy and luck. You need to use both of them to win more games. For example, you need to decide which token to move based on the situation of the board and your opponents. You also need to take risks sometimes and hope for a good roll of the dice. You can also use some math skills to calculate the probability of rolling certain numbers.
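
    On the dice-probability point, two numbers are worth knowing: the chance of seeing at least one six within a given number of turns, and how many rolls a six takes on average. The snippet below computes both for a fair six-sided die; it is a standalone illustration, not part of the game.

    ```python
    # Dice odds for ludo, assuming a fair six-sided die (not part of the Ludo Master app).

    def chance_of_six_within(turns: int) -> float:
        """Probability of rolling at least one six in the given number of turns."""
        return 1 - (5 / 6) ** turns

    def expected_rolls_for_six() -> float:
        """Average number of rolls until the first six (geometric distribution with p = 1/6)."""
        return 6.0

    if __name__ == "__main__":
        for turns in (1, 3, 6):
            print(f"P(at least one six in {turns} turn(s)) = {chance_of_six_within(turns):.2f}")
        print(f"Expected rolls to get a six: {expected_rolls_for_six():.0f}")
    ```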

    -

    Tip 2: Avoid getting captured by your opponents

    -

    One of the most frustrating things in Ludo Master is getting captured by your opponents and losing your progress. To avoid this, you need to be careful where you place your tokens and how you move them. Try not to land on squares that are close to your opponents' bases or paths. Also, try not to leave your tokens alone or exposed on the board. Instead, try to form pairs or groups with your own tokens or allies' tokens for protection.

    -

    Tip 3: Use power-ups and boosters to enhance your gameplay

    -

    Ludo Master is not just a plain ludo game. It also has some special features that can make the game more fun and exciting. One of them is the power-ups, which are items that you can get by landing on star squares. There are four types of power-ups: Shield, Swap, Double, and Bomb. Shield protects your token from being captured for one turn. Swap lets you switch places with another token on the board. Double lets you roll the dice twice in one turn. Bomb lets you explode a nearby token and send it back to its base. You can use these power-ups wisely to gain an edge over your opponents.

    -

    Another feature is the boosters, which are items that you can buy with coins or gems. There are three types of boosters: Dice, Token, and Board. Dice boosters let you choose the number you want to roll on the dice. Token boosters let you move your token faster or skip some steps. Board boosters let you change the color or shape of the board. You can use these boosters sparingly to enhance your gameplay.

    -

    Conclusion

    -

    Ludo Master is a great app for anyone who loves playing ludo games. It is easy to download and install, and it offers various game modes and features that make the game more enjoyable and challenging. You can play Ludo Master with your friends and family online or offline, and chat and interact with them during the game. You can also learn some tips and tricks to win more games and improve your skills. Ludo Master is a game that can bring you hours of fun and entertainment. Download it now and start playing!

    -

    FAQs

    -

    Q: Is Ludo Master free to play?

    -

    A: Yes, Ludo Master is free to play. However, it also has some in-app purchases that can enhance your gameplay, such as coins, gems, boosters, and VIP membership.

    -

    Q: How can I play Ludo Master on my PC?

    -

    A: You can play Ludo Master on your PC by using an Android emulator, such as BlueStacks or NoxPlayer. These are software that allow you to run Android apps on your PC.

    -

    Q: How can I contact the developer of Ludo Master?

    -

    A: You can contact the developer of Ludo Master by sending an email to ludomaster@hippolab.com or by visiting their Facebook page. You can also leave a review or feedback on the Google Play Store or APKCombo.

    -

    Q: How can I update Ludo Master to the latest version?

    -

    A: You can update Ludo Master to the latest version by going to the Google Play Store or APKCombo and checking for updates. You can also enable automatic updates on your device settings to get the latest version automatically.

    -

    Q: How can I uninstall Ludo Master from my device?

    -

    A: You can uninstall Ludo Master from your device by going to your device settings and finding the app in the list of installed apps. Then, tap on it and choose Uninstall. You can also long-press on the app icon on your home screen or app drawer and drag it to the Uninstall option.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Summertime Saga for PC in ZIP Format - Free and Easy.md b/spaces/1phancelerku/anime-remove-background/Download Summertime Saga for PC in ZIP Format - Free and Easy.md deleted file mode 100644 index 8648782128624f9f80ff55f212772cb9e24f7d10..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Summertime Saga for PC in ZIP Format - Free and Easy.md +++ /dev/null @@ -1,22 +0,0 @@ - -

    How to Download and Install Summertime Saga Zip File on Your PC

    -

    Introduction

    - What is Summertime Saga and why is it popular?
    - What are the benefits of downloading the zip file version of the game?
    - What are the requirements and steps to download and install the game on your PC?

    What is Summertime Saga and Why is it Popular?

    - Summertime Saga is a graphical adventure game for adults that features a rich story, diverse characters, and multiple choices.
    - The game is set in a small town where the protagonist has to deal with various challenges, such as school, romance, mysteries, and secrets.
    - The game has over 65 characters to interact with, 30 locations to explore, and 20 mini-games to play.
    - The game is popular because of its humor, quality, and content. It has a loyal fan base and receives regular updates from the developers.

    What are the Benefits of Downloading the Zip File Version of the Game?

    - The zip file version of the game is a compressed file that contains all the necessary files to run the game on your PC.
    - The benefits of downloading the zip file version are:
      - It saves space on your hard drive, as it is smaller than the regular version.
      - It is faster to download, as it requires less bandwidth.
      - It is easier to install, as it does not require any additional software or setup.

    What are the Requirements and Steps to Download and Install the Game on Your PC?

    - The requirements to download and install the game on your PC are:
      - A Windows PC with at least 2 GB of RAM and 2 GB of free disk space.
      - A stable internet connection.
      - A web browser that supports downloading files.
    - The steps to download and install the game on your PC are:
      1. Go to [Summertime Saga 0.20 - Download for PC Free - Malavida] and click on the green "Download" button.
      2. Choose a location to save the zip file on your PC and wait for the download to finish.
      3. Once the download is complete, locate the zip file on your PC and right-click on it. Select "Extract All" from the menu and choose a destination folder for the extracted files.
      4. Open the destination folder and double-click on the "SummertimeSaga.exe" file to launch the game.
      5. Enjoy playing Summertime Saga on your PC!
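
    If you would rather script the extraction step than use the right-click "Extract All" dialog, a few lines of Python do the same job. The archive and folder names below are placeholders for wherever you saved the download; the zip file itself still has to be downloaded manually as described above.

    ```python
    # Minimal sketch: extract a downloaded zip and locate the game launcher.
    # "SummertimeSaga.zip" and the destination folder name are placeholder assumptions.
    import zipfile
    from pathlib import Path

    archive = Path("SummertimeSaga.zip")
    destination = Path("SummertimeSaga")

    with zipfile.ZipFile(archive) as zf:
        zf.extractall(destination)

    # The extracted folder should contain the launcher mentioned in the steps above.
    exe = next(destination.rglob("SummertimeSaga.exe"), None)
    print(f"Launcher found at: {exe}" if exe else "Launcher not found; check the extracted files.")
    ```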

    Conclusion

    - Summertime Saga is a fun and entertaining graphical adventure game for adults that you can download and play on your PC.
    - By downloading the zip file version of the game, you can save space, time, and hassle.
    - To download and install the game on your PC, you just need to follow a few simple steps that we have outlined in this article.

    FAQs

    -

    Q: Is Summertime Saga free to play?

    -

    A: Yes, Summertime Saga is free to play. You can download it from its official website or from other sources. However, you can also support the developers by donating or becoming a patron.

    -

    Q: Is Summertime Saga safe to download?

    -

    A: Yes, Summertime Saga is safe to download. However, you should always download it from trusted sources and scan it with an antivirus program before installing it. Also, be careful of any pop-ups or ads that may appear while downloading or playing the game.

    -

    Q: How often does Summertime Saga get updated?

    -

    A: Summertime Saga gets updated regularly by its developers. The latest version of the game is 0.20, which was released in November 2022. You can check for updates on its official website or social media pages.

    -

    Q: How can I save my progress in Summertime Saga?

    -

    A: You can save your progress in Summertime Saga by using the in-game menu. You can access it by clicking on the phone icon at the top right corner of the screen. You can then choose to save or load your game from one of the available slots. You can also use the auto-save feature that saves your game automatically every time you change locations or complete an event.

    -

    Q: How can I unlock more content and features in Summertime Saga?

    -

    A: You can unlock more content and features in Summertime Saga by completing quests, exploring locations, interacting with characters, and making choices. You can also use cheats or mods to access hidden or extra content, but be aware that this may cause bugs or errors in the game.

    -

    -

    summertime saga zip download


    Download Zip ☆☆☆ https://jinyurl.com/2uNTbg



    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Enjoy Stick War Legacy with MOD APK Zip (All Modes All Weapons 999 Army).md b/spaces/1phancelerku/anime-remove-background/Enjoy Stick War Legacy with MOD APK Zip (All Modes All Weapons 999 Army).md deleted file mode 100644 index e51599747e91d67a6bca3f0de7d1b8ecd4d795dc..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Enjoy Stick War Legacy with MOD APK Zip (All Modes All Weapons 999 Army).md +++ /dev/null @@ -1,143 +0,0 @@ -
    -

    Stick War Legacy Mod APK Zip: A Guide for Gamers

    -

    If you are a fan of strategy games, you might have heard of Stick War Legacy, a popular and challenging game that lets you control an army of stickmen in a battle for supremacy. But did you know that there is a way to make the game even more fun and exciting? That's right, we are talking about Stick War Legacy Mod APK Zip, a modified version of the game that gives you unlimited resources and army to dominate your enemies. In this article, we will tell you everything you need to know about Stick War Legacy Mod APK Zip, including what it is, how to download and install it, what are its benefits and drawbacks, what are some alternatives, and what are some reviews and FAQs about it. So, without further ado, let's get started!

    -

    stick war legacy mod apk zip


    DOWNLOAD ✵✵✵ https://jinyurl.com/2uNLea



    -

    What is Stick War Legacy?

    -

    A popular and challenging strategy game

    -

    Stick War Legacy is a game for iOS and Android devices that allows you to control an army of stickmen in a battle for supremacy. The game features a campaign mode with over 50 levels to play through, as well as a multiplayer mode where you can take on other players from around the world. The gameplay in Stick War Legacy is simple but addictive. You have to build your army, mine gold, learn the way of the sword, spear, archer, mage, and giant, destroy the enemy statue, and capture all territories. You can also customize your units with different weapons and skins, and use spells to boost your performance. The game has amazing graphics and sound effects that make the battles more realistic and immersive.

    -

    Features and game modes of Stick War Legacy

    -

    Stick War Legacy has many features that make it one of the best strategy games on mobile devices. Some of these features are:

    - -

    Stick War Legacy also has four different game modes that offer different challenges and experiences. These are:

    - -

    What is Stick War Legacy Mod APK Zip?

    -

    A modified version of the game with unlimited resources and army

    -

    Stick War Legacy Mod APK Zip is a modified version of the game that gives you unlimited resources and army to dominate your enemies. By downloading and installing this mod apk file, you can enjoy the game with more freedom and fun. You can unlock all the weapons and skins, upgrade your units to the max level, and recruit as many soldiers as you want. You can also use unlimited gems to buy spells and items that can help you in the battle. With Stick War Legacy Mod APK Zip, you can experience the game in a whole new way.

    -

    Benefits and drawbacks of using Stick War Legacy Mod APK Zip

    -

    Using Stick War Legacy Mod APK Zip has its benefits and drawbacks. Here are some of them:

    -

    stick war legacy mod apk unlimited gems
    -stick war legacy mod apk 999 army
    -stick war legacy mod apk unlocked all
    -stick war legacy mod apk unlimited everything
    -stick war legacy mod apk weapon customization
    -stick war legacy mod apk latest version
    -stick war legacy mod apk download for android
    -stick war legacy mod apk free download
    -stick war legacy mod apk no ads
    -stick war legacy mod apk offline
    -stick war legacy mod apk gameplay and overview
    -stick war legacy mod apk classic mode
    -stick war legacy mod apk weekly missions mode
    -stick war legacy mod apk tournament mode
    -stick war legacy mod apk graphics and sound system
    -stick war legacy mod apk army training strategies
    -stick war legacy mod apk countries with resources
    -stick war legacy mod apk skin customization
    -stick war legacy mod apk findmeapk.com[^1^]
    -stick war legacy mod apk stickwarlegacyapk.com[^2^]
    -stick war legacy mod apk 2023.2.83 version
    -stick war legacy mod apk 113 MB size
    -stick war legacy mod apk introduction and features
    -stick war legacy mod apk background and story
    -stick war legacy mod apk how to install and play
    -stick war legacy mod apk tips and tricks
    -stick war legacy mod apk cheats and hacks
    -stick war legacy mod apk reviews and ratings
    -stick war legacy mod apk updates and news
    -stick war legacy mod apk best strategies and tactics
    -stick war legacy mod apk fun and addictive game
    -stick war legacy mod apk challenges and achievements
    -stick war legacy mod apk online and multiplayer mode
    -stick war legacy mod apk support and feedback
    -stick war legacy mod apk compatible devices and requirements

    | Benefits | Drawbacks |
    | --- | --- |
    | You can enjoy the game without any limitations or restrictions. | You might lose the challenge and thrill of the game. |
    | You can customize your army with different weapons and skins. | You might encounter some bugs or glitches in the game. |
    | You can use unlimited gems to buy spells and items. | You might get banned from the multiplayer mode or the game itself. |
    | You can recruit as many soldiers as you want. | You might harm your device with viruses or malware. |
    -

    How to download and install Stick War Legacy Mod APK Zip?

    -

    Steps to download and install the mod apk file

    -

    If you want to download and install Stick War Legacy Mod APK Zip, you need to follow these steps:

    -
      -
    1. First, you need to find a reliable source for downloading the mod apk file. You can use one of these links: or . Make sure you have enough storage space on your device before downloading.
    2. Second, you need to enable the installation of unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
    3. Third, you need to locate the downloaded mod apk file on your device. You can use a file manager app to find it in your Downloads folder or wherever you saved it.
    4. Fourth, you need to tap on the mod apk file and follow the instructions on the screen to install it. It might take a few minutes for the installation to complete.
    5. Fifth, you need to launch the game and enjoy it with unlimited resources and army.
    -

    Tips to avoid viruses and malware

    -

    While downloading and installing Stick War Legacy Mod APK Zip can be fun and exciting, it can also be risky and dangerous. There are many sources that offer mod apk files that contain viruses or malware that can harm your device or steal your personal information. To avoid this, you need to follow these tips:

    - -

    Alternatives to Stick War Legacy Mod APK Zip

    -

    Other mod apk files for Stick War Legacy

    -

    If you are not satisfied with Stick War Legacy Mod APK Zip, or you want to try something different, you can also download other mod apk files for Stick War Legacy. These mod apk files offer different features and modifications that can enhance your gaming experience. Some of these mod apk files are:

    - -

    Other strategy games similar to Stick War Legacy

    -

    If you are looking for other strategy games similar to Stick War Legacy, you can also check out these games that offer similar gameplay and features. These games are:

    - -

    Reviews and FAQs about Stick War Legacy Mod APK Zip

    -

    What users say about the mod apk file

    -

    Stick War Legacy Mod APK Zip has received mixed reviews from users who have tried it. Some users praise the mod apk file for making the game more fun and easy, while others criticize it for ruining the game's balance and challenge. Here are some examples of user reviews:

    -
    "This is the best mod ever! I love having unlimited resources and army, it makes the game so much more enjoyable. I can try different strategies and weapons without worrying about running out of money or soldiers."
    -
    "This is the worst mod ever! It takes away all the challenge and thrill of the game. It makes the game too easy and boring, there is no point in playing it anymore."
    -
    "This is a good mod for casual players who just want to have fun and relax. But for hardcore players who want to test their skills and strategy, this is not a good mod. It depends on what you are looking for in the game."
    -

    Frequently asked questions and answers about the mod apk file

    -

    If you have any questions or doubts about Stick War Legacy Mod APK Zip, you might find some answers here. We have compiled some of the most frequently asked questions and answers about the mod apk file. These are:

    - -

    Conclusion

    -

    Stick War Legacy Mod APK Zip is a modified version of the game that gives you unlimited resources and army to dominate your enemies. It can make the game more fun and exciting, but it can also ruin the game's balance and challenge. It can also be risky and dangerous to use, as it might contain viruses or malware, or get you banned from the game. Therefore, you should always download and install mod apk files at your own risk, and follow the tips we mentioned above to avoid any problems. Alternatively, you can also try other mod apk files or other strategy games similar to Stick War Legacy. We hope this article has helped you learn more about Stick War Legacy Mod APK Zip, and we wish you a happy gaming experience!

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/44ov41za8i/FreeVC/speaker_encoder/__init__.py b/spaces/44ov41za8i/FreeVC/speaker_encoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/7hao/bingo/cloudflare/worker.js b/spaces/7hao/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/7hao/bingo/src/pages/api/healthz.ts b/spaces/7hao/bingo/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/7hao/bingo/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/__init__.py b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py deleted file mode 100644 index adb5767eb6e48b79c9811139091522cf635b5697..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/losses/stft_loss.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" - -import torch -import torch.nn.functional as F - - -def stft(x, fft_size, hop_size, win_length, window): - """Perform STFT and convert to magnitude spectrogram. - - Args: - x (Tensor): Input signal tensor (B, T). - fft_size (int): FFT size. - hop_size (int): Hop size. - win_length (int): Window length. - window (str): Window function type. - - Returns: - Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). - - """ - x_stft = torch.stft(x, fft_size, hop_size, win_length, window) - real = x_stft[..., 0] - imag = x_stft[..., 1] - - # NOTE(kan-bayashi): clamp is needed to avoid nan or inf - return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) - - -class SpectralConvergengeLoss(torch.nn.Module): - """Spectral convergence loss module.""" - - def __init__(self): - """Initilize spectral convergence loss module.""" - super(SpectralConvergengeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - - Returns: - Tensor: Spectral convergence loss value. 
- - """ - return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") - - -class LogSTFTMagnitudeLoss(torch.nn.Module): - """Log STFT magnitude loss module.""" - - def __init__(self): - """Initilize los STFT magnitude loss module.""" - super(LogSTFTMagnitudeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - - Returns: - Tensor: Log STFT magnitude loss value. - - """ - return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - - """ - self.window = self.window.to(x.device) - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window"): - """Initialize Multi resolution STFT loss module. - - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. - - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window)] - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. 
- - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss diff --git a/spaces/ALSv/FSW/app.py b/spaces/ALSv/FSW/app.py deleted file mode 100644 index fe9a516e99129636b838903af8a4fab32f15d9cf..0000000000000000000000000000000000000000 --- a/spaces/ALSv/FSW/app.py +++ /dev/null @@ -1,72 +0,0 @@ -# -* coding:UTF-8 -* -# !/usr/bin/env python -import numpy as np -import gradio as gr -import roop.globals -from roop.core import ( - start, - decode_execution_providers, - suggest_max_memory, - suggest_execution_threads, -) -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import normalize_output_path -import os -from PIL import Image - - -def swap_face(source_file, target_file,doFaceEnhancer): - - source_path = "input.jpg" - target_path = "target.jpg" - - source_image = Image.fromarray(source_file) - source_image.save(source_path) - target_image = Image.fromarray(target_file) - target_image.save(target_path) - - print("source_path: ", source_path) - print("target_path: ", target_path) - - roop.globals.source_path = source_path - roop.globals.target_path = target_path - output_path = "output.jpg" - roop.globals.output_path = normalize_output_path( - roop.globals.source_path, roop.globals.target_path, output_path - ) - if doFaceEnhancer == True: - roop.globals.frame_processors = ["face_swapper","face_enhancer"] - else: - roop.globals.frame_processors = ["face_swapper"] - roop.globals.headless = True - roop.globals.keep_fps = True - roop.globals.keep_audio = True - roop.globals.keep_frames = False - roop.globals.many_faces = False - roop.globals.video_encoder = "libx264" - roop.globals.video_quality = 18 - roop.globals.max_memory = suggest_max_memory() - roop.globals.execution_providers = decode_execution_providers(["cuda"]) - roop.globals.execution_threads = suggest_execution_threads() - - print( - "start process", - roop.globals.source_path, - roop.globals.target_path, - roop.globals.output_path, - ) - - for frame_processor in get_frame_processors_modules( - roop.globals.frame_processors - ): - if not frame_processor.pre_check(): - return - - start() - return output_path - - -app = gr.Interface( - fn=swap_face, inputs=[gr.Image(), gr.Image(),gr.Checkbox(label="face_enhancer?", info="do face enhancer?")], outputs="image" -) -app.launch() diff --git a/spaces/AUST001/True-GPT4/README.md b/spaces/AUST001/True-GPT4/README.md deleted file mode 100644 index 77d4979e8d46ea8fbd8a2ef99e854c389d2235b7..0000000000000000000000000000000000000000 --- a/spaces/AUST001/True-GPT4/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: True GPT4 -emoji: ⚡ -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Abhilashvj/planogram-compliance/utils/loggers/__init__.py b/spaces/Abhilashvj/planogram-compliance/utils/loggers/__init__.py deleted file mode 100644 index 6a0232bd5bab37e21e7fc926eb64630c190b514d..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/utils/loggers/__init__.py +++ /dev/null @@ -1,578 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Logging utils -""" - -import os -import warnings -from pathlib import Path - -import pkg_resources as pkg -import torch 
-from torch.utils.tensorboard import SummaryWriter - -from utils.general import LOGGER, colorstr, cv2 -from utils.loggers.clearml.clearml_utils import ClearmlLogger -from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_labels, plot_results -from utils.torch_utils import de_parallel - -LOGGERS = ( - "csv", - "tb", - "wandb", - "clearml", - "comet", -) # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv("RANK", -1)) - -try: - import wandb - - assert hasattr(wandb, "__version__") # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version( - "0.12.2" - ) and RANK in {0, -1}: - try: - wandb_login_success = wandb.login(timeout=30) - except wandb.errors.UsageError: # known non-TTY terminal issue - wandb_login_success = False - if not wandb_login_success: - wandb = None -except (ImportError, AssertionError): - wandb = None - -try: - import clearml - - assert hasattr( - clearml, "__version__" - ) # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - -try: - if RANK not in [0, -1]: - comet_ml = None - else: - import comet_ml - - assert hasattr( - comet_ml, "__version__" - ) # verify package import not local dir - from utils.loggers.comet import CometLogger - -except (ModuleNotFoundError, ImportError, AssertionError): - comet_ml = None - - -class Loggers: - # YOLOv5 Loggers class - def __init__( - self, - save_dir=None, - weights=None, - opt=None, - hyp=None, - logger=None, - include=LOGGERS, - ): - self.save_dir = save_dir - self.weights = weights - self.opt = opt - self.hyp = hyp - self.plots = not opt.noplots # plot results - self.logger = logger # for printing results to console - self.include = include - self.keys = [ - "train/box_loss", - "train/obj_loss", - "train/cls_loss", # train loss - "metrics/precision", - "metrics/recall", - "metrics/mAP_0.5", - "metrics/mAP_0.5:0.95", # metrics - "val/box_loss", - "val/obj_loss", - "val/cls_loss", # val loss - "x/lr0", - "x/lr1", - "x/lr2", - ] # params - self.best_keys = [ - "best/epoch", - "best/precision", - "best/recall", - "best/mAP_0.5", - "best/mAP_0.5:0.95", - ] - for k in LOGGERS: - setattr(self, k, None) # init empty logger dictionary - self.csv = True # always log to csv - - # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) - if not clearml: - prefix = colorstr("ClearML: ") - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" - self.logger.info(s) - if not comet_ml: - prefix = colorstr("Comet: ") - s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" - self.logger.info(s) - # TensorBoard - s = self.save_dir - if "tb" in self.include and not self.opt.evolve: - prefix = colorstr("TensorBoard: ") - self.logger.info( - f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/" - ) - self.tb = SummaryWriter(str(s)) - - # W&B - if wandb and "wandb" in self.include: - wandb_artifact_resume = isinstance( - self.opt.resume, str - ) and self.opt.resume.startswith("wandb-artifact://") - run_id = ( - torch.load(self.weights).get("wandb_id") - if self.opt.resume and not wandb_artifact_resume - else None - ) - self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. 
because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - # self.logger.warning(s) - else: - self.wandb = None - - # ClearML - if clearml and "clearml" in self.include: - try: - self.clearml = ClearmlLogger(self.opt, self.hyp) - except Exception: - self.clearml = None - prefix = colorstr("ClearML: ") - LOGGER.warning( - f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging." - f" See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme" - ) - - else: - self.clearml = None - - # Comet - if comet_ml and "comet" in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith( - "comet://" - ): - run_id = self.opt.resume.split("/")[-1] - self.comet_logger = CometLogger( - self.opt, self.hyp, run_id=run_id - ) - - else: - self.comet_logger = CometLogger(self.opt, self.hyp) - - else: - self.comet_logger = None - - @property - def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided - data_dict = None - if self.clearml: - data_dict = self.clearml.data_dict - if self.wandb: - data_dict = self.wandb.data_dict - if self.comet_logger: - data_dict = self.comet_logger.data_dict - - return data_dict - - def on_train_start(self): - if self.comet_logger: - self.comet_logger.on_train_start() - - def on_pretrain_routine_start(self): - if self.comet_logger: - self.comet_logger.on_pretrain_routine_start() - - def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end - if self.plots: - plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob("*labels*.jpg") # training labels - if self.wandb: - self.wandb.log( - { - "Labels": [ - wandb.Image(str(x), caption=x.name) for x in paths - ] - } - ) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks - if self.comet_logger: - self.comet_logger.on_pretrain_routine_end(paths) - - def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) - # Callback runs on train batch end - # ni: number integrated batches (since train start) - if self.plots: - if ni < 3: - f = self.save_dir / f"train_batch{ni}.jpg" # filename - plot_images(imgs, targets, paths, f) - if ni == 0 and self.tb and not self.opt.sync_bn: - log_tensorboard_graph( - self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz) - ) - if ni == 10 and (self.wandb or self.clearml): - files = sorted(self.save_dir.glob("train*.jpg")) - if self.wandb: - self.wandb.log( - { - "Mosaics": [ - wandb.Image(str(f), caption=f.name) - for f in files - if f.exists() - ] - } - ) - if self.clearml: - self.clearml.log_debug_samples(files, title="Mosaics") - - if self.comet_logger: - self.comet_logger.on_train_batch_end(log_dict, step=ni) - - def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end - if self.wandb: - self.wandb.current_epoch = epoch + 1 - - if self.comet_logger: - self.comet_logger.on_train_epoch_end(epoch) - - def on_val_start(self): - if self.comet_logger: - self.comet_logger.on_val_start() - - def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end - if self.wandb: - self.wandb.val_one_image(pred, predn, path, names, im) - if self.clearml: - self.clearml.log_image_with_boxes(path, pred, names, im) - - def on_val_batch_end(self, batch_i, im, 
targets, paths, shapes, out): - if self.comet_logger: - self.comet_logger.on_val_batch_end( - batch_i, im, targets, paths, shapes, out - ) - - def on_val_end( - self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix - ): - # Callback runs on val end - if self.wandb or self.clearml: - files = sorted(self.save_dir.glob("val*.jpg")) - if self.wandb: - self.wandb.log( - { - "Validation": [ - wandb.Image(str(f), caption=f.name) for f in files - ] - } - ) - if self.clearml: - self.clearml.log_debug_samples(files, title="Validation") - - if self.comet_logger: - self.comet_logger.on_val_end( - nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix - ) - - def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch - x = dict(zip(self.keys, vals)) - if self.csv: - file = self.save_dir / "results.csv" - n = len(x) + 1 # number of cols - s = ( - "" - if file.exists() - else ( - ("%20s," * n % tuple(["epoch"] + self.keys)).rstrip(",") - + "\n" - ) - ) # add header - with open(file, "a") as f: - f.write( - s - + ("%20.5g," * n % tuple([epoch] + vals)).rstrip(",") - + "\n" - ) - - if self.tb: - for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) - elif self.clearml: # log to ClearML if TensorBoard not used - for k, v in x.items(): - title, series = k.split("/") - self.clearml.task.get_logger().report_scalar( - title, series, v, epoch - ) - - if self.wandb: - if best_fitness == fi: - best_results = [epoch] + vals[3:7] - for i, name in enumerate(self.best_keys): - self.wandb.wandb_run.summary[name] = best_results[ - i - ] # log best results in the summary - self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) - - if self.clearml: - self.clearml.current_epoch_logged_images = ( - set() - ) # reset epoch image limit - self.clearml.current_epoch += 1 - - if self.comet_logger: - self.comet_logger.on_fit_epoch_end(x, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event - if ( - (epoch + 1) % self.opt.save_period == 0 - and not final_epoch - and self.opt.save_period != -1 - ): - if self.wandb: - self.wandb.log_model( - last.parent, - self.opt, - epoch, - fi, - best_model=best_fitness == fi, - ) - if self.clearml: - self.clearml.task.update_output_model( - model_path=str(last), - model_name="Latest Model", - auto_delete_file=False, - ) - - if self.comet_logger: - self.comet_logger.on_model_save( - last, epoch, final_epoch, best_fitness, fi - ) - - def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. saving best model - if self.plots: - plot_results( - file=self.save_dir / "results.csv" - ) # save results.png - files = [ - "results.png", - "confusion_matrix.png", - *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")), - ] - files = [ - (self.save_dir / f) for f in files if (self.save_dir / f).exists() - ] # filter - self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") - - if ( - self.tb and not self.clearml - ): # These images are already captured by ClearML by now, we don't want doubles - for f in files: - self.tb.add_image( - f.stem, - cv2.imread(str(f))[..., ::-1], - epoch, - dataformats="HWC", - ) - - if self.wandb: - self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log( - { - "Results": [ - wandb.Image(str(f), caption=f.name) for f in files - ] - } - ) - # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model - if not self.opt.evolve: - wandb.log_artifact( - str(best if best.exists() else last), - type="model", - name=f"run_{self.wandb.wandb_run.id}_model", - aliases=["latest", "best", "stripped"], - ) - self.wandb.finish_run() - - if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model( - model_path=str(best if best.exists() else last), - name="Best Model", - auto_delete_file=False, - ) - - if self.comet_logger: - final_results = dict(zip(self.keys[3:10], results)) - self.comet_logger.on_train_end( - files, self.save_dir, last, best, epoch, final_results - ) - - def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment - if self.wandb: - self.wandb.wandb_run.config.update(params, allow_val_change=True) - if self.comet_logger: - self.comet_logger.on_params_update(params) - - -class GenericLogger: - """ - YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) - Arguments - opt: Run arguments - console_logger: Console logger - include: loggers to include - """ - - def __init__(self, opt, console_logger, include=("tb", "wandb")): - # init default loggers - self.save_dir = Path(opt.save_dir) - self.include = include - self.console_logger = console_logger - self.csv = self.save_dir / "results.csv" # CSV logger - if "tb" in self.include: - prefix = colorstr("TensorBoard: ") - self.console_logger.info( - f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/" - ) - self.tb = SummaryWriter(str(self.save_dir)) - - if wandb and "wandb" in self.include: - self.wandb = wandb.init( - project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, - config=opt, - ) - else: - self.wandb = None - - def log_metrics(self, metrics, epoch): - # Log metrics dictionary to all loggers - if self.csv: - keys, vals = list(metrics.keys()), list(metrics.values()) - n = len(metrics) + 1 # number of cols - s = ( - "" - if self.csv.exists() - else ( - ("%23s," * n % tuple(["epoch"] + keys)).rstrip(",") + "\n" - ) - ) # header - with open(self.csv, "a") as f: - f.write( - s - + ("%23.5g," * n % tuple([epoch] + vals)).rstrip(",") - + "\n" - ) - - if self.tb: - for k, v in metrics.items(): - self.tb.add_scalar(k, v, epoch) - - if self.wandb: - self.wandb.log(metrics, step=epoch) - - def log_images(self, files, name="Images", epoch=0): - # Log images to all loggers - files = [ - Path(f) - for f in (files if isinstance(files, (tuple, list)) else [files]) - ] # to Path - files = [f for f in files if f.exists()] # filter by exists - - if self.tb: - for f in files: - self.tb.add_image( - f.stem, - cv2.imread(str(f))[..., ::-1], - epoch, - dataformats="HWC", - ) - - if self.wandb: - self.wandb.log( - {name: [wandb.Image(str(f), caption=f.name) for f in files]}, - step=epoch, - ) - - def log_graph(self, model, imgsz=(640, 640)): - # Log model graph to all loggers - if self.tb: - log_tensorboard_graph(self.tb, model, imgsz) - - def log_model(self, model_path, epoch=0, metadata={}): - # Log model to all loggers - if self.wandb: - art = wandb.Artifact( - name=f"run_{wandb.run.id}_model", - type="model", - metadata=metadata, - ) - art.add_file(str(model_path)) - wandb.log_artifact(art) - - def update_params(self, params): - # Update the paramters logged - if self.wandb: - wandb.run.config.update(params, allow_val_change=True) - - -def log_tensorboard_graph(tb, model, imgsz=(640, 
640)): - # Log model graph to TensorBoard - try: - p = next(model.parameters()) # for device, type - imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = ( - torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) - ) # input image (WARNING: must be zeros, not empty) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") # suppress jit trace warning - tb.add_graph( - torch.jit.trace(de_parallel(model), im, strict=False), [] - ) - except Exception as e: - LOGGER.warning( - f"WARNING ⚠️ TensorBoard graph visualization failure {e}" - ) - - -def web_project_name(project): - # Convert local project name to web project name - if not project.startswith("runs/train"): - return project - suffix = ( - "-Classify" - if project.endswith("-cls") - else "-Segment" - if project.endswith("-seg") - else "" - ) - return f"YOLOv5{suffix}" diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/board-plugin.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/board-plugin.js deleted file mode 100644 index c358d42156f143b9861fb57338d7aec7dd604aad..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/board-plugin.js +++ /dev/null @@ -1,40 +0,0 @@ -import ObjectFactory from './board/ObjectFactory.js'; - -import BoardFactory from './board/board/Factory.js'; -import HexagonFactory from './board/grid/hexagon/Factory.js'; -import QuadFactory from './board/grid/quad/Factory.js'; -import ShapeFactory from './board/shape/Factory.js'; - -import MoveToFactory from './board/moveto/Factory.js'; -import MatchFactory from './board/match/Factory.js'; -import PathFinderFactory from './board/pathfinder/Factory.js'; -import FieldOfViewFactory from './board/fieldofview/Factory.js'; -import MonopolyFactory from './board/monopoly/Factory.js'; - -import MiniBoardFactory from './board/miniboard/Factory.js'; - -import HexagonMap from './board/hexagonmap/index.js'; - -import CreateTileTexture from './board/texture/CreateTileTexture.js'; - -import CreateBoardFromTilemap from './board/tilemap/CreateBoardFromTilemap.js'; - -class BoardPlugin extends Phaser.Plugins.ScenePlugin { - constructor(scene, pluginManager) { - super(scene, pluginManager); - - this.add = new ObjectFactory(scene); - - // Helper functions - this.hexagonMap = HexagonMap; - this.createTileTexture = CreateTileTexture; - this.createBoardFromTilemap = CreateBoardFromTilemap; - } - - start() { - var eventEmitter = this.scene.sys.events; - eventEmitter.on('destroy', this.destroy, this); - } -} - -export default BoardPlugin; diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/swirlpipeline.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/swirlpipeline.d.ts deleted file mode 100644 index b8da9e6eadd01e3084e6d21fa1b3254dc6bdc13c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/swirlpipeline.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -import SwirlPostFxPipeline from './shaders/swirl/SwirlPostFxPipeline'; -export default SwirlPostFxPipeline; \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/vq.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/vq.md deleted file mode 100644 index cdb6761468a8fc5a81a6b4b2d063bd6e81e1e1d9..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/vq.md 
+++ /dev/null @@ -1,15 +0,0 @@ -# VQModel - -The VQ-VAE model was introduced in [Neural Discrete Representation Learning](https://huggingface.co/papers/1711.00937) by Aaron van den Oord, Oriol Vinyals and Koray Kavukcuoglu. The model is used in 🤗 Diffusers to decode latent representations into images. Unlike [`AutoencoderKL`], the [`VQModel`] works in a quantized latent space. - -The abstract from the paper is: - -*Learning useful representations without supervision remains a key challenge in machine learning. In this paper, we propose a simple yet powerful generative model that learns such discrete representations. Our model, the Vector Quantised-Variational AutoEncoder (VQ-VAE), differs from VAEs in two key ways: the encoder network outputs discrete, rather than continuous, codes; and the prior is learnt rather than static. In order to learn a discrete latent representation, we incorporate ideas from vector quantisation (VQ). Using the VQ method allows the model to circumvent issues of "posterior collapse" -- where the latents are ignored when they are paired with a powerful autoregressive decoder -- typically observed in the VAE framework. Pairing these representations with an autoregressive prior, the model can generate high quality images, videos, and speech as well as doing high quality speaker conversion and unsupervised learning of phonemes, providing further evidence of the utility of the learnt representations.* - -## VQModel - -[[autodoc]] VQModel - -## VQEncoderOutput - -[[autodoc]] models.vq_model.VQEncoderOutput \ No newline at end of file diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 31fdd070595ac0512a39075bb045dd18035d3f14..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/__init__.py deleted file mode 100644 index 76589b1f279a71a59a5515d1b78cea0865f83131..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .pytorch2onnx import (build_model_from_cfg, - generate_inputs_and_wrap_model, - preprocess_example_input) - -__all__ = [ - 'build_model_from_cfg', 'generate_inputs_and_wrap_model', - 'preprocess_example_input' -] diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/start_macos.sh b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/start_macos.sh deleted file mode 100644 index 7fa82d81117ebde944d2b29fa830a329dddc7998..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/start_macos.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash - -cd "$(dirname "${BASH_SOURCE[0]}")" - -if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi - -# deactivate existing conda envs as needed to avoid conflicts -{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null - -# M Series or Intel -OS_ARCH=$(uname -m) -case "${OS_ARCH}" in - x86_64*) OS_ARCH="x86_64";; - arm64*) OS_ARCH="arm64";; - *) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit -esac - -# config -INSTALL_DIR="$(pwd)/installer_files" -CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda" -INSTALL_ENV_DIR="$(pwd)/installer_files/env" -MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.3.1-0-MacOSX-${OS_ARCH}.sh" -conda_exists="F" - -# figure out whether git and conda needs to be installed -if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi - -# (if necessary) install git and conda into a contained environment -# download miniconda -if [ "$conda_exists" == "F" ]; then - echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh" - - mkdir -p "$INSTALL_DIR" - curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh" - - chmod u+x "$INSTALL_DIR/miniconda_installer.sh" - bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX - - # test the conda binary - echo "Miniconda version:" - "$CONDA_ROOT_PREFIX/bin/conda" --version -fi - -# create the installer env -if [ ! -e "$INSTALL_ENV_DIR" ]; then - "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 -fi - -# check if conda environment was actually created -if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then - echo "Conda environment is empty." 
- exit -fi - -# environment isolation -export PYTHONNOUSERSITE=1 -unset PYTHONPATH -unset PYTHONHOME -export CUDA_PATH="$INSTALL_ENV_DIR" -export CUDA_HOME="$CUDA_PATH" - -# activate installer env -source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script) -conda activate "$INSTALL_ENV_DIR" - -# setup installer env -python one_click.py $@ diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/model.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/model.py deleted file mode 100644 index f2c95c481724270116998b90de64cee8ef58c94e..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/CLIP/clip/model.py +++ /dev/null @@ -1,432 +0,0 @@ -from collections import OrderedDict -from typing import Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1): - super().__init__() - - # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 - self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - - self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - - self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() - - self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - - self.relu = nn.ReLU(inplace=True) - self.downsample = None - self.stride = stride - - if stride > 1 or inplanes != planes * Bottleneck.expansion: - # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 - self.downsample = nn.Sequential(OrderedDict([ - ("-1", nn.AvgPool2d(stride)), - ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), - ("1", nn.BatchNorm2d(planes * self.expansion)) - ])) - - def forward(self, x: torch.Tensor): - identity = x - - out = self.relu(self.bn1(self.conv1(x))) - out = self.relu(self.bn2(self.conv2(out))) - out = self.avgpool(out) - out = self.bn3(self.conv3(out)) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - return out - - -class AttentionPool2d(nn.Module): - def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): - super().__init__() - self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) - self.k_proj = nn.Linear(embed_dim, embed_dim) - self.q_proj = nn.Linear(embed_dim, embed_dim) - self.v_proj = nn.Linear(embed_dim, embed_dim) - self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) - self.num_heads = num_heads - - def forward(self, x): - x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC - x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC - x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC - x, _ = F.multi_head_attention_forward( - query=x, key=x, value=x, - embed_dim_to_check=x.shape[-1], - num_heads=self.num_heads, - q_proj_weight=self.q_proj.weight, - k_proj_weight=self.k_proj.weight, - v_proj_weight=self.v_proj.weight, - in_proj_weight=None, - in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), - bias_k=None, - bias_v=None, - 
add_zero_attn=False, - dropout_p=0, - out_proj_weight=self.c_proj.weight, - out_proj_bias=self.c_proj.bias, - use_separate_proj_weight=True, - training=self.training, - need_weights=False - ) - - return x[0] - - -class ModifiedResNet(nn.Module): - """ - A ResNet class that is similar to torchvision's but contains the following changes: - - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. - - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 - - The final pooling layer is a QKV attention instead of an average pool - """ - - def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): - super().__init__() - self.output_dim = output_dim - self.input_resolution = input_resolution - - # the 3-layer stem - self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(width // 2) - self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(width // 2) - self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) - self.bn3 = nn.BatchNorm2d(width) - self.avgpool = nn.AvgPool2d(2) - self.relu = nn.ReLU(inplace=True) - - # residual layers - self._inplanes = width # this is a *mutable* variable used during construction - self.layer1 = self._make_layer(width, layers[0]) - self.layer2 = self._make_layer(width * 2, layers[1], stride=2) - self.layer3 = self._make_layer(width * 4, layers[2], stride=2) - self.layer4 = self._make_layer(width * 8, layers[3], stride=2) - - embed_dim = width * 32 # the ResNet feature dimension - self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) - - def _make_layer(self, planes, blocks, stride=1): - layers = [Bottleneck(self._inplanes, planes, stride)] - - self._inplanes = planes * Bottleneck.expansion - for _ in range(1, blocks): - layers.append(Bottleneck(self._inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - def stem(x): - for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: - x = self.relu(bn(conv(x))) - x = self.avgpool(x) - return x - - x = x.type(self.conv1.weight.dtype) - x = stem(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.attnpool(x) - - return x - - -class LayerNorm(nn.LayerNorm): - """Subclass torch's LayerNorm to handle fp16.""" - - def forward(self, x: torch.Tensor): - orig_type = x.dtype - ret = super().forward(x.type(torch.float32)) - return ret.type(orig_type) - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor): - return x * torch.sigmoid(1.702 * x) - - -class ResidualAttentionBlock(nn.Module): - def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): - super().__init__() - - self.attn = nn.MultiheadAttention(d_model, n_head) - self.ln_1 = LayerNorm(d_model) - self.mlp = nn.Sequential(OrderedDict([ - ("c_fc", nn.Linear(d_model, d_model * 4)), - ("gelu", QuickGELU()), - ("c_proj", nn.Linear(d_model * 4, d_model)) - ])) - self.ln_2 = LayerNorm(d_model) - self.attn_mask = attn_mask - - def attention(self, x: torch.Tensor): - self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None - return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] - - def forward(self, x: torch.Tensor): - x = x + self.attention(self.ln_1(x)) - x = x + self.mlp(self.ln_2(x)) - return 
x - - -class Transformer(nn.Module): - def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): - super().__init__() - self.width = width - self.layers = layers - self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) - - def forward(self, x: torch.Tensor): - return self.resblocks(x) - - -class VisionTransformer(nn.Module): - def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int): - super().__init__() - self.input_resolution = input_resolution - self.output_dim = output_dim - self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) - - scale = width ** -0.5 - self.class_embedding = nn.Parameter(scale * torch.randn(width)) - self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) - self.ln_pre = LayerNorm(width) - - self.transformer = Transformer(width, layers, heads) - - self.ln_post = LayerNorm(width) - self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) - - def forward(self, x: torch.Tensor): - x = self.conv1(x) # shape = [*, width, grid, grid] - x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] - x = x + self.positional_embedding.to(x.dtype) - x = self.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - - x = self.ln_post(x[:, 0, :]) - - if self.proj is not None: - x = x @ self.proj - - return x - - -class CLIP(nn.Module): - def __init__(self, - embed_dim: int, - # vision - image_resolution: int, - vision_layers: Union[Tuple[int, int, int, int], int], - vision_width: int, - vision_patch_size: int, - # text - context_length: int, - vocab_size: int, - transformer_width: int, - transformer_heads: int, - transformer_layers: int - ): - super().__init__() - - self.context_length = context_length - - if isinstance(vision_layers, (tuple, list)): - vision_heads = vision_width * 32 // 64 - self.visual = ModifiedResNet( - layers=vision_layers, - output_dim=embed_dim, - heads=vision_heads, - input_resolution=image_resolution, - width=vision_width - ) - else: - vision_heads = vision_width // 64 - self.visual = VisionTransformer( - input_resolution=image_resolution, - patch_size=vision_patch_size, - width=vision_width, - layers=vision_layers, - heads=vision_heads, - output_dim=embed_dim - ) - - self.transformer = Transformer( - width=transformer_width, - layers=transformer_layers, - heads=transformer_heads, - attn_mask=self.build_attention_mask() - ) - - self.vocab_size = vocab_size - self.token_embedding = nn.Embedding(vocab_size, transformer_width) - self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) - self.ln_final = LayerNorm(transformer_width) - - self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) - self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) - - self.initialize_parameters() - - def initialize_parameters(self): - nn.init.normal_(self.token_embedding.weight, std=0.02) - nn.init.normal_(self.positional_embedding, std=0.01) - - if isinstance(self.visual, ModifiedResNet): - if self.visual.attnpool is not None: - std = 
self.visual.attnpool.c_proj.in_features ** -0.5 - nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) - - for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]: - for name, param in resnet_block.named_parameters(): - if name.endswith("bn3.weight"): - nn.init.zeros_(param) - - proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) - attn_std = self.transformer.width ** -0.5 - fc_std = (2 * self.transformer.width) ** -0.5 - for block in self.transformer.resblocks: - nn.init.normal_(block.attn.in_proj_weight, std=attn_std) - nn.init.normal_(block.attn.out_proj.weight, std=proj_std) - nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) - nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) - - if self.text_projection is not None: - nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) - - def build_attention_mask(self): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(self.context_length, self.context_length) - mask.fill_(float("-inf")) - mask.triu_(1) # zero out the lower diagonal - return mask - - @property - def dtype(self): - return self.visual.conv1.weight.dtype - - def encode_image(self, image): - return self.visual(image.type(self.dtype)) - - def encode_text(self, text): - x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] - - x = x + self.positional_embedding.type(self.dtype) - x = x.permute(1, 0, 2) # NLD -> LND - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.ln_final(x).type(self.dtype) - - # x.shape = [batch_size, n_ctx, transformer.width] - # take features from the eot embedding (eot_token is the highest number in each sequence) - x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection - - return x - - def forward(self, image, text): - image_features = self.encode_image(image) - text_features = self.encode_text(text) - - # normalized features - image_features = image_features / image_features.norm(dim=-1, keepdim=True) - text_features = text_features / text_features.norm(dim=-1, keepdim=True) - - # cosine similarity as logits - logit_scale = self.logit_scale.exp() - logits_per_image = logit_scale * image_features @ text_features.t() - logits_per_text = logit_scale * text_features @ image_features.t() - - # shape = [global_batch_size, global_batch_size] - return logits_per_image, logits_per_text - - -def convert_weights(model: nn.Module): - """Convert applicable model parameters to fp16""" - - def _convert_weights_to_fp16(l): - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - if isinstance(l, nn.MultiheadAttention): - for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: - tensor = getattr(l, attr) - if tensor is not None: - tensor.data = tensor.data.half() - - for name in ["text_projection", "proj"]: - if hasattr(l, name): - attr = getattr(l, name) - if attr is not None: - attr.data = attr.data.half() - - model.apply(_convert_weights_to_fp16) - - -def build_model(state_dict: dict): - vit = "visual.proj" in state_dict - - if vit: - vision_width = 
state_dict["visual.conv1.weight"].shape[0] - vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) - vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] - grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) - image_resolution = vision_patch_size * grid_size - else: - counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] - vision_layers = tuple(counts) - vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] - output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) - vision_patch_size = None - assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] - image_resolution = output_width * 32 - - embed_dim = state_dict["text_projection"].shape[1] - context_length = state_dict["positional_embedding"].shape[0] - vocab_size = state_dict["token_embedding.weight"].shape[0] - transformer_width = state_dict["ln_final.weight"].shape[0] - transformer_heads = transformer_width // 64 - transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) - - model = CLIP( - embed_dim, - image_resolution, vision_layers, vision_width, vision_patch_size, - context_length, vocab_size, transformer_width, transformer_heads, transformer_layers - ) - - for key in ["input_resolution", "context_length", "vocab_size"]: - if key in state_dict: - del state_dict[key] - - convert_weights(model) - model.load_state_dict(state_dict) - return model.eval() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_editable.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_editable.py deleted file mode 100644 index 719d69dd801b78b360c6c2234080eee638b8de82..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_editable.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging -import os -from typing import Optional - -from pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing - -from pip._internal.utils.subprocess import runner_with_spinner_message - -logger = logging.getLogger(__name__) - - -def build_wheel_editable( - name: str, - backend: BuildBackendHookCaller, - metadata_directory: str, - tempd: str, -) -> Optional[str]: - """Build one InstallRequirement using the PEP 660 build process. - - Returns path to wheel if successfully built. Otherwise, returns None. 
- """ - assert metadata_directory is not None - try: - logger.debug("Destination directory: %s", tempd) - - runner = runner_with_spinner_message( - f"Building editable for {name} (pyproject.toml)" - ) - with backend.subprocess_runner(runner): - try: - wheel_name = backend.build_editable( - tempd, - metadata_directory=metadata_directory, - ) - except HookMissing as e: - logger.error( - "Cannot build editable %s because the build " - "backend does not have the %s hook", - name, - e, - ) - return None - except Exception: - logger.error("Failed building editable for %s", name) - return None - return os.path.join(tempd, wheel_name) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/grit/predictor.py b/spaces/Awiny/Image2Paragraph/models/grit_src/grit/predictor.py deleted file mode 100644 index 6c188ea2ab5fac232554d4eaaf2fb073670a70e4..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/grit/predictor.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Jialian Wu from https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/visualizer.py -import torch - -from detectron2.engine.defaults import DefaultPredictor -from detectron2.utils.visualizer import ColorMode, Visualizer - - -class Visualizer_GRiT(Visualizer): - def __init__(self, image, instance_mode=None): - super().__init__(image, instance_mode=instance_mode) - - def draw_instance_predictions(self, predictions): - boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None - scores = predictions.scores if predictions.has("scores") else None - classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None - object_description = predictions.pred_object_descriptions.data - # uncomment to output scores in visualized images - # object_description = [c + '|' + str(round(s.item(), 1)) for c, s in zip(object_description, scores)] - - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes - ] - alpha = 0.8 - else: - colors = None - alpha = 0.5 - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image( - self._create_grayscale_image( - (predictions.pred_masks.any(dim=0) > 0).numpy() - if predictions.has("pred_masks") - else None - ) - ) - alpha = 0.3 - - self.overlay_instances( - masks=None, - boxes=boxes, - labels=object_description, - keypoints=None, - assigned_colors=colors, - alpha=alpha, - ) - return self.output - - -class VisualizationDemo(object): - def __init__(self, cfg, instance_mode=ColorMode.IMAGE): - self.cpu_device = torch.device("cpu") - self.instance_mode = instance_mode - - self.predictor = DefaultPredictor(cfg) - - def run_on_image(self, image): - predictions = self.predictor(image) - # Convert image from OpenCV BGR format to Matplotlib RGB format. 
- image = image[:, :, ::-1] - visualizer = Visualizer_GRiT(image, instance_mode=self.instance_mode) - instances = predictions["instances"].to(self.cpu_device) - vis_output = visualizer.draw_instance_predictions(predictions=instances) - - return predictions, vis_output \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cookie Ejecutar Reino Pc Descargar Ldplayer.md b/spaces/Benson/text-generation/Examples/Cookie Ejecutar Reino Pc Descargar Ldplayer.md deleted file mode 100644 index bc3919809c173a7b6109ebe4fc47939e2d432d55..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cookie Ejecutar Reino Pc Descargar Ldplayer.md +++ /dev/null @@ -1,58 +0,0 @@ - -

    How to Play Cookie Run: Kingdom on PC with LDPlayer

    -

    Cookie Run: Kingdom is a popular mobile game that combines action-RPG, base-building, and gacha elements. It features a large cast of unique cookies, a customizable kingdom, and a variety of game modes and events. If you are a fan of this game, you may be wondering how to play it on your PC for a better gaming experience. In this article, we will show you how to play Cookie Run: Kingdom on PC with LDPlayer, a free and fast Android emulator that provides powerful gamer-oriented features.

    -

    cookie run kingdom pc download ldplayer


    Download Zip ✓✓✓ https://bltlly.com/2v6MNG



    -

    What is Cookie Run: Kingdom?

    -

    A fun and colorful action-RPG with cookies

    -

    Cookie Run: Kingdom is the eighth game in the Cookie Run series, developed by Devsisters. It is a departure from the runner gameplay of the original, featuring real-time battle strategy and city-building instead. You can create your own squad of cookies, each with their own skills and costumes, and lead them in battles against the enemies of the Darkness. You can also explore other ancient kingdoms, unravel the mysteries of the ancient heroes, and take part in various festivals.

    -

    A creative and customizable game with cookies

    -

    Cookie Run: Kingdom also lets you build your own kingdom with cookies. You can decorate and design your kingdom to your liking, using different decorations and buildings. You can also craft items, produce materials, and upgrade your facilities. Your kingdom will be home to your cookie squad, where they can rest, interact, and have fun. You can also visit other players' kingdoms and see how they have built their cookie paradise.

    -

    An exciting and challenging game with various modes and events

    - -

    What is LDPlayer?

    -

    A free and fast Android emulator for PC

    -

    LDPlayer is an Android emulator that lets you run mobile games on your PC with a mouse and keyboard. It delivers the fastest performance for Android gaming, supports various Windows systems, and runs the most popular apps and games. It is free to download and use, and it contains no malware or spyware.

    -

    A powerful, gamer-oriented emulator with useful features and functions

    -

    LDPlayer is not just a simple emulator but a gamer-oriented one. It provides powerful features and functions that will improve your gaming experience on PC. Some of these features are:

    -

    - -

    A compatible and stable emulator with support for the latest games

    - -

    How to Download and Install Cookie Run: Kingdom on PC with LDPlayer

    -

    Step 1: Download and install LDPlayer on your desktop

    -

    The first step to playing Cookie Run: Kingdom on PC with LDPlayer is to download and install LDPlayer on your desktop. You can download LDPlayer from its official website or from this link: https://www.ldplayer.net/. The installation process is quick and simple, and you only have to follow the on-screen instructions.

    -

    Step 2: Open LDPlayer and search for Cookie Run: Kingdom in the LD Store

    -

    The next step is to open LDPlayer and search for Cookie Run: Kingdom in the LD Store. The LD Store is LDPlayer's built-in app store, where you can find and download various apps and games. You can access the LD Store from the LDPlayer home screen or from the toolbar on the right side. You can also type Cookie Run: Kingdom into the search bar, or browse the categories to find it.

    -

    Step 3: Install Cookie Run: Kingdom on your LDPlayer Android emulator

    -

    The third step is to install Cookie Run: Kingdom on your LDPlayer Android emulator. Once you find Cookie Run: Kingdom in the LD Store, just click the install button and wait a few minutes for the installation to finish. You can also check the installation progress from the toolbar on the right side.

    -

    Step 4: Open the game and enjoy playing Cookie Run: Kingdom on PC with LDPlayer

    -

    The final step is to open the game and enjoy playing Cookie Run: Kingdom on PC with LDPlayer. You can find the game icon on the LDPlayer home screen or in the toolbar on the right side. You can also create a desktop shortcut for the game for easier access. Once you open the game, you can log in with your account, or create a new one if you don't have one. You can also adjust the game settings, such as graphics, sound, and language.

    -

    Conclusion

    - -

    Frequently Asked Questions

    -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/wait.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/wait.py deleted file mode 100644 index f9349c028360d541c56962d6a09bd9c2a00e3a37..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/wait.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright 2016–2021 Julien Danjou -# Copyright 2016 Joshua Harlow -# Copyright 2013-2014 Ray Holder -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import random -import typing - -from pip._vendor.tenacity import _utils - -if typing.TYPE_CHECKING: - from pip._vendor.tenacity import RetryCallState - - -class wait_base(abc.ABC): - """Abstract base class for wait strategies.""" - - @abc.abstractmethod - def __call__(self, retry_state: "RetryCallState") -> float: - pass - - def __add__(self, other: "wait_base") -> "wait_combine": - return wait_combine(self, other) - - def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]: - # make it possible to use multiple waits with the built-in sum function - if other == 0: # type: ignore[comparison-overlap] - return self - return self.__add__(other) - - -WaitBaseT = typing.Union[wait_base, typing.Callable[["RetryCallState"], typing.Union[float, int]]] - - -class wait_fixed(wait_base): - """Wait strategy that waits a fixed amount of time between each retry.""" - - def __init__(self, wait: _utils.time_unit_type) -> None: - self.wait_fixed = _utils.to_seconds(wait) - - def __call__(self, retry_state: "RetryCallState") -> float: - return self.wait_fixed - - -class wait_none(wait_fixed): - """Wait strategy that doesn't wait at all before retrying.""" - - def __init__(self) -> None: - super().__init__(0) - - -class wait_random(wait_base): - """Wait strategy that waits a random amount of time between min/max.""" - - def __init__(self, min: _utils.time_unit_type = 0, max: _utils.time_unit_type = 1) -> None: # noqa - self.wait_random_min = _utils.to_seconds(min) - self.wait_random_max = _utils.to_seconds(max) - - def __call__(self, retry_state: "RetryCallState") -> float: - return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min)) - - -class wait_combine(wait_base): - """Combine several waiting strategies.""" - - def __init__(self, *strategies: wait_base) -> None: - self.wait_funcs = strategies - - def __call__(self, retry_state: "RetryCallState") -> float: - return sum(x(retry_state=retry_state) for x in self.wait_funcs) - - -class wait_chain(wait_base): - """Chain two or more waiting strategies. - - If all strategies are exhausted, the very last strategy is used - thereafter. 
- - For example:: - - @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] + - [wait_fixed(2) for j in range(5)] + - [wait_fixed(5) for k in range(4))) - def wait_chained(): - print("Wait 1s for 3 attempts, 2s for 5 attempts and 5s - thereafter.") - """ - - def __init__(self, *strategies: wait_base) -> None: - self.strategies = strategies - - def __call__(self, retry_state: "RetryCallState") -> float: - wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies)) - wait_func = self.strategies[wait_func_no - 1] - return wait_func(retry_state=retry_state) - - -class wait_incrementing(wait_base): - """Wait an incremental amount of time after each attempt. - - Starting at a starting value and incrementing by a value for each attempt - (and restricting the upper limit to some maximum value). - """ - - def __init__( - self, - start: _utils.time_unit_type = 0, - increment: _utils.time_unit_type = 100, - max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa - ) -> None: - self.start = _utils.to_seconds(start) - self.increment = _utils.to_seconds(increment) - self.max = _utils.to_seconds(max) - - def __call__(self, retry_state: "RetryCallState") -> float: - result = self.start + (self.increment * (retry_state.attempt_number - 1)) - return max(0, min(result, self.max)) - - -class wait_exponential(wait_base): - """Wait strategy that applies exponential backoff. - - It allows for a customized multiplier and an ability to restrict the - upper and lower limits to some maximum and minimum value. - - The intervals are fixed (i.e. there is no jitter), so this strategy is - suitable for balancing retries against latency when a required resource is - unavailable for an unknown duration, but *not* suitable for resolving - contention between multiple processes for a shared resource. Use - wait_random_exponential for the latter case. - """ - - def __init__( - self, - multiplier: typing.Union[int, float] = 1, - max: _utils.time_unit_type = _utils.MAX_WAIT, # noqa - exp_base: typing.Union[int, float] = 2, - min: _utils.time_unit_type = 0, # noqa - ) -> None: - self.multiplier = multiplier - self.min = _utils.to_seconds(min) - self.max = _utils.to_seconds(max) - self.exp_base = exp_base - - def __call__(self, retry_state: "RetryCallState") -> float: - try: - exp = self.exp_base ** (retry_state.attempt_number - 1) - result = self.multiplier * exp - except OverflowError: - return self.max - return max(max(0, self.min), min(result, self.max)) - - -class wait_random_exponential(wait_exponential): - """Random wait with exponentially widening window. - - An exponential backoff strategy used to mediate contention between multiple - uncoordinated processes for a shared resource in distributed systems. This - is the sense in which "exponential backoff" is meant in e.g. Ethernet - networking, and corresponds to the "Full Jitter" algorithm described in - this blog post: - - https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ - - Each retry occurs at a random time in a geometrically expanding interval. - It allows for a custom multiplier and an ability to restrict the upper - limit of the random interval to some maximum value. - - Example:: - - wait_random_exponential(multiplier=0.5, # initial window 0.5s - max=60) # max 60s timeout - - When waiting for an unavailable resource to become available again, as - opposed to trying to resolve contention for a shared resource, the - wait_exponential strategy (which uses a fixed interval) may be preferable. 
- - """ - - def __call__(self, retry_state: "RetryCallState") -> float: - high = super().__call__(retry_state=retry_state) - return random.uniform(0, high) - - -class wait_exponential_jitter(wait_base): - """Wait strategy that applies exponential backoff and jitter. - - It allows for a customized initial wait, maximum wait and jitter. - - This implements the strategy described here: - https://cloud.google.com/storage/docs/retry-strategy - - The wait time is min(initial * 2**n + random.uniform(0, jitter), maximum) - where n is the retry count. - """ - - def __init__( - self, - initial: float = 1, - max: float = _utils.MAX_WAIT, # noqa - exp_base: float = 2, - jitter: float = 1, - ) -> None: - self.initial = initial - self.max = max - self.exp_base = exp_base - self.jitter = jitter - - def __call__(self, retry_state: "RetryCallState") -> float: - jitter = random.uniform(0, self.jitter) - try: - exp = self.exp_base ** (retry_state.attempt_number - 1) - result = self.initial * exp + jitter - except OverflowError: - result = self.max - return max(0, min(result, self.max)) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mfb/net.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mfb/net.py deleted file mode 100644 index 63a369f8f9ed9cccd235cda267bc974d444362bc..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mfb/net.py +++ /dev/null @@ -1,62 +0,0 @@ -# -------------------------------------------------------- -# OpenVQA -# Licensed under The MIT License [see LICENSE for details] -# Written by Pengbing Gao https://github.com/nbgao -# -------------------------------------------------------- - -from openvqa.models.mfb.mfb import CoAtt -from openvqa.models.mfb.adapter import Adapter -import torch -import torch.nn as nn - - -# ------------------------------------------------------- -# ---- Main MFB/MFH model with Co-Attention Learning ---- -# ------------------------------------------------------- - - -class Net(nn.Module): - def __init__(self, __C, pretrained_emb, token_size, answer_size): - super(Net, self).__init__() - self.__C = __C - self.adapter = Adapter(__C) - - self.embedding = nn.Embedding( - num_embeddings=token_size, - embedding_dim=__C.WORD_EMBED_SIZE - ) - - # Loading the GloVe embedding weights - if __C.USE_GLOVE: - self.embedding.weight.data.copy_(torch.from_numpy(pretrained_emb)) - - self.lstm = nn.LSTM( - input_size=__C.WORD_EMBED_SIZE, - hidden_size=__C.LSTM_OUT_SIZE, - num_layers=1, - batch_first=True - ) - self.dropout = nn.Dropout(__C.DROPOUT_R) - self.dropout_lstm = nn.Dropout(__C.DROPOUT_R) - self.backbone = CoAtt(__C) - - if __C.HIGH_ORDER: # MFH - self.proj = nn.Linear(2*__C.MFB_O, answer_size) - else: # MFB - self.proj = nn.Linear(__C.MFB_O, answer_size) - - def forward(self, frcn_feat, grid_feat, bbox_feat, ques_ix): - - img_feat, _ = self.adapter(frcn_feat, grid_feat, bbox_feat) # (N, C, FRCN_FEAT_SIZE) - - # Pre-process Language Feature - ques_feat = self.embedding(ques_ix) # (N, T, WORD_EMBED_SIZE) - ques_feat = self.dropout(ques_feat) - ques_feat, _ = self.lstm(ques_feat) # (N, T, LSTM_OUT_SIZE) - ques_feat = self.dropout_lstm(ques_feat) - - z = self.backbone(img_feat, ques_feat) # MFH:(N, 2*O) / MFB:(N, O) - proj_feat = self.proj(z) # (N, answer_size) - - return proj_feat - diff --git a/spaces/Cong723/gpt-academic-public/request_llm/README.md b/spaces/Cong723/gpt-academic-public/request_llm/README.md deleted file mode 100644 index 
4a912d10136dc99a3ebbe1e228e98f6ab63ad277..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/request_llm/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# How to use other large language models - -## ChatGLM - -- Install the dependencies: `pip install -r request_llm/requirements_chatglm.txt` -- Modify the configuration: in config.py, change the value of LLM_MODEL to "chatglm" - -``` sh -LLM_MODEL = "chatglm" -``` -- Run! -``` sh -python main.py -``` - - ---- -## Text-Generation-UI (TGUI, still being debugged, not yet usable) - -### 1. Deploy TGUI -``` sh -# 1 Download text-generation-webui -git clone https://github.com/oobabooga/text-generation-webui.git -# 2 The latest code in this repository has issues, so roll back to a commit from a few weeks earlier -git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d -# 3 Change into the directory -cd text-generation-webui -# 4 Install the extra dependencies for text-generation -pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers -# 5 Download the model -python download-model.py facebook/galactica-1.3b -# Other options include facebook/opt-1.3b -# facebook/galactica-1.3b -# facebook/galactica-6.7b -# facebook/galactica-120b -# facebook/pygmalion-1.3b, etc. -# See https://github.com/oobabooga/text-generation-webui for details - -# 6 Start text-generation -python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b -``` - -### 2. Modify config.py - -``` sh -# LLM_MODEL format: tgui:[model]@[ws address]:[ws port] , the port must match the one given above -LLM_MODEL = "tgui:galactica-1.3b@localhost:7860" -``` - -### 3. Run! -``` sh -cd chatgpt-academic -python main.py -``` diff --git a/spaces/DHEIVER/CoronaryAngioSegment/app.py b/spaces/DHEIVER/CoronaryAngioSegment/app.py deleted file mode 100644 index e5e0c1d6e3b1cd4024d5516202037e7acb4a86df..0000000000000000000000000000000000000000 --- a/spaces/DHEIVER/CoronaryAngioSegment/app.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - -import gradio as gr -import torch -import cv2 -import numpy as np -from preprocess import unsharp_masking -import glob -import time -from sklearn.cluster import KMeans # Import K-means clustering - -device = "cuda" if torch.cuda.is_available() else "cpu" -model_paths = { - 'SE-RegUNet 4GF': './model/SERegUNet4GF.pt', - 'SE-RegUNet 16GF': './model/SERegUNet16GF.pt', - 'AngioNet': './model/AngioNet.pt', - 'EffUNet++ B5': './model/EffUNetppb5.pt', - 'Reg-SA-UNet++': './model/RegSAUnetpp.pt', - 'UNet3+': './model/UNet3plus.pt', -} - -print("torch: ", torch.__version__) - -def filesort(img, model): - ori = img.copy() - img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - h, w = img.shape - img_out = preprocessing(img, model) - return img_out, h, w, img, ori - -def preprocessing(img, model='SE-RegUNet 4GF'): - img = cv2.resize(img, (512, 512)) - img = unsharp_masking(img).astype(np.uint8) - if model == 'AngioNet' or model == 'UNet3+': - img = np.float32((img - img.min()) / (img.max() - img.min() + 1e-6)) - img_out = np.expand_dims(img, axis=0) - elif model == 'SE-RegUNet 4GF': - clahe1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - clahe2 = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(8, 8)) - image1 = clahe1.apply(img) - image2 = clahe2.apply(img) - img = np.float32((img - img.min()) / (img.max() - img.min() + 1e-6)) - image1 = np.float32((image1 - image1.min()) / (image1.max() - image1.min() + 1e-6)) - image2 = np.float32((image2 - image2.min()) / (image2.max() - image2.min() + 1e-6)) - img_out = np.stack((img, image1, image2), axis=0) - else: - clahe1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - image1 = clahe1.apply(img) - image1 = np.float32((image1 - image1.min()) / (image1.max() - image1.min() + 1e-6)) -
img_out = np.stack((image1,) * 3, axis=0) - return img_out - -def process_input_image(img, model): - pipe = torch.jit.load(model_paths[model]) - pipe = pipe.to(device).eval() - start = time.time() - img, h, w, ori_gray, ori = filesort(img, model) - img = torch.FloatTensor(img).unsqueeze(0).to(device) - with torch.no_grad(): - if model == 'AngioNet': - img = torch.cat([img, img], dim=0) - logit = np.round(torch.softmax(pipe.forward(img), dim=1).detach().cpu().numpy()[0, 0]).astype(np.uint8) - spent = time.time() - start - spent = f"{spent:.3f} second(s)" - - if h != 512 or w != 512: - logit = cv2.resize(logit, (h, w)) - - logit = logit.astype(bool) - img_out = ori.copy() - - # Change the color of the segmented mask to red - img_out[logit, :] = [255, 0, 0] # Red color for the mask - - # Add a white border to the mask - contours, _ = cv2.findContours(logit.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - cv2.drawContours(img_out, contours, -1, [255, 255, 255], 2) # White color for the border - - # Perform K-means clustering on the segmented mask - masked_image = ori_gray.copy() - masked_image[~logit] = 0 # Set non-segmented regions to 0 - flattened_masked_image = masked_image.reshape((-1, 1)) - - # You can adjust the number of clusters (n_clusters) based on your requirements - n_clusters = 2 - kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(flattened_masked_image) - cluster_labels = kmeans.labels_ - - # Determine the potential for anomalies based on the cluster centroids - cluster_centers = kmeans.cluster_centers_ - anomaly_potential = np.abs(cluster_centers[0] - cluster_centers[1]) - - # Define a higher threshold for classifying anomalies - anomaly_threshold = 50 # Adjust this threshold as needed for higher rigor - - # Check if anomaly potential is above the threshold - is_anomaly = np.sum(anomaly_potential) > anomaly_threshold - - # Provide a detailed message for cardiologists only when there's high confidence - if is_anomaly: - anomaly_label = "Potential Anomaly Detected: Consult a Cardiologist for Further Assessment and Diagnosis." - else: - anomaly_label = "No Potential Anomaly Detected. Continue Routine Cardiac Assessment." 
- - return spent, img_out, anomaly_label - -with gr.Column(): - time_spent = gr.Label(label="Time Spent (Preprocessing + Inference)") - img_output = gr.Image(label="Output Mask") - anomaly_label = gr.Label(label="Anomaly Status") - -my_app = gr.Blocks() -with my_app: - gr.Markdown("Coronary Angiogram Segmentation with Gradio.") - with gr.Tabs(): - with gr.TabItem("Select your image"): - with gr.Row(): - with gr.Column(): - img_source = gr.Image(label="Please select angiogram.", value='./example/angio.png', shape=(512, 512)) - model_choice = gr.Dropdown(['SE-RegUNet 4GF', 'SE-RegUNet 16GF', 'AngioNet', 'EffUNet++ B5', - 'Reg-SA-UNet++', 'UNet3+'], label='Model', info='Which model to infer?') - source_image_loader = gr.Button("Vessel Segment") - with gr.Column(): - time_spent = gr.Label(label="Time Spent (Preprocessing + Inference)") - img_output = gr.Image(label="Output Mask") - anomaly_label = gr.Label(label="Anomaly Status") - - source_image_loader.click( - process_input_image, - [ - img_source, - model_choice - ], - [ - time_spent, - img_output, - anomaly_label # Display the anomaly status label - ] - ) - -my_app.launch(debug=True) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3ba00a4a.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3ba00a4a.js deleted file mode 100644 index 560722d73957b63c49c65c940e2331a862a5f5f8..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3ba00a4a.js +++ /dev/null @@ -1,13 +0,0 @@ -const VERSION_RE = new RegExp("3.37.0/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.37.0/${url.pathname?.startsWith('/') ? 
url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}import{S as si,e as ri,s as oi,J as io,K as se,p as Ce,M as Yt,n as vi,A as Ae,_ as Pe,N as dt,B as Xl,C as Xc,h as xr,k as fe,O as pt,U as yn,o as ue,Q as Zl,z as H,u as _n,v as j,y as Vn,x as de,ai as Ql,Z as ea,ao as bn,m as ta,am as Zc,E as ia,ae as na,j as sa,q as ra,r as oa,t as la}from"./index-1d65707a.js";import"./Blocks-c9e1499d.js";import{f as wn,B as aa}from"./Button-f155035a.js";import{B as ha}from"./BlockLabel-66866176.js";import{E as Qc}from"./Empty-eec13822.js";import{C as ef,a as ca}from"./Copy-9f1657c4.js";import{D as tf}from"./Download-daff1959.js";function nf(n){let e,t;return{c(){e=io("svg"),t=io("path"),se(t,"fill","currentColor"),se(t,"d","m31 16l-7 7l-1.41-1.41L28.17 16l-5.58-5.59L24 9l7 7zM1 16l7-7l1.41 1.41L3.83 16l5.58 5.59L8 23l-7-7zm11.42 9.484L17.64 6l1.932.517L14.352 26z"),se(e,"width","100%"),se(e,"height","100%"),se(e,"viewBox","0 0 32 32")},m(i,s){Ce(i,e,s),Yt(e,t)},p:vi,i:vi,o:vi,d(i){i&&Ae(e)}}}let Sr=class extends si{constructor(e){super(),ri(this,e,null,nf,oi,{})}};class _{constructor(){}lineAt(e){if(e<0||e>this.length)throw new RangeError(`Invalid position ${e} in document of length ${this.length}`);return this.lineInner(e,!1,1,0)}line(e){if(e<1||e>this.lines)throw new RangeError(`Invalid line number ${e} in ${this.lines}-line document`);return this.lineInner(e,!0,1,0)}replace(e,t,i){let s=[];return this.decompose(0,e,s,2),i.length&&i.decompose(0,i.length,s,3),this.decompose(t,this.length,s,1),$e.from(s,this.length-(t-e)+i.length)}append(e){return this.replace(this.length,this.length,e)}slice(e,t=this.length){let i=[];return this.decompose(e,t,i,0),$e.from(i,t-e)}eq(e){if(e==this)return!0;if(e.length!=this.length||e.lines!=this.lines)return!1;let t=this.scanIdentical(e,1),i=this.length-this.scanIdentical(e,-1),s=new xi(this),r=new xi(e);for(let o=t,l=t;;){if(s.next(o),r.next(o),o=0,s.lineBreak!=r.lineBreak||s.done!=r.done||s.value!=r.value)return!1;if(l+=s.value.length,s.done||l>=i)return!0}}iter(e=1){return new xi(this,e)}iterRange(e,t=this.length){return new fa(this,e,t)}iterLines(e,t){let i;if(e==null)i=this.iter();else{t==null&&(t=this.lines+1);let s=this.line(e).from;i=this.iterRange(s,Math.max(s,t==this.lines+1?this.length:t<=1?0:this.line(t-1).to))}return new ua(i)}toString(){return this.sliceString(0)}toJSON(){let e=[];return this.flatten(e),e}static of(e){if(e.length==0)throw new RangeError("A document must have at least one line");return e.length==1&&!e[0]?_.empty:e.length<=32?new te(e):$e.from(te.split(e,[]))}}class te extends _{constructor(e,t=sf(e)){super(),this.text=e,this.length=t}get lines(){return this.text.length}get children(){return null}lineInner(e,t,i,s){for(let r=0;;r++){let o=this.text[r],l=s+o.length;if((t?i:l)>=e)return new rf(s,l,i,o);s=l+1,i++}}decompose(e,t,i,s){let r=e<=0&&t>=this.length?this:new te(no(this.text,e,t),Math.min(t,this.length)-Math.max(0,e));if(s&1){let o=i.pop(),l=fn(r.text,o.text.slice(),0,r.length);if(l.length<=32)i.push(new te(l,o.length+r.length));else{let a=l.length>>1;i.push(new te(l.slice(0,a)),new te(l.slice(a)))}}else i.push(r)}replace(e,t,i){if(!(i instanceof te))return super.replace(e,t,i);let s=fn(this.text,fn(i.text,no(this.text,0,e)),t),r=this.length+i.length-(t-e);return s.length<=32?new te(s,r):$e.from(te.split(s,[]),r)}sliceString(e,t=this.length,i=` -`){let s="";for(let r=0,o=0;r<=t&&oe&&o&&(s+=i),er&&(s+=l.slice(Math.max(0,e-r),t-r)),r=a+1}return s}flatten(e){for(let t of 
this.text)e.push(t)}scanIdentical(){return 0}static split(e,t){let i=[],s=-1;for(let r of e)i.push(r),s+=r.length+1,i.length==32&&(t.push(new te(i,s)),i=[],s=-1);return s>-1&&t.push(new te(i,s)),t}}class $e extends _{constructor(e,t){super(),this.children=e,this.length=t,this.lines=0;for(let i of e)this.lines+=i.lines}lineInner(e,t,i,s){for(let r=0;;r++){let o=this.children[r],l=s+o.length,a=i+o.lines-1;if((t?a:l)>=e)return o.lineInner(e,t,i,s);s=l+1,i=a+1}}decompose(e,t,i,s){for(let r=0,o=0;o<=t&&r=o){let h=s&((o<=e?1:0)|(a>=t?2:0));o>=e&&a<=t&&!h?i.push(l):l.decompose(e-o,t-o,i,h)}o=a+1}}replace(e,t,i){if(i.lines=r&&t<=l){let a=o.replace(e-r,t-r,i),h=this.lines-o.lines+a.lines;if(a.lines>5-1&&a.lines>h>>5+1){let c=this.children.slice();return c[s]=a,new $e(c,this.length-(t-e)+i.length)}return super.replace(r,l,a)}r=l+1}return super.replace(e,t,i)}sliceString(e,t=this.length,i=` -`){let s="";for(let r=0,o=0;re&&r&&(s+=i),eo&&(s+=l.sliceString(e-o,t-o,i)),o=a+1}return s}flatten(e){for(let t of this.children)t.flatten(e)}scanIdentical(e,t){if(!(e instanceof $e))return 0;let i=0,[s,r,o,l]=t>0?[0,0,this.children.length,e.children.length]:[this.children.length-1,e.children.length-1,-1,-1];for(;;s+=t,r+=t){if(s==o||r==l)return i;let a=this.children[s],h=e.children[r];if(a!=h)return i+a.scanIdentical(h,t);i+=a.length+1}}static from(e,t=e.reduce((i,s)=>i+s.length+1,-1)){let i=0;for(let d of e)i+=d.lines;if(i<32){let d=[];for(let p of e)p.flatten(d);return new te(d,t)}let s=Math.max(32,i>>5),r=s<<1,o=s>>1,l=[],a=0,h=-1,c=[];function f(d){let p;if(d.lines>r&&d instanceof $e)for(let g of d.children)f(g);else d.lines>o&&(a>o||!a)?(u(),l.push(d)):d instanceof te&&a&&(p=c[c.length-1])instanceof te&&d.lines+p.lines<=32?(a+=d.lines,h+=d.length+1,c[c.length-1]=new te(p.text.concat(d.text),p.length+1+d.length)):(a+d.lines>s&&u(),a+=d.lines,h+=d.length+1,c.push(d))}function u(){a!=0&&(l.push(c.length==1?c[0]:$e.from(c,h)),h=-1,a=c.length=0)}for(let d of e)f(d);return u(),l.length==1?l[0]:new $e(l,t)}}_.empty=new te([""],0);function sf(n){let e=-1;for(let t of n)e+=t.length+1;return e}function fn(n,e,t=0,i=1e9){for(let s=0,r=0,o=!0;r=t&&(a>i&&(l=l.slice(0,i-s)),s0?1:(e instanceof te?e.text.length:e.children.length)<<1]}nextInner(e,t){for(this.done=this.lineBreak=!1;;){let i=this.nodes.length-1,s=this.nodes[i],r=this.offsets[i],o=r>>1,l=s instanceof te?s.text.length:s.children.length;if(o==(t>0?l:0)){if(i==0)return this.done=!0,this.value="",this;t>0&&this.offsets[i-1]++,this.nodes.pop(),this.offsets.pop()}else if((r&1)==(t>0?0:1)){if(this.offsets[i]+=t,e==0)return this.lineBreak=!0,this.value=` -`,this;e--}else if(s instanceof te){let a=s.text[o+(t<0?-1:0)];if(this.offsets[i]+=t,a.length>Math.max(0,e))return this.value=e==0?a:t>0?a.slice(e):a.slice(0,a.length-e),this;e-=a.length}else{let a=s.children[o+(t<0?-1:0)];e>a.length?(e-=a.length,this.offsets[i]+=t):(t<0&&this.offsets[i]--,this.nodes.push(a),this.offsets.push(t>0?1:(a instanceof te?a.text.length:a.children.length)<<1))}}}next(e=0){return e<0&&(this.nextInner(-e,-this.dir),e=this.value.length),this.nextInner(e,this.dir)}}class fa{constructor(e,t,i){this.value="",this.done=!1,this.cursor=new xi(e,t>i?-1:1),this.pos=t>i?e.length:0,this.from=Math.min(t,i),this.to=Math.max(t,i)}nextInner(e,t){if(t<0?this.pos<=this.from:this.pos>=this.to)return this.value="",this.done=!0,this;e+=Math.max(0,t<0?this.pos-this.to:this.from-this.pos);let i=t<0?this.pos-this.from:this.to-this.pos;e>i&&(e=i),i-=e;let{value:s}=this.cursor.next(e);return 
this.pos+=(s.length+e)*t,this.value=s.length<=i?s:t<0?s.slice(s.length-i):s.slice(0,i),this.done=!this.value,this}next(e=0){return e<0?e=Math.max(e,this.from-this.pos):e>0&&(e=Math.min(e,this.to-this.pos)),this.nextInner(e,this.cursor.dir)}get lineBreak(){return this.cursor.lineBreak&&this.value!=""}}class ua{constructor(e){this.inner=e,this.afterBreak=!0,this.value="",this.done=!1}next(e=0){let{done:t,lineBreak:i,value:s}=this.inner.next(e);return t?(this.done=!0,this.value=""):i?this.afterBreak?this.value="":(this.afterBreak=!0,this.next()):(this.value=s,this.afterBreak=!1),this}get lineBreak(){return!1}}typeof Symbol<"u"&&(_.prototype[Symbol.iterator]=function(){return this.iter()},xi.prototype[Symbol.iterator]=fa.prototype[Symbol.iterator]=ua.prototype[Symbol.iterator]=function(){return this});class rf{constructor(e,t,i,s){this.from=e,this.to=t,this.number=i,this.text=s}get length(){return this.to-this.from}}let Ut="lc,34,7n,7,7b,19,,,,2,,2,,,20,b,1c,l,g,,2t,7,2,6,2,2,,4,z,,u,r,2j,b,1m,9,9,,o,4,,9,,3,,5,17,3,3b,f,,w,1j,,,,4,8,4,,3,7,a,2,t,,1m,,,,2,4,8,,9,,a,2,q,,2,2,1l,,4,2,4,2,2,3,3,,u,2,3,,b,2,1l,,4,5,,2,4,,k,2,m,6,,,1m,,,2,,4,8,,7,3,a,2,u,,1n,,,,c,,9,,14,,3,,1l,3,5,3,,4,7,2,b,2,t,,1m,,2,,2,,3,,5,2,7,2,b,2,s,2,1l,2,,,2,4,8,,9,,a,2,t,,20,,4,,2,3,,,8,,29,,2,7,c,8,2q,,2,9,b,6,22,2,r,,,,,,1j,e,,5,,2,5,b,,10,9,,2u,4,,6,,2,2,2,p,2,4,3,g,4,d,,2,2,6,,f,,jj,3,qa,3,t,3,t,2,u,2,1s,2,,7,8,,2,b,9,,19,3,3b,2,y,,3a,3,4,2,9,,6,3,63,2,2,,1m,,,7,,,,,2,8,6,a,2,,1c,h,1r,4,1c,7,,,5,,14,9,c,2,w,4,2,2,,3,1k,,,2,3,,,3,1m,8,2,2,48,3,,d,,7,4,,6,,3,2,5i,1m,,5,ek,,5f,x,2da,3,3x,,2o,w,fe,6,2x,2,n9w,4,,a,w,2,28,2,7k,,3,,4,,p,2,5,,47,2,q,i,d,,12,8,p,b,1a,3,1c,,2,4,2,2,13,,1v,6,2,2,2,2,c,,8,,1b,,1f,,,3,2,2,5,2,,,16,2,8,,6m,,2,,4,,fn4,,kh,g,g,g,a6,2,gt,,6a,,45,5,1ae,3,,2,5,4,14,3,4,,4l,2,fx,4,ar,2,49,b,4w,,1i,f,1k,3,1d,4,2,2,1x,3,10,5,,8,1q,,c,2,1g,9,a,4,2,,2n,3,2,,,2,6,,4g,,3,8,l,2,1l,2,,,,,m,,e,7,3,5,5f,8,2,3,,,n,,29,,2,6,,,2,,,2,,2,6j,,2,4,6,2,,2,r,2,2d,8,2,,,2,2y,,,,2,6,,,2t,3,2,4,,5,77,9,,2,6t,,a,2,,,4,,40,4,2,2,4,,w,a,14,6,2,4,8,,9,6,2,3,1a,d,,2,ba,7,,6,,,2a,m,2,7,,2,,2,3e,6,3,,,2,,7,,,20,2,3,,,,9n,2,f0b,5,1n,7,t4,,1r,4,29,,f5k,2,43q,,,3,4,5,8,8,2,7,u,4,44,3,1iz,1j,4,1e,8,,e,,m,5,,f,11s,7,,h,2,7,,2,,5,79,7,c5,4,15s,7,31,7,240,5,gx7k,2o,3k,6o".split(",").map(n=>n?parseInt(n,36):1);for(let n=1;nn)return Ut[e-1]<=n;return!1}function so(n){return n>=127462&&n<=127487}const ro=8205;function Oe(n,e,t=!0,i=!0){return(t?da:lf)(n,e,i)}function da(n,e,t){if(e==n.length)return e;e&&pa(n.charCodeAt(e))&&ma(n.charCodeAt(e-1))&&e--;let i=ge(n,e);for(e+=Ee(i);e=0&&so(ge(n,o));)r++,o-=2;if(r%2==0)break;e+=2}else break}return e}function lf(n,e,t){for(;e>0;){let i=da(n,e-2,t);if(i=56320&&n<57344}function ma(n){return n>=55296&&n<56320}function ge(n,e){let t=n.charCodeAt(e);if(!ma(t)||e+1==n.length)return t;let i=n.charCodeAt(e+1);return pa(i)?(t-55296<<10)+(i-56320)+65536:t}function ga(n){return n<=65535?String.fromCharCode(n):(n-=65536,String.fromCharCode((n>>10)+55296,(n&1023)+56320))}function Ee(n){return n<65536?1:2}const Bs=/\r\n?|\n/;var ce=function(n){return n[n.Simple=0]="Simple",n[n.TrackDel=1]="TrackDel",n[n.TrackBefore=2]="TrackBefore",n[n.TrackAfter=3]="TrackAfter",n}(ce||(ce={}));class Ze{constructor(e){this.sections=e}get length(){let e=0;for(let t=0;te)return r+(e-s);r+=l}else{if(i!=ce.Simple&&h>=e&&(i==ce.TrackDel&&se||i==ce.TrackBefore&&se))return null;if(h>e||h==e&&t<0&&!l)return e==s||t<0?r:r+a;r+=a}s=h}if(e>s)throw new RangeError(`Position ${e} is out of range for changeset of length ${s}`);return 
r}touchesRange(e,t=e){for(let i=0,s=0;i=0&&s<=t&&l>=e)return st?"cover":!0;s=l}return!1}toString(){let e="";for(let t=0;t=0?":"+s:"")}return e}toJSON(){return this.sections}static fromJSON(e){if(!Array.isArray(e)||e.length%2||e.some(t=>typeof t!="number"))throw new RangeError("Invalid JSON representation of ChangeDesc");return new Ze(e)}static create(e){return new Ze(e)}}class ne extends Ze{constructor(e,t){super(e),this.inserted=t}apply(e){if(this.length!=e.length)throw new RangeError("Applying change set to a document with the wrong length");return Ps(this,(t,i,s,r,o)=>e=e.replace(s,s+(i-t),o),!1),e}mapDesc(e,t=!1){return Es(this,e,t,!0)}invert(e){let t=this.sections.slice(),i=[];for(let s=0,r=0;s=0){t[s]=l,t[s+1]=o;let a=s>>1;for(;i.length0&&ht(i,t,r.text),r.forward(c),l+=c}let h=e[o++];for(;l>1].toJSON()))}return e}static of(e,t,i){let s=[],r=[],o=0,l=null;function a(c=!1){if(!c&&!s.length)return;ou||f<0||u>t)throw new RangeError(`Invalid change range ${f} to ${u} (in doc of length ${t})`);let p=d?typeof d=="string"?_.of(d.split(i||Bs)):d:_.empty,g=p.length;if(f==u&&g==0)return;fo&&me(s,f-o,-1),me(s,u-f,g),ht(r,s,p),o=u}}return h(e),a(!l),l}static empty(e){return new ne(e?[e,-1]:[],[])}static fromJSON(e){if(!Array.isArray(e))throw new RangeError("Invalid JSON representation of ChangeSet");let t=[],i=[];for(let s=0;sl&&typeof o!="string"))throw new RangeError("Invalid JSON representation of ChangeSet");if(r.length==1)t.push(r[0],0);else{for(;i.length=0&&t<=0&&t==n[s+1]?n[s]+=e:e==0&&n[s]==0?n[s+1]+=t:i?(n[s]+=e,n[s+1]+=t):n.push(e,t)}function ht(n,e,t){if(t.length==0)return;let i=e.length-2>>1;if(i>1])),!(t||o==n.sections.length||n.sections[o+1]<0);)l=n.sections[o++],a=n.sections[o++];e(s,h,r,c,f),s=h,r=c}}}function Es(n,e,t,i=!1){let s=[],r=i?[]:null,o=new Di(n),l=new Di(e);for(let a=-1;;)if(o.ins==-1&&l.ins==-1){let h=Math.min(o.len,l.len);me(s,h,-1),o.forward(h),l.forward(h)}else if(l.ins>=0&&(o.ins<0||a==o.i||o.off==0&&(l.len=0&&a=0){let h=0,c=o.len;for(;c;)if(l.ins==-1){let f=Math.min(c,l.len);h+=f,c-=f,l.forward(f)}else if(l.ins==0&&l.lena||o.ins>=0&&o.len>a)&&(l||i.length>h),r.forward2(a),o.forward(a)}}}}class Di{constructor(e){this.set=e,this.i=0,this.next()}next(){let{sections:e}=this.set;this.i>1;return t>=e.length?_.empty:e[t]}textBit(e){let{inserted:t}=this.set,i=this.i-2>>1;return i>=t.length&&!e?_.empty:t[i].slice(this.off,e==null?void 0:this.off+e)}forward(e){e==this.len?this.next():(this.len-=e,this.off+=e)}forward2(e){this.ins==-1?this.forward(e):e==this.ins?this.next():(this.ins-=e,this.off+=e)}}class Mt{constructor(e,t,i){this.from=e,this.to=t,this.flags=i}get anchor(){return this.flags&16?this.to:this.from}get head(){return this.flags&16?this.from:this.to}get empty(){return this.from==this.to}get assoc(){return this.flags&4?-1:this.flags&8?1:0}get bidiLevel(){let e=this.flags&3;return e==3?null:e}get goalColumn(){let e=this.flags>>5;return e==33554431?void 0:e}map(e,t=-1){let i,s;return this.empty?i=s=e.mapPos(this.from,t):(i=e.mapPos(this.from,1),s=e.mapPos(this.to,-1)),i==this.from&&s==this.to?this:new Mt(i,s,this.flags)}extend(e,t=e){if(e<=this.anchor&&t>=this.anchor)return w.range(e,t);let i=Math.abs(e-this.anchor)>Math.abs(t-this.anchor)?e:t;return w.range(this.anchor,i)}eq(e){return this.anchor==e.anchor&&this.head==e.head}toJSON(){return{anchor:this.anchor,head:this.head}}static fromJSON(e){if(!e||typeof e.anchor!="number"||typeof e.head!="number")throw new RangeError("Invalid JSON representation for SelectionRange");return w.range(e.anchor,e.head)}static 
create(e,t,i){return new Mt(e,t,i)}}class w{constructor(e,t){this.ranges=e,this.mainIndex=t}map(e,t=-1){return e.empty?this:w.create(this.ranges.map(i=>i.map(e,t)),this.mainIndex)}eq(e){if(this.ranges.length!=e.ranges.length||this.mainIndex!=e.mainIndex)return!1;for(let t=0;te.toJSON()),main:this.mainIndex}}static fromJSON(e){if(!e||!Array.isArray(e.ranges)||typeof e.main!="number"||e.main>=e.ranges.length)throw new RangeError("Invalid JSON representation for EditorSelection");return new w(e.ranges.map(t=>Mt.fromJSON(t)),e.main)}static single(e,t=e){return new w([w.range(e,t)],0)}static create(e,t=0){if(e.length==0)throw new RangeError("A selection needs at least one range");for(let i=0,s=0;se?4:0))}static normalized(e,t=0){let i=e[t];e.sort((s,r)=>s.from-r.from),t=e.indexOf(i);for(let s=1;sr.head?w.range(a,l):w.range(l,a))}}return new w(e,t)}}function ba(n,e){for(let t of n.ranges)if(t.to>e)throw new RangeError("Selection points outside of document")}let Cr=0;class D{constructor(e,t,i,s,r){this.combine=e,this.compareInput=t,this.compare=i,this.isStatic=s,this.id=Cr++,this.default=e([]),this.extensions=typeof r=="function"?r(this):r}static define(e={}){return new D(e.combine||(t=>t),e.compareInput||((t,i)=>t===i),e.compare||(e.combine?(t,i)=>t===i:Ar),!!e.static,e.enables)}of(e){return new un([],this,0,e)}compute(e,t){if(this.isStatic)throw new Error("Can't compute a static facet");return new un(e,this,1,t)}computeN(e,t){if(this.isStatic)throw new Error("Can't compute a static facet");return new un(e,this,2,t)}from(e,t){return t||(t=i=>i),this.compute([e],i=>t(i.field(e)))}}function Ar(n,e){return n==e||n.length==e.length&&n.every((t,i)=>t===e[i])}class un{constructor(e,t,i,s){this.dependencies=e,this.facet=t,this.type=i,this.value=s,this.id=Cr++}dynamicSlot(e){var t;let i=this.value,s=this.facet.compareInput,r=this.id,o=e[r]>>1,l=this.type==2,a=!1,h=!1,c=[];for(let f of this.dependencies)f=="doc"?a=!0:f=="selection"?h=!0:((t=e[f.id])!==null&&t!==void 0?t:1)&1||c.push(e[f.id]);return{create(f){return f.values[o]=i(f),1},update(f,u){if(a&&u.docChanged||h&&(u.docChanged||u.selection)||Rs(f,c)){let d=i(f);if(l?!oo(d,f.values[o],s):!s(d,f.values[o]))return f.values[o]=d,1}return 0},reconfigure:(f,u)=>{let d=i(f),p=u.config.address[r];if(p!=null){let g=vn(u,p);if(this.dependencies.every(y=>y instanceof D?u.facet(y)===f.facet(y):y instanceof Me?u.field(y,!1)==f.field(y,!1):!0)||(l?oo(d,g,s):s(d,g)))return f.values[o]=g,0}return f.values[o]=d,1}}}}function oo(n,e,t){if(n.length!=e.length)return!1;for(let i=0;in[a.id]),s=t.map(a=>a.type),r=i.filter(a=>!(a&1)),o=n[e.id]>>1;function l(a){let h=[];for(let c=0;ci===s),e);return e.provide&&(t.provides=e.provide(t)),t}create(e){let t=e.facet(lo).find(i=>i.field==this);return(t?.create||this.createF)(e)}slot(e){let t=e[this.id]>>1;return{create:i=>(i.values[t]=this.create(i),1),update:(i,s)=>{let r=i.values[t],o=this.updateF(r,s);return this.compareF(r,o)?0:(i.values[t]=o,1)},reconfigure:(i,s)=>s.config.address[this.id]!=null?(i.values[t]=s.field(this),0):(i.values[t]=this.create(i),1)}}init(e){return[this,lo.of({field:this,create:e})]}get extension(){return this}}const Ct={lowest:4,low:3,default:2,high:1,highest:0};function ci(n){return e=>new wa(e,n)}const Vi={highest:ci(Ct.highest),high:ci(Ct.high),default:ci(Ct.default),low:ci(Ct.low),lowest:ci(Ct.lowest)};class wa{constructor(e,t){this.inner=e,this.prec=t}}class Fn{of(e){return new Ls(this,e)}reconfigure(e){return Fn.reconfigure.of({compartment:this,extension:e})}get(e){return 
e.config.compartments.get(this)}}class Ls{constructor(e,t){this.compartment=e,this.inner=t}}class kn{constructor(e,t,i,s,r,o){for(this.base=e,this.compartments=t,this.dynamicSlots=i,this.address=s,this.staticValues=r,this.facets=o,this.statusTemplate=[];this.statusTemplate.length>1]}static resolve(e,t,i){let s=[],r=Object.create(null),o=new Map;for(let u of hf(e,t,o))u instanceof Me?s.push(u):(r[u.facet.id]||(r[u.facet.id]=[])).push(u);let l=Object.create(null),a=[],h=[];for(let u of s)l[u.id]=h.length<<1,h.push(d=>u.slot(d));let c=i?.config.facets;for(let u in r){let d=r[u],p=d[0].facet,g=c&&c[u]||[];if(d.every(y=>y.type==0))if(l[p.id]=a.length<<1|1,Ar(g,d))a.push(i.facet(p));else{let y=p.combine(d.map(b=>b.value));a.push(i&&p.compare(y,i.facet(p))?i.facet(p):y)}else{for(let y of d)y.type==0?(l[y.id]=a.length<<1|1,a.push(y.value)):(l[y.id]=h.length<<1,h.push(b=>y.dynamicSlot(b)));l[p.id]=h.length<<1,h.push(y=>af(y,p,d))}}let f=h.map(u=>u(l));return new kn(e,o,f,l,a,r)}}function hf(n,e,t){let i=[[],[],[],[],[]],s=new Map;function r(o,l){let a=s.get(o);if(a!=null){if(a<=l)return;let h=i[a].indexOf(o);h>-1&&i[a].splice(h,1),o instanceof Ls&&t.delete(o.compartment)}if(s.set(o,l),Array.isArray(o))for(let h of o)r(h,l);else if(o instanceof Ls){if(t.has(o.compartment))throw new RangeError("Duplicate use of compartment in extensions");let h=e.get(o.compartment)||o.inner;t.set(o.compartment,h),r(h,l)}else if(o instanceof wa)r(o.inner,o.prec);else if(o instanceof Me)i[l].push(o),o.provides&&r(o.provides,l);else if(o instanceof un)i[l].push(o),o.facet.extensions&&r(o.facet.extensions,Ct.default);else{let h=o.extension;if(!h)throw new Error(`Unrecognized extension value in extension set (${o}). This sometimes happens because multiple instances of @codemirror/state are loaded, breaking instanceof checks.`);r(h,l)}}return r(n,Ct.default),i.reduce((o,l)=>o.concat(l))}function Si(n,e){if(e&1)return 2;let t=e>>1,i=n.status[t];if(i==4)throw new Error("Cyclic dependency between fields and/or facets");if(i&2)return i;n.status[t]=4;let s=n.computeSlot(n,n.config.dynamicSlots[t]);return n.status[t]=2|s}function vn(n,e){return e&1?n.config.staticValues[e>>1]:n.values[e>>1]}const ka=D.define(),va=D.define({combine:n=>n.some(e=>e),static:!0}),xa=D.define({combine:n=>n.length?n[0]:void 0,static:!0}),Sa=D.define(),Ca=D.define(),Aa=D.define(),Ma=D.define({combine:n=>n.length?n[0]:!1});class Nt{constructor(e,t){this.type=e,this.value=t}static define(){return new cf}}class cf{of(e){return new Nt(this,e)}}class ff{constructor(e){this.map=e}of(e){return new R(this,e)}}class R{constructor(e,t){this.type=e,this.value=t}map(e){let t=this.type.map(this.value,e);return t===void 0?void 0:t==this.value?this:new R(this.type,t)}is(e){return this.type==e}static define(e={}){return new ff(e.map||(t=>t))}static mapEffects(e,t){if(!e.length)return e;let i=[];for(let s of e){let r=s.map(t);r&&i.push(r)}return i}}R.reconfigure=R.define();R.appendConfig=R.define();class re{constructor(e,t,i,s,r,o){this.startState=e,this.changes=t,this.selection=i,this.effects=s,this.annotations=r,this.scrollIntoView=o,this._doc=null,this._state=null,i&&ba(i,t.newLength),r.some(l=>l.type==re.time)||(this.annotations=r.concat(re.time.of(Date.now())))}static create(e,t,i,s,r,o){return new re(e,t,i,s,r,o)}get newDoc(){return this._doc||(this._doc=this.changes.apply(this.startState.doc))}get newSelection(){return this.selection||this.startState.selection.map(this.changes)}get state(){return 
this._state||this.startState.applyTransaction(this),this._state}annotation(e){for(let t of this.annotations)if(t.type==e)return t.value}get docChanged(){return!this.changes.empty}get reconfigured(){return this.startState.config!=this.state.config}isUserEvent(e){let t=this.annotation(re.userEvent);return!!(t&&(t==e||t.length>e.length&&t.slice(0,e.length)==e&&t[e.length]=="."))}}re.time=Nt.define();re.userEvent=Nt.define();re.addToHistory=Nt.define();re.remote=Nt.define();function uf(n,e){let t=[];for(let i=0,s=0;;){let r,o;if(i=n[i]))r=n[i++],o=n[i++];else if(s=0;s--){let r=i[s](n);r instanceof re?n=r:Array.isArray(r)&&r.length==1&&r[0]instanceof re?n=r[0]:n=Ta(e,Gt(r),!1)}return n}function pf(n){let e=n.startState,t=e.facet(Aa),i=n;for(let s=t.length-1;s>=0;s--){let r=t[s](n);r&&Object.keys(r).length&&(i=Da(i,Is(e,r,n.changes.newLength),!0))}return i==n?n:re.create(e,n.changes,n.selection,i.effects,i.annotations,i.scrollIntoView)}const mf=[];function Gt(n){return n==null?mf:Array.isArray(n)?n:[n]}var Re=function(n){return n[n.Word=0]="Word",n[n.Space=1]="Space",n[n.Other=2]="Other",n}(Re||(Re={}));const gf=/[\u00df\u0587\u0590-\u05f4\u0600-\u06ff\u3040-\u309f\u30a0-\u30ff\u3400-\u4db5\u4e00-\u9fcc\uac00-\ud7af]/;let Ns;try{Ns=new RegExp("[\\p{Alphabetic}\\p{Number}_]","u")}catch{}function yf(n){if(Ns)return Ns.test(n);for(let e=0;e"€"&&(t.toUpperCase()!=t.toLowerCase()||gf.test(t)))return!0}return!1}function bf(n){return e=>{if(!/\S/.test(e))return Re.Space;if(yf(e))return Re.Word;for(let t=0;t-1)return Re.Word;return Re.Other}}class N{constructor(e,t,i,s,r,o){this.config=e,this.doc=t,this.selection=i,this.values=s,this.status=e.statusTemplate.slice(),this.computeSlot=r,o&&(o._state=this);for(let l=0;ls.set(a,l)),t=null),s.set(o.value.compartment,o.value.extension)):o.is(R.reconfigure)?(t=null,i=o.value):o.is(R.appendConfig)&&(t=null,i=Gt(i).concat(o.value));let r;t?r=e.startState.values.slice():(t=kn.resolve(i,s,this),r=new N(t,this.doc,this.selection,t.dynamicSlots.map(()=>null),(l,a)=>a.reconfigure(l,this),null).values),new N(t,e.newDoc,e.newSelection,r,(o,l)=>l.update(o,e),e)}replaceSelection(e){return typeof e=="string"&&(e=this.toText(e)),this.changeByRange(t=>({changes:{from:t.from,to:t.to,insert:e},range:w.cursor(t.from+e.length)}))}changeByRange(e){let t=this.selection,i=e(t.ranges[0]),s=this.changes(i.changes),r=[i.range],o=Gt(i.effects);for(let l=1;lo.spec.fromJSON(l,a)))}}return N.create({doc:e.doc,selection:w.fromJSON(e.selection),extensions:t.extensions?s.concat([t.extensions]):s})}static create(e={}){let t=kn.resolve(e.extensions||[],new Map),i=e.doc instanceof _?e.doc:_.of((e.doc||"").split(t.staticFacet(N.lineSeparator)||Bs)),s=e.selection?e.selection instanceof w?e.selection:w.single(e.selection.anchor,e.selection.head):w.single(0);return ba(s,i.length),t.staticFacet(va)||(s=s.asSingle()),new N(t,i,s,t.dynamicSlots.map(()=>null),(r,o)=>o.create(r),null)}get tabSize(){return this.facet(N.tabSize)}get lineBreak(){return this.facet(N.lineSeparator)||` -`}get readOnly(){return this.facet(Ma)}phrase(e,...t){for(let i of this.facet(N.phrases))if(Object.prototype.hasOwnProperty.call(i,e)){e=i[e];break}return t.length&&(e=e.replace(/\$(\$|\d*)/g,(i,s)=>{if(s=="$")return"$";let r=+(s||1);return!r||r>t.length?i:t[r-1]})),e}languageDataAt(e,t,i=-1){let s=[];for(let r of this.facet(ka))for(let o of r(this,t,i))Object.prototype.hasOwnProperty.call(o,e)&&s.push(o[e]);return s}charCategorizer(e){return 
bf(this.languageDataAt("wordChars",e).join(""))}wordAt(e){let{text:t,from:i,length:s}=this.doc.lineAt(e),r=this.charCategorizer(e),o=e-i,l=e-i;for(;o>0;){let a=Oe(t,o,!1);if(r(t.slice(a,o))!=Re.Word)break;o=a}for(;ln.length?n[0]:4});N.lineSeparator=xa;N.readOnly=Ma;N.phrases=D.define({compare(n,e){let t=Object.keys(n),i=Object.keys(e);return t.length==i.length&&t.every(s=>n[s]==e[s])}});N.languageData=ka;N.changeFilter=Sa;N.transactionFilter=Ca;N.transactionExtender=Aa;Fn.reconfigure=R.define();function _t(n,e,t={}){let i={};for(let s of n)for(let r of Object.keys(s)){let o=s[r],l=i[r];if(l===void 0)i[r]=o;else if(!(l===o||o===void 0))if(Object.hasOwnProperty.call(t,r))i[r]=t[r](l,o);else throw new Error("Config merge conflict for field "+r)}for(let s in e)i[s]===void 0&&(i[s]=e[s]);return i}class Bt{eq(e){return this==e}range(e,t=e){return _s.create(e,t,this)}}Bt.prototype.startSide=Bt.prototype.endSide=0;Bt.prototype.point=!1;Bt.prototype.mapMode=ce.TrackDel;let _s=class Oa{constructor(e,t,i){this.from=e,this.to=t,this.value=i}static create(e,t,i){return new Oa(e,t,i)}};function Vs(n,e){return n.from-e.from||n.value.startSide-e.value.startSide}class Mr{constructor(e,t,i,s){this.from=e,this.to=t,this.value=i,this.maxPoint=s}get length(){return this.to[this.to.length-1]}findIndex(e,t,i,s=0){let r=i?this.to:this.from;for(let o=s,l=r.length;;){if(o==l)return o;let a=o+l>>1,h=r[a]-e||(i?this.value[a].endSide:this.value[a].startSide)-t;if(a==o)return h>=0?o:l;h>=0?l=a:o=a+1}}between(e,t,i,s){for(let r=this.findIndex(t,-1e9,!0),o=this.findIndex(i,1e9,!1,r);rd||u==d&&h.startSide>0&&h.endSide<=0)continue;(d-u||h.endSide-h.startSide)<0||(o<0&&(o=u),h.point&&(l=Math.max(l,d-u)),i.push(h),s.push(u-o),r.push(d-o))}return{mapped:i.length?new Mr(s,r,i,l):null,pos:o}}}class F{constructor(e,t,i,s){this.chunkPos=e,this.chunk=t,this.nextLayer=i,this.maxPoint=s}static create(e,t,i,s){return new F(e,t,i,s)}get length(){let e=this.chunk.length-1;return e<0?0:Math.max(this.chunkEnd(e),this.nextLayer.length)}get size(){if(this.isEmpty)return 0;let e=this.nextLayer.size;for(let t of this.chunk)e+=t.value.length;return e}chunkEnd(e){return this.chunkPos[e]+this.chunk[e].length}update(e){let{add:t=[],sort:i=!1,filterFrom:s=0,filterTo:r=this.length}=e,o=e.filter;if(t.length==0&&!o)return this;if(i&&(t=t.slice().sort(Vs)),this.isEmpty)return t.length?F.of(t):this;let l=new Ba(this,null,-1).goto(0),a=0,h=[],c=new Pt;for(;l.value||a=0){let f=t[a++];c.addInner(f.from,f.to,f.value)||h.push(f)}else l.rangeIndex==1&&l.chunkIndexthis.chunkEnd(l.chunkIndex)||rl.to||r=r&&e<=r+o.length&&o.between(r,e-r,t-r,i)===!1)return}this.nextLayer.between(e,t,i)}}iter(e=0){return Ti.from([this]).goto(e)}get isEmpty(){return this.nextLayer==this}static iter(e,t=0){return Ti.from(e).goto(t)}static compare(e,t,i,s,r=-1){let o=e.filter(f=>f.maxPoint>0||!f.isEmpty&&f.maxPoint>=r),l=t.filter(f=>f.maxPoint>0||!f.isEmpty&&f.maxPoint>=r),a=ao(o,l,i),h=new fi(o,a,r),c=new fi(l,a,r);i.iterGaps((f,u,d)=>ho(h,f,c,u,d,s)),i.empty&&i.length==0&&ho(h,0,c,0,0,s)}static eq(e,t,i=0,s){s==null&&(s=1e9);let r=e.filter(c=>!c.isEmpty&&t.indexOf(c)<0),o=t.filter(c=>!c.isEmpty&&e.indexOf(c)<0);if(r.length!=o.length)return!1;if(!r.length)return!0;let l=ao(r,o),a=new fi(r,l,0).goto(i),h=new fi(o,l,0).goto(i);for(;;){if(a.to!=h.to||!Fs(a.active,h.active)||a.point&&(!h.point||!a.point.eq(h.point)))return!1;if(a.to>s)return!0;a.next(),h.next()}}static spans(e,t,i,s,r=-1){let o=new fi(e,null,r).goto(t),l=t,a=o.openStart;for(;;){let 
h=Math.min(o.to,i);if(o.point?(s.point(l,h,o.point,o.activeForPoint(o.to),a,o.pointRank),a=o.openEnd(h)+(o.to>h?1:0)):h>l&&(s.span(l,h,o.active,a),a=o.openEnd(h)),o.to>i)break;l=o.to,o.next()}return a}static of(e,t=!1){let i=new Pt;for(let s of e instanceof _s?[e]:t?wf(e):e)i.add(s.from,s.to,s.value);return i.finish()}}F.empty=new F([],[],null,-1);function wf(n){if(n.length>1)for(let e=n[0],t=1;t0)return n.slice().sort(Vs);e=i}return n}F.empty.nextLayer=F.empty;class Pt{constructor(){this.chunks=[],this.chunkPos=[],this.chunkStart=-1,this.last=null,this.lastFrom=-1e9,this.lastTo=-1e9,this.from=[],this.to=[],this.value=[],this.maxPoint=-1,this.setMaxPoint=-1,this.nextLayer=null}finishChunk(e){this.chunks.push(new Mr(this.from,this.to,this.value,this.maxPoint)),this.chunkPos.push(this.chunkStart),this.chunkStart=-1,this.setMaxPoint=Math.max(this.setMaxPoint,this.maxPoint),this.maxPoint=-1,e&&(this.from=[],this.to=[],this.value=[])}add(e,t,i){this.addInner(e,t,i)||(this.nextLayer||(this.nextLayer=new Pt)).add(e,t,i)}addInner(e,t,i){let s=e-this.lastTo||i.startSide-this.last.endSide;if(s<=0&&(e-this.lastFrom||i.startSide-this.last.startSide)<0)throw new Error("Ranges must be added sorted by `from` position and `startSide`");return s<0?!1:(this.from.length==250&&this.finishChunk(!0),this.chunkStart<0&&(this.chunkStart=e),this.from.push(e-this.chunkStart),this.to.push(t-this.chunkStart),this.last=i,this.lastFrom=e,this.lastTo=t,this.value.push(i),i.point&&(this.maxPoint=Math.max(this.maxPoint,t-e)),!0)}addChunk(e,t){if((e-this.lastTo||t.value[0].startSide-this.last.endSide)<0)return!1;this.from.length&&this.finishChunk(!0),this.setMaxPoint=Math.max(this.setMaxPoint,t.maxPoint),this.chunks.push(t),this.chunkPos.push(e);let i=t.value.length-1;return this.last=t.value[i],this.lastFrom=t.from[i]+e,this.lastTo=t.to[i]+e,!0}finish(){return this.finishInner(F.empty)}finishInner(e){if(this.from.length&&this.finishChunk(!1),this.chunks.length==0)return e;let t=F.create(this.chunkPos,this.chunks,this.nextLayer?this.nextLayer.finishInner(e):e,this.setMaxPoint);return this.from=null,t}}function ao(n,e,t){let i=new Map;for(let r of n)for(let o=0;o=this.minPoint)break}}setRangeIndex(e){if(e==this.layer.chunk[this.chunkIndex].value.length){if(this.chunkIndex++,this.skip)for(;this.chunkIndex=i&&s.push(new Ba(o,t,i,r));return s.length==1?s[0]:new Ti(s)}get startSide(){return this.value?this.value.startSide:0}goto(e,t=-1e9){for(let i of this.heap)i.goto(e,t);for(let i=this.heap.length>>1;i>=0;i--)es(this.heap,i);return this.next(),this}forward(e,t){for(let i of this.heap)i.forward(e,t);for(let i=this.heap.length>>1;i>=0;i--)es(this.heap,i);(this.to-e||this.value.endSide-t)<0&&this.next()}next(){if(this.heap.length==0)this.from=this.to=1e9,this.value=null,this.rank=-1;else{let e=this.heap[0];this.from=e.from,this.to=e.to,this.value=e.value,this.rank=e.rank,e.value&&e.next(),es(this.heap,0)}}}function es(n,e){for(let t=n[e];;){let i=(e<<1)+1;if(i>=n.length)break;let s=n[i];if(i+1=0&&(s=n[i+1],i++),t.compare(s)<0)break;n[i]=t,n[e]=s,e=i}}class fi{constructor(e,t,i){this.minPoint=i,this.active=[],this.activeTo=[],this.activeRank=[],this.minActive=-1,this.point=null,this.pointFrom=0,this.pointRank=0,this.to=-1e9,this.endSide=0,this.openStart=-1,this.cursor=Ti.from(e,t,i)}goto(e,t=-1e9){return 
this.cursor.goto(e,t),this.active.length=this.activeTo.length=this.activeRank.length=0,this.minActive=-1,this.to=e,this.endSide=t,this.openStart=-1,this.next(),this}forward(e,t){for(;this.minActive>-1&&(this.activeTo[this.minActive]-e||this.active[this.minActive].endSide-t)<0;)this.removeActive(this.minActive);this.cursor.forward(e,t)}removeActive(e){Ki(this.active,e),Ki(this.activeTo,e),Ki(this.activeRank,e),this.minActive=co(this.active,this.activeTo)}addActive(e){let t=0,{value:i,to:s,rank:r}=this.cursor;for(;t-1&&(this.activeTo[r]-this.cursor.from||this.active[r].endSide-this.cursor.startSide)<0){if(this.activeTo[r]>e){this.to=this.activeTo[r],this.endSide=this.active[r].endSide;break}this.removeActive(r),i&&Ki(i,r)}else if(this.cursor.value)if(this.cursor.from>e){this.to=this.cursor.from,this.endSide=this.cursor.startSide;break}else{let o=this.cursor.value;if(!o.point)this.addActive(i),this.cursor.frome&&s++,this.cursor.next();else if(t&&this.cursor.to==this.to&&this.cursor.from=0&&!(this.activeRank[i]e||this.activeTo[i]==e&&this.active[i].endSide>=this.point.endSide)&&t.push(this.active[i]);return t.reverse()}openEnd(e){let t=0;for(let i=this.activeTo.length-1;i>=0&&this.activeTo[i]>e;i--)t++;return t}}function ho(n,e,t,i,s,r){n.goto(e),t.goto(i);let o=i+s,l=i,a=i-e;for(;;){let h=n.to+a-t.to||n.endSide-t.endSide,c=h<0?n.to+a:t.to,f=Math.min(c,o);if(n.point||t.point?n.point&&t.point&&(n.point==t.point||n.point.eq(t.point))&&Fs(n.activeForPoint(n.to+a),t.activeForPoint(t.to))||r.comparePoint(l,f,n.point,t.point):f>l&&!Fs(n.active,t.active)&&r.compareRange(l,f,n.active,t.active),c>o)break;l=c,h<=0&&n.next(),h>=0&&t.next()}}function Fs(n,e){if(n.length!=e.length)return!1;for(let t=0;t=e;i--)n[i+1]=n[i];n[e]=t}function co(n,e){let t=-1,i=1e9;for(let s=0;s=e)return s;if(s==n.length)break;r+=n.charCodeAt(s)==9?t-r%t:1,s=Oe(n,s)}return i===!0?-1:n.length}const Ws="ͼ",fo=typeof Symbol>"u"?"__"+Ws:Symbol.for(Ws),zs=typeof Symbol>"u"?"__styleSet"+Math.floor(Math.random()*1e8):Symbol("styleSet"),uo=typeof globalThis<"u"?globalThis:typeof window<"u"?window:{};class mt{constructor(e,t){this.rules=[];let{finish:i}=t||{};function s(o){return/^@/.test(o)?[o]:o.split(/,\s*/)}function r(o,l,a,h){let c=[],f=/^@(\w+)\b/.exec(o[0]),u=f&&f[1]=="keyframes";if(f&&l==null)return a.push(o[0]+";");for(let d in l){let p=l[d];if(/&/.test(d))r(d.split(/,\s*/).map(g=>o.map(y=>g.replace(/&/,y))).reduce((g,y)=>g.concat(y)),p,a);else if(p&&typeof p=="object"){if(!f)throw new RangeError("The value of a property ("+d+") should be a primitive value.");r(s(d),p,c,u)}else p!=null&&c.push(d.replace(/_.*/,"").replace(/[A-Z]/g,g=>"-"+g.toLowerCase())+": "+p+";")}(c.length||u)&&a.push((i&&!f&&!h?o.map(i):o).join(", ")+" {"+c.join(" ")+"}")}for(let o in e)r(s(o),e[o],this.rules)}getRules(){return this.rules.join(` -`)}static newName(){let e=uo[fo]||1;return uo[fo]=e+1,Ws+e.toString(36)}static mount(e,t){(e[zs]||new kf(e)).mount(Array.isArray(t)?t:[t])}}let Gi=null;class kf{constructor(e){if(!e.head&&e.adoptedStyleSheets&&typeof CSSStyleSheet<"u"){if(Gi)return e.adoptedStyleSheets=[Gi.sheet].concat(e.adoptedStyleSheets),e[zs]=Gi;this.sheet=new CSSStyleSheet,e.adoptedStyleSheets=[this.sheet].concat(e.adoptedStyleSheets),Gi=this}else{this.styleTag=(e.ownerDocument||e).createElement("style");let t=e.head||e;t.insertBefore(this.styleTag,t.firstChild)}this.modules=[],e[zs]=this}mount(e){let t=this.sheet,i=0,s=0;for(let r=0;r-1&&(this.modules.splice(l,1),s--,l=-1),l==-1){if(this.modules.splice(s++,0,o),t)for(let 
a=0;a",191:"?",192:"~",219:"{",220:"|",221:"}",222:'"'},po=typeof navigator<"u"&&/Chrome\/(\d+)/.exec(navigator.userAgent),vf=typeof navigator<"u"&&/Mac/.test(navigator.platform),xf=typeof navigator<"u"&&/MSIE \d|Trident\/(?:[7-9]|\d{2,})\..*rv:(\d+)/.exec(navigator.userAgent),Sf=vf||po&&+po[1]<57;for(var he=0;he<10;he++)gt[48+he]=gt[96+he]=String(he);for(var he=1;he<=24;he++)gt[he+111]="F"+he;for(var he=65;he<=90;he++)gt[he]=String.fromCharCode(he+32),Oi[he]=String.fromCharCode(he);for(var ts in gt)Oi.hasOwnProperty(ts)||(Oi[ts]=gt[ts]);function Cf(n){var e=Sf&&(n.ctrlKey||n.altKey||n.metaKey)||xf&&n.shiftKey&&n.key&&n.key.length==1||n.key=="Unidentified",t=!e&&n.key||(n.shiftKey?Oi:gt)[n.keyCode]||n.key||"Unidentified";return t=="Esc"&&(t="Escape"),t=="Del"&&(t="Delete"),t=="Left"&&(t="ArrowLeft"),t=="Up"&&(t="ArrowUp"),t=="Right"&&(t="ArrowRight"),t=="Down"&&(t="ArrowDown"),t}function xn(n){let e;return n.nodeType==11?e=n.getSelection?n:n.ownerDocument:e=n,e.getSelection()}function Xt(n,e){return e?n==e||n.contains(e.nodeType!=1?e.parentNode:e):!1}function Af(n){let e=n.activeElement;for(;e&&e.shadowRoot;)e=e.shadowRoot.activeElement;return e}function dn(n,e){if(!e.anchorNode)return!1;try{return Xt(n,e.anchorNode)}catch{return!1}}function Bi(n){return n.nodeType==3?Zt(n,0,n.nodeValue.length).getClientRects():n.nodeType==1?n.getClientRects():[]}function Sn(n,e,t,i){return t?mo(n,e,t,i,-1)||mo(n,e,t,i,1):!1}function Cn(n){for(var e=0;;e++)if(n=n.previousSibling,!n)return e}function mo(n,e,t,i,s){for(;;){if(n==t&&e==i)return!0;if(e==(s<0?0:Pi(n))){if(n.nodeName=="DIV")return!1;let r=n.parentNode;if(!r||r.nodeType!=1)return!1;e=Cn(n)+(s<0?0:1),n=r}else if(n.nodeType==1){if(n=n.childNodes[e+(s<0?-1:0)],n.nodeType==1&&n.contentEditable=="false")return!1;e=s<0?Pi(n):0}else return!1}}function Pi(n){return n.nodeType==3?n.nodeValue.length:n.childNodes.length}const Pa={left:0,right:0,top:0,bottom:0};function Dr(n,e){let t=e?n.left:n.right;return{left:t,right:t,top:n.top,bottom:n.bottom}}function Mf(n){return{left:0,right:n.innerWidth,top:0,bottom:n.innerHeight}}function Df(n,e,t,i,s,r,o,l){let a=n.ownerDocument,h=a.defaultView||window;for(let c=n;c;)if(c.nodeType==1){let f,u=c==a.body;if(u)f=Mf(h);else{if(c.scrollHeight<=c.clientHeight&&c.scrollWidth<=c.clientWidth){c=c.assignedSlot||c.parentNode;continue}let g=c.getBoundingClientRect();f={left:g.left,right:g.left+c.clientWidth,top:g.top,bottom:g.top+c.clientHeight}}let d=0,p=0;if(s=="nearest")e.top0&&e.bottom>f.bottom+p&&(p=e.bottom-f.bottom+p+o)):e.bottom>f.bottom&&(p=e.bottom-f.bottom+o,t<0&&e.top-p0&&e.right>f.right+d&&(d=e.right-f.right+d+r)):e.right>f.right&&(d=e.right-f.right+r,t<0&&e.leftt)return f.domBoundsAround(e,t,h);if(u>=e&&s==-1&&(s=a,r=h),h>t&&f.dom.parentNode==this.dom){o=a,l=c;break}c=u,h=u+f.breakAfter}return{from:r,to:l<0?i+this.length:l,startDOM:(s?this.children[s-1].dom.nextSibling:null)||this.dom.firstChild,endDOM:o=0?this.children[o].dom:null}}markDirty(e=!1){this.dirty|=2,this.markParentsDirty(e)}markParentsDirty(e){for(let t=this.parent;t;t=t.parent){if(e&&(t.dirty|=2),t.dirty&1)return;t.dirty|=1,e=!1}}setParent(e){this.parent!=e&&(this.parent=e,this.dirty&&this.markParentsDirty(!0))}setDOM(e){this.dom&&(this.dom.cmView=null),this.dom=e,e.cmView=this}get rootView(){for(let e=this;;){let t=e.parent;if(!t)return e;e=t}}replaceChildren(e,t,i=Tr){this.markDirty();for(let s=e;sthis.pos||e==this.pos&&(t>0||this.i==0||this.children[this.i-1].breakAfter))return this.off=e-this.pos,this;let 
i=this.children[--this.i];this.pos-=i.length+i.breakAfter}}}function Ia(n,e,t,i,s,r,o,l,a){let{children:h}=n,c=h.length?h[e]:null,f=r.length?r[r.length-1]:null,u=f?f.breakAfter:o;if(!(e==i&&c&&!o&&!u&&r.length<2&&c.merge(t,s,r.length?f:null,t==0,l,a))){if(i0&&(!o&&r.length&&c.merge(t,c.length,r[0],!1,l,0)?c.breakAfter=r.shift().breakAfter:(t2);var A={mac:ko||/Mac/.test(Te.platform),windows:/Win/.test(Te.platform),linux:/Linux|X11/.test(Te.platform),ie:Hn,ie_version:_a?qs.documentMode||6:Ks?+Ks[1]:js?+js[1]:0,gecko:bo,gecko_version:bo?+(/Firefox\/(\d+)/.exec(Te.userAgent)||[0,0])[1]:0,chrome:!!is,chrome_version:is?+is[1]:0,ios:ko,android:/Android\b/.test(Te.userAgent),webkit:wo,safari:Va,webkit_version:wo?+(/\bAppleWebKit\/(\d+)/.exec(navigator.userAgent)||[0,0])[1]:0,tabSize:qs.documentElement.style.tabSize!=null?"tab-size":"-moz-tab-size"};const Pf=256;class yt extends K{constructor(e){super(),this.text=e}get length(){return this.text.length}createDOM(e){this.setDOM(e||document.createTextNode(this.text))}sync(e){this.dom||this.createDOM(),this.dom.nodeValue!=this.text&&(e&&e.node==this.dom&&(e.written=!0),this.dom.nodeValue=this.text)}reuseDOM(e){e.nodeType==3&&this.createDOM(e)}merge(e,t,i){return i&&(!(i instanceof yt)||this.length-(t-e)+i.length>Pf)?!1:(this.text=this.text.slice(0,e)+(i?i.text:"")+this.text.slice(t),this.markDirty(),!0)}split(e){let t=new yt(this.text.slice(e));return this.text=this.text.slice(0,e),this.markDirty(),t}localPosFromDOM(e,t){return e==this.dom?t:t?this.text.length:0}domAtPos(e){return new ye(this.dom,e)}domBoundsAround(e,t,i){return{from:i,to:i+this.length,startDOM:this.dom,endDOM:this.dom.nextSibling}}coordsAt(e,t){return Us(this.dom,e,t)}}class et extends K{constructor(e,t=[],i=0){super(),this.mark=e,this.children=t,this.length=i;for(let s of t)s.setParent(this)}setAttrs(e){if(Ra(e),this.mark.class&&(e.className=this.mark.class),this.mark.attrs)for(let t in this.mark.attrs)e.setAttribute(t,this.mark.attrs[t]);return e}reuseDOM(e){e.nodeName==this.mark.tagName.toUpperCase()&&(this.setDOM(e),this.dirty|=6)}sync(e){this.dom?this.dirty&4&&this.setAttrs(this.dom):this.setDOM(this.setAttrs(document.createElement(this.mark.tagName))),super.sync(e)}merge(e,t,i,s,r,o){return i&&(!(i instanceof et&&i.mark.eq(this.mark))||e&&r<=0||te&&t.push(i=e&&(s=r),i=a,r++}let o=this.length-e;return this.length=e,s>-1&&(this.children.length=s,this.markDirty()),new et(this.mark,t,o)}domAtPos(e){return Wa(this,e)}coordsAt(e,t){return qa(this,e,t)}}function Us(n,e,t){let i=n.nodeValue.length;e>i&&(e=i);let s=e,r=e,o=0;e==0&&t<0||e==i&&t>=0?A.chrome||A.gecko||(e?(s--,o=1):r=0)?0:l.length-1];return A.safari&&!o&&a.width==0&&(a=Array.prototype.find.call(l,h=>h.width)||a),o?Dr(a,o<0):a||null}class ct extends K{constructor(e,t,i){super(),this.widget=e,this.length=t,this.side=i,this.prevWidget=null}static create(e,t,i){return new(e.customView||ct)(e,t,i)}split(e){let t=ct.create(this.widget,this.length-e,this.side);return this.length-=e,t}sync(){(!this.dom||!this.widget.updateDOM(this.dom))&&(this.dom&&this.prevWidget&&this.prevWidget.destroy(this.dom),this.prevWidget=null,this.setDOM(this.widget.toDOM(this.editorView)),this.dom.contentEditable="false")}getSide(){return this.side}merge(e,t,i,s,r,o){return i&&(!(i instanceof ct)||!this.widget.compare(i.widget)||e>0&&r<=0||t0?i.length-1:0;s=i[r],!(e>0?r==0:r==i.length-1||s.top0?-1:1);return this.length?s:Dr(s,this.side>0)}get isEditable(){return!1}destroy(){super.destroy(),this.dom&&this.widget.destroy(this.dom)}}class Fa extends 
ct{domAtPos(e){let{topView:t,text:i}=this.widget;return t?Gs(e,0,t,i,(s,r)=>s.domAtPos(r),s=>new ye(i,Math.min(s,i.nodeValue.length))):new ye(i,Math.min(e,i.nodeValue.length))}sync(){this.setDOM(this.widget.toDOM())}localPosFromDOM(e,t){let{topView:i,text:s}=this.widget;return i?Ha(e,t,i,s):Math.min(t,this.length)}ignoreMutation(){return!1}get overrideDOMText(){return null}coordsAt(e,t){let{topView:i,text:s}=this.widget;return i?Gs(e,t,i,s,(r,o,l)=>r.coordsAt(o,l),(r,o)=>Us(s,r,o)):Us(s,e,t)}destroy(){var e;super.destroy(),(e=this.widget.topView)===null||e===void 0||e.destroy()}get isEditable(){return!0}canReuseDOM(){return!0}}function Gs(n,e,t,i,s,r){if(t instanceof et){for(let o=t.dom.firstChild;o;o=o.nextSibling){let l=K.get(o);if(!l)return r(n,e);let a=Xt(o,i),h=l.length+(a?i.nodeValue.length:0);if(n0?-1:1);return i&&i.topt.top?{left:t.left,right:t.right,top:i.top,bottom:i.bottom}:t}get overrideDOMText(){return _.empty}}yt.prototype.children=ct.prototype.children=Qt.prototype.children=Tr;function Ef(n,e){let t=n.parent,i=t?t.children.indexOf(n):-1;for(;t&&i>=0;)if(e<0?i>0:ir&&e0;r--){let o=i[r-1];if(o.dom.parentNode==t)return o.domAtPos(o.length)}for(let r=s;r0&&e instanceof et&&s.length&&(i=s[s.length-1])instanceof et&&i.mark.eq(e.mark)?za(i,e.children[0],t-1):(s.push(e),e.setParent(n)),n.length+=e.length}function qa(n,e,t){let i=null,s=-1,r=null,o=-1;function l(h,c){for(let f=0,u=0;f=c&&(d.children.length?l(d,c-u):!r&&(p>c||u==p&&d.getSide()>0)?(r=d,o=c-u):(u0?3e8:-4e8:t>0?1e8:-1e8,new Et(e,t,t,i,e.widget||null,!1)}static replace(e){let t=!!e.block,i,s;if(e.isBlockGap)i=-5e8,s=4e8;else{let{start:r,end:o}=ja(e,t);i=(r?t?-3e8:-1:5e8)-1,s=(o?t?2e8:1:-6e8)+1}return new Et(e,i,s,t,e.widget||null,!0)}static line(e){return new Hi(e)}static set(e,t=!1){return F.of(e,t)}hasHeight(){return this.widget?this.widget.estimatedHeight>-1:!1}}E.none=F.empty;class Wn extends E{constructor(e){let{start:t,end:i}=ja(e);super(t?-1:5e8,i?1:-6e8,null,e),this.tagName=e.tagName||"span",this.class=e.class||"",this.attrs=e.attributes||null}eq(e){return this==e||e instanceof Wn&&this.tagName==e.tagName&&this.class==e.class&&Or(this.attrs,e.attrs)}range(e,t=e){if(e>=t)throw new RangeError("Mark decorations may not be empty");return super.range(e,t)}}Wn.prototype.point=!1;class Hi extends E{constructor(e){super(-2e8,-2e8,null,e)}eq(e){return e instanceof Hi&&Or(this.spec.attributes,e.spec.attributes)}range(e,t=e){if(t!=e)throw new RangeError("Line decoration ranges must be zero-length");return super.range(e,t)}}Hi.prototype.mapMode=ce.TrackBefore;Hi.prototype.point=!0;class Et extends E{constructor(e,t,i,s,r,o){super(t,i,r,e),this.block=s,this.isReplace=o,this.mapMode=s?t<=0?ce.TrackBefore:ce.TrackAfter:ce.TrackDel}get type(){return this.startSide=5}eq(e){return e instanceof Et&&Lf(this.widget,e.widget)&&this.block==e.block&&this.startSide==e.startSide&&this.endSide==e.endSide}range(e,t=e){if(this.isReplace&&(e>t||e==t&&this.startSide>0&&this.endSide<=0))throw new RangeError("Invalid range for replacement decoration");if(!this.isReplace&&t!=e)throw new RangeError("Widget decorations can only have zero-length ranges");return super.range(e,t)}}Et.prototype.point=!0;function ja(n,e=!1){let{inclusiveStart:t,inclusiveEnd:i}=n;return t==null&&(t=n.inclusive),i==null&&(i=n.inclusive),{start:t??e,end:i??e}}function Lf(n,e){return n==e||!!(n&&e&&n.compare(e))}function Ys(n,e,t,i=0){let s=t.length-1;s>=0&&t[s]+i>=n?t[s]=Math.max(t[s],e):t.push(n,e)}class ke extends 
K{constructor(){super(...arguments),this.children=[],this.length=0,this.prevAttrs=void 0,this.attrs=null,this.breakAfter=0}merge(e,t,i,s,r,o){if(i){if(!(i instanceof ke))return!1;this.dom||i.transferDOM(this)}return s&&this.setDeco(i?i.attrs:null),Na(this,e,t,i?i.children:[],r,o),!0}split(e){let t=new ke;if(t.breakAfter=this.breakAfter,this.length==0)return t;let{i,off:s}=this.childPos(e);s&&(t.append(this.children[i].split(s),0),this.children[i].merge(s,this.children[i].length,null,!1,0,0),i++);for(let r=i;r0&&this.children[i-1].length==0;)this.children[--i].destroy();return this.children.length=i,this.markDirty(),this.length=e,t}transferDOM(e){this.dom&&(this.markDirty(),e.setDOM(this.dom),e.prevAttrs=this.prevAttrs===void 0?this.attrs:this.prevAttrs,this.prevAttrs=void 0,this.dom=null)}setDeco(e){Or(this.attrs,e)||(this.dom&&(this.prevAttrs=this.attrs,this.markDirty()),this.attrs=e)}append(e,t){za(this,e,t)}addLineDeco(e){let t=e.spec.attributes,i=e.spec.class;t&&(this.attrs=$s(t,this.attrs||{})),i&&(this.attrs=$s({class:i},this.attrs||{}))}domAtPos(e){return Wa(this,e)}reuseDOM(e){e.nodeName=="DIV"&&(this.setDOM(e),this.dirty|=6)}sync(e){var t;this.dom?this.dirty&4&&(Ra(this.dom),this.dom.className="cm-line",this.prevAttrs=this.attrs?null:void 0):(this.setDOM(document.createElement("div")),this.dom.className="cm-line",this.prevAttrs=this.attrs?null:void 0),this.prevAttrs!==void 0&&(Js(this.dom,this.prevAttrs,this.attrs),this.dom.classList.add("cm-line"),this.prevAttrs=void 0),super.sync(e);let i=this.dom.lastChild;for(;i&&K.get(i)instanceof et;)i=i.lastChild;if(!i||!this.length||i.nodeName!="BR"&&((t=K.get(i))===null||t===void 0?void 0:t.isEditable)==!1&&(!A.ios||!this.children.some(s=>s instanceof yt))){let s=document.createElement("BR");s.cmIgnore=!0,this.dom.appendChild(s)}}measureTextSize(){if(this.children.length==0||this.length>20)return null;let e=0;for(let t of this.children){if(!(t instanceof yt)||/[^ -~]/.test(t.text))return null;let i=Bi(t.dom);if(i.length!=1)return null;e+=i[0].width}return e?{lineHeight:this.dom.getBoundingClientRect().height,charWidth:e/this.length}:null}coordsAt(e,t){return qa(this,e,t)}become(e){return!1}get type(){return W.Text}static find(e,t){for(let i=0,s=0;i=t){if(r instanceof ke)return r;if(o>t)break}s=o+r.breakAfter}return null}}class Ot extends K{constructor(e,t,i){super(),this.widget=e,this.length=t,this.type=i,this.breakAfter=0,this.prevWidget=null}merge(e,t,i,s,r,o){return i&&(!(i instanceof Ot)||!this.widget.compare(i.widget)||e>0&&r<=0||t0;){if(this.textOff==this.text.length){let{value:r,lineBreak:o,done:l}=this.cursor.next(this.skip);if(this.skip=0,l)throw new Error("Ran out of text content when drawing inline views");if(o){this.posCovered()||this.getLine(),this.content.length?this.content[this.content.length-1].breakAfter=1:this.breakAtStart=1,this.flushBuffer([]),this.curLine=null,e--;continue}else this.text=r,this.textOff=0}let s=Math.min(this.text.length-this.textOff,e,512);this.flushBuffer(t.slice(0,i)),this.getLine().append($i(new yt(this.text.slice(this.textOff,this.textOff+s)),t),i),this.atCursorPos=!0,this.textOff+=s,e-=s,i=0}}span(e,t,i,s){this.buildText(t-e,i,s),this.pos=t,this.openStart<0&&(this.openStart=s)}point(e,t,i,s,r,o){if(this.disallowBlockEffectsFor[o]&&i instanceof Et){if(i.block)throw new RangeError("Block decorations may not be specified via plugins");if(t>this.doc.lineAt(this.pos).to)throw new RangeError("Decorations that replace line breaks may not be specified via plugins")}let l=t-e;if(i instanceof 
Et)if(i.block){let{type:a}=i;a==W.WidgetAfter&&!this.posCovered()&&this.getLine(),this.addBlockWidget(new Ot(i.widget||new vo("div"),l,a))}else{let a=ct.create(i.widget||new vo("span"),l,l?0:i.startSide),h=this.atCursorPos&&!a.isEditable&&r<=s.length&&(e0),c=!a.isEditable&&(en.some(e=>e)}),Xa=D.define({combine:n=>n.some(e=>e)});class An{constructor(e,t="nearest",i="nearest",s=5,r=5){this.range=e,this.y=t,this.x=i,this.yMargin=s,this.xMargin=r}map(e){return e.empty?this:new An(this.range.map(e),this.y,this.x,this.yMargin,this.xMargin)}}const xo=R.define({map:(n,e)=>n.map(e)});function He(n,e,t){let i=n.facet($a);i.length?i[0](e):window.onerror?window.onerror(String(e),t,void 0,void 0,e):t?console.error(t+":",e):console.error(e)}const zn=D.define({combine:n=>n.length?n[0]:!0});let If=0;const yi=D.define();class be{constructor(e,t,i,s){this.id=e,this.create=t,this.domEventHandlers=i,this.extension=s(this)}static define(e,t){const{eventHandlers:i,provide:s,decorations:r}=t||{};return new be(If++,e,i,o=>{let l=[yi.of(o)];return r&&l.push(Ei.of(a=>{let h=a.plugin(o);return h?r(h):E.none})),s&&l.push(s(o)),l})}static fromClass(e,t){return be.define(i=>new e(i),t)}}class ns{constructor(e){this.spec=e,this.mustUpdate=null,this.value=null}update(e){if(this.value){if(this.mustUpdate){let t=this.mustUpdate;if(this.mustUpdate=null,this.value.update)try{this.value.update(t)}catch(i){if(He(t.state,i,"CodeMirror plugin crashed"),this.value.destroy)try{this.value.destroy()}catch{}this.deactivate()}}}else if(this.spec)try{this.value=this.spec.create(e)}catch(t){He(e.state,t,"CodeMirror plugin crashed"),this.deactivate()}return this}destroy(e){var t;if(!((t=this.value)===null||t===void 0)&&t.destroy)try{this.value.destroy()}catch(i){He(e.state,i,"CodeMirror plugin crashed")}}deactivate(){this.spec=this.value=null}}const Za=D.define(),Qa=D.define(),Ei=D.define(),eh=D.define(),th=D.define(),bi=D.define();class Qe{constructor(e,t,i,s){this.fromA=e,this.toA=t,this.fromB=i,this.toB=s}join(e){return new Qe(Math.min(this.fromA,e.fromA),Math.max(this.toA,e.toA),Math.min(this.fromB,e.fromB),Math.max(this.toB,e.toB))}addToSet(e){let t=e.length,i=this;for(;t>0;t--){let s=e[t-1];if(!(s.fromA>i.toA)){if(s.toAc)break;r+=2}if(!a)return i;new Qe(a.fromA,a.toA,a.fromB,a.toB).addToSet(i),o=a.toA,l=a.toB}}}class Mn{constructor(e,t,i){this.view=e,this.state=t,this.transactions=i,this.flags=0,this.startState=e.state,this.changes=ne.empty(this.startState.doc.length);for(let o of i)this.changes=this.changes.compose(o.changes);let s=[];this.changes.iterChangedRanges((o,l,a,h)=>s.push(new Qe(o,l,a,h))),this.changedRanges=s;let r=e.hasFocus;r!=e.inputState.notifiedFocused&&(e.inputState.notifiedFocused=r,this.flags|=1)}static create(e,t,i){return new Mn(e,t,i)}get viewportChanged(){return(this.flags&4)>0}get heightChanged(){return(this.flags&2)>0}get geometryChanged(){return this.docChanged||(this.flags&10)>0}get focusChanged(){return(this.flags&1)>0}get docChanged(){return!this.changes.empty}get selectionSet(){return this.transactions.some(e=>e.selection)}get empty(){return this.flags==0&&this.transactions.length==0}}var Z=function(n){return n[n.LTR=0]="LTR",n[n.RTL=1]="RTL",n}(Z||(Z={}));const Zs=Z.LTR,Nf=Z.RTL;function ih(n){let e=[];for(let t=0;t=t){if(l.level==i)return o;(r<0||(s!=0?s<0?l.fromt:e[r].level>l.level))&&(r=o)}}if(r<0)throw new RangeError("Index out of range");return r}}const X=[];function Wf(n,e){let t=n.length,i=e==Zs?1:2,s=e==Zs?2:1;if(!n||i==1&&!Hf.test(n))return nh(t);for(let 
o=0,l=i,a=i;o=0;u-=3)if(ze[u+1]==-c){let d=ze[u+2],p=d&2?i:d&4?d&1?s:i:0;p&&(X[o]=X[ze[u]]=p),l=u;break}}else{if(ze.length==189)break;ze[l++]=o,ze[l++]=h,ze[l++]=a}else if((f=X[o])==2||f==1){let u=f==i;a=u?0:1;for(let d=l-3;d>=0;d-=3){let p=ze[d+2];if(p&2)break;if(u)ze[d+2]|=2;else{if(p&4)break;ze[d+2]|=4}}}for(let o=0;ol;){let c=h,f=X[--h]!=2;for(;h>l&&f==(X[h-1]!=2);)h--;r.push(new Jt(h,c,f?2:1))}else r.push(new Jt(l,o,0))}else for(let o=0;o1)for(let a of this.points)a.node==e&&a.pos>this.text.length&&(a.pos-=o-1);i=r+o}}readNode(e){if(e.cmIgnore)return;let t=K.get(e),i=t&&t.overrideDOMText;if(i!=null){this.findPointInside(e,i.length);for(let s=i.iter();!s.next().done;)s.lineBreak?this.lineBreak():this.append(s.value)}else e.nodeType==3?this.readTextNode(e):e.nodeName=="BR"?e.nextSibling&&this.lineBreak():e.nodeType==1&&this.readRange(e.firstChild,null)}findPointBefore(e,t){for(let i of this.points)i.node==e&&e.childNodes[i.offset]==t&&(i.pos=this.text.length)}findPointInside(e,t){for(let i of this.points)(e.nodeType==3?i.node==e:e.contains(i.node))&&(i.pos=this.text.length+Math.min(t,i.offset))}}function So(n){return n.nodeType==1&&/^(DIV|P|LI|UL|OL|BLOCKQUOTE|DD|DT|H\d|SECTION|PRE)$/.test(n.nodeName)}class Co{constructor(e,t){this.node=e,this.offset=t,this.pos=-1}}class Ao extends K{constructor(e){super(),this.view=e,this.compositionDeco=E.none,this.decorations=[],this.dynamicDecorationMap=[],this.minWidth=0,this.minWidthFrom=0,this.minWidthTo=0,this.impreciseAnchor=null,this.impreciseHead=null,this.forceSelection=!1,this.lastUpdate=Date.now(),this.setDOM(e.contentDOM),this.children=[new ke],this.children[0].setParent(this),this.updateDeco(),this.updateInner([new Qe(0,0,0,e.state.doc.length)],0)}get editorView(){return this.view}get length(){return this.view.state.doc.length}update(e){let t=e.changedRanges;this.minWidth>0&&t.length&&(t.every(({fromA:o,toA:l})=>lthis.minWidthTo)?(this.minWidthFrom=e.changes.mapPos(this.minWidthFrom,1),this.minWidthTo=e.changes.mapPos(this.minWidthTo,1)):this.minWidth=this.minWidthFrom=this.minWidthTo=0),this.view.inputState.composing<0?this.compositionDeco=E.none:(e.transactions.length||this.dirty)&&(this.compositionDeco=jf(this.view,e.changes)),(A.ie||A.chrome)&&!this.compositionDeco.size&&e&&e.state.doc.lines!=e.startState.doc.lines&&(this.forceSelection=!0);let i=this.decorations,s=this.updateDeco(),r=$f(i,s,e.changes);return t=Qe.extendWithRanges(t,r),this.dirty==0&&t.length==0?!1:(this.updateInner(t,e.startState.doc.length),e.transactions.length&&(this.lastUpdate=Date.now()),!0)}updateInner(e,t){this.view.viewState.mustMeasureContent=!0,this.updateChildren(e,t);let{observer:i}=this.view;i.ignore(()=>{this.dom.style.height=this.view.viewState.contentHeight+"px",this.dom.style.flexBasis=this.minWidth?this.minWidth+"px":"";let r=A.chrome||A.ios?{node:i.selectionRange.focusNode,written:!1}:void 0;this.sync(r),this.dirty=0,r&&(r.written||i.selectionRange.focusNode!=r.node)&&(this.forceSelection=!0),this.dom.style.height=""});let s=[];if(this.view.viewport.from||this.view.viewport.to=0?e[s]:null;if(!r)break;let{fromA:o,toA:l,fromB:a,toB:h}=r,{content:c,breakAtStart:f,openStart:u,openEnd:d}=Br.build(this.view.state.doc,a,h,this.decorations,this.dynamicDecorationMap),{i:p,off:g}=i.findPos(l,1),{i:y,off:b}=i.findPos(o,-1);Ia(this,y,b,p,g,c,f,u,d)}}updateSelection(e=!1,t=!1){if((e||!this.view.observer.selectionRange.focusNode)&&this.view.observer.readSelectionRange(),!(t||this.mayControlSelection()))return;let i=this.forceSelection;this.forceSelection=!1;let 
s=this.view.state.selection.main,r=this.domAtPos(s.anchor),o=s.empty?r:this.domAtPos(s.head);if(A.gecko&&s.empty&&qf(r)){let a=document.createTextNode("");this.view.observer.ignore(()=>r.node.insertBefore(a,r.node.childNodes[r.offset]||null)),r=o=new ye(a,0),i=!0}let l=this.view.observer.selectionRange;(i||!l.focusNode||!Sn(r.node,r.offset,l.anchorNode,l.anchorOffset)||!Sn(o.node,o.offset,l.focusNode,l.focusOffset))&&(this.view.observer.ignore(()=>{A.android&&A.chrome&&this.dom.contains(l.focusNode)&&Jf(l.focusNode,this.dom)&&(this.dom.blur(),this.dom.focus({preventScroll:!0}));let a=xn(this.view.root);if(a)if(s.empty){if(A.gecko){let h=Uf(r.node,r.offset);if(h&&h!=3){let c=lh(r.node,r.offset,h==1?1:-1);c&&(r=new ye(c,h==1?0:c.nodeValue.length))}}a.collapse(r.node,r.offset),s.bidiLevel!=null&&l.cursorBidiLevel!=null&&(l.cursorBidiLevel=s.bidiLevel)}else if(a.extend){a.collapse(r.node,r.offset);try{a.extend(o.node,o.offset)}catch{}}else{let h=document.createRange();s.anchor>s.head&&([r,o]=[o,r]),h.setEnd(o.node,o.offset),h.setStart(r.node,r.offset),a.removeAllRanges(),a.addRange(h)}}),this.view.observer.setSelectionRange(r,o)),this.impreciseAnchor=r.precise?null:new ye(l.anchorNode,l.anchorOffset),this.impreciseHead=o.precise?null:new ye(l.focusNode,l.focusOffset)}enforceCursorAssoc(){if(this.compositionDeco.size)return;let{view:e}=this,t=e.state.selection.main,i=xn(e.root),{anchorNode:s,anchorOffset:r}=e.observer.selectionRange;if(!i||!t.empty||!t.assoc||!i.modify)return;let o=ke.find(this,t.head);if(!o)return;let l=o.posAtStart;if(t.head==l||t.head==l+o.length)return;let a=this.coordsAt(t.head,-1),h=this.coordsAt(t.head,1);if(!a||!h||a.bottom>h.top)return;let c=this.domAtPos(t.head+t.assoc);i.collapse(c.node,c.offset),i.modify("move",t.assoc<0?"forward":"backward","lineboundary"),e.observer.readSelectionRange();let f=e.observer.selectionRange;e.docView.posFromDOM(f.anchorNode,f.anchorOffset)!=t.from&&i.collapse(s,r)}mayControlSelection(){let e=this.view.root.activeElement;return e==this.dom||dn(this.dom,this.view.observer.selectionRange)&&!(e&&this.dom.contains(e))}nearest(e){for(let t=e;t;){let i=K.get(t);if(i&&i.rootView==this)return i;t=t.parentNode}return null}posFromDOM(e,t){let i=this.nearest(e);if(!i)throw new RangeError("Trying to find position for a DOM position outside of the document");return i.localPosFromDOM(e,t)+i.posAtStart}domAtPos(e){let{i:t,off:i}=this.childCursor().findPos(e,-1);for(;to||e==o&&r.type!=W.WidgetBefore&&r.type!=W.WidgetAfter&&(!s||t==2||this.children[s-1].breakAfter||this.children[s-1].type==W.WidgetBefore&&t>-2))return r.coordsAt(e-o,t);i=o}}measureVisibleLineHeights(e){let t=[],{from:i,to:s}=e,r=this.view.contentDOM.clientWidth,o=r>Math.max(this.view.scrollDOM.clientWidth,this.minWidth)+1,l=-1,a=this.view.textDirection==Z.LTR;for(let h=0,c=0;cs)break;if(h>=i){let d=f.dom.getBoundingClientRect();if(t.push(d.height),o){let p=f.dom.lastChild,g=p?Bi(p):[];if(g.length){let y=g[g.length-1],b=a?y.right-d.left:d.right-y.left;b>l&&(l=b,this.minWidth=r,this.minWidthFrom=h,this.minWidthTo=u)}}}h=u+f.breakAfter}return t}textDirectionAt(e){let{i:t}=this.childPos(e,1);return getComputedStyle(this.children[t].dom).direction=="rtl"?Z.RTL:Z.LTR}measureTextSize(){for(let s of this.children)if(s instanceof ke){let r=s.measureTextSize();if(r)return r}let e=document.createElement("div"),t,i;return e.className="cm-line",e.style.width="99999px",e.textContent="abc def ghi jkl mno pqr stu",this.view.observer.ignore(()=>{this.dom.appendChild(e);let 
s=Bi(e.firstChild)[0];t=e.getBoundingClientRect().height,i=s?s.width/27:7,e.remove()}),{lineHeight:t,charWidth:i}}childCursor(e=this.length){let t=this.children.length;return t&&(e-=this.children[--t].length),new La(this.children,e,t)}computeBlockGapDeco(){let e=[],t=this.view.viewState;for(let i=0,s=0;;s++){let r=s==t.viewports.length?null:t.viewports[s],o=r?r.from-1:this.length;if(o>i){let l=t.lineBlockAt(o).bottom-t.lineBlockAt(i).top;e.push(E.replace({widget:new Mo(l),block:!0,inclusive:!0,isBlockGap:!0}).range(i,o))}if(!r)break;i=r.to+1}return E.set(e)}updateDeco(){let e=this.view.state.facet(Ei).map((t,i)=>(this.dynamicDecorationMap[i]=typeof t=="function")?t(this.view):t);for(let t=e.length;tt.anchor?-1:1),s;if(!i)return;!t.empty&&(s=this.coordsAt(t.anchor,t.anchor>t.head?-1:1))&&(i={left:Math.min(i.left,s.left),top:Math.min(i.top,s.top),right:Math.max(i.right,s.right),bottom:Math.max(i.bottom,s.bottom)});let r=0,o=0,l=0,a=0;for(let c of this.view.state.facet(th).map(f=>f(this.view)))if(c){let{left:f,right:u,top:d,bottom:p}=c;f!=null&&(r=Math.max(r,f)),u!=null&&(o=Math.max(o,u)),d!=null&&(l=Math.max(l,d)),p!=null&&(a=Math.max(a,p))}let h={left:i.left-r,top:i.top-l,right:i.right+o,bottom:i.bottom+a};Df(this.view.scrollDOM,h,t.head0&&t<=0)n=n.childNodes[e-1],e=Pi(n);else if(n.nodeType==1&&e=0)n=n.childNodes[e],e=0;else return null}}function Uf(n,e){return n.nodeType!=1?0:(e&&n.childNodes[e-1].contentEditable=="false"?1:0)|(e0;){let h=Oe(s.text,o,!1);if(i(s.text.slice(h,o))!=a)break;o=h}for(;ln?e.left-n:Math.max(0,n-e.right)}function Zf(n,e){return e.top>n?e.top-n:Math.max(0,n-e.bottom)}function ss(n,e){return n.tope.top+1}function Do(n,e){return en.bottom?{top:n.top,left:n.left,right:n.right,bottom:e}:n}function er(n,e,t){let i,s,r,o,l=!1,a,h,c,f;for(let p=n.firstChild;p;p=p.nextSibling){let g=Bi(p);for(let y=0;yS||o==S&&r>v)&&(i=p,s=b,r=v,o=S,l=!v||(v>0?y0)),v==0?t>b.bottom&&(!c||c.bottomb.top)&&(h=p,f=b):c&&ss(c,b)?c=To(c,b.bottom):f&&ss(f,b)&&(f=Do(f,b.top))}}if(c&&c.bottom>=t?(i=a,s=c):f&&f.top<=t&&(i=h,s=f),!i)return{node:n,offset:0};let u=Math.max(s.left,Math.min(s.right,e));if(i.nodeType==3)return Oo(i,u,t);if(l&&i.contentEditable!="false")return er(i,u,t);let d=Array.prototype.indexOf.call(n.childNodes,i)+(e>=(s.left+s.right)/2?1:0);return{node:n,offset:d}}function Oo(n,e,t){let i=n.nodeValue.length,s=-1,r=1e9,o=0;for(let l=0;lt?c.top-t:t-c.bottom)-1;if(c.left-1<=e&&c.right+1>=e&&f=(c.left+c.right)/2,d=u;if((A.chrome||A.gecko)&&Zt(n,l).getBoundingClientRect().left==c.right&&(d=!u),f<=0)return{node:n,offset:l+(d?1:0)};s=l+(d?1:0),r=f}}}return{node:n,offset:s>-1?s:o>0?n.nodeValue.length:0}}function ah(n,{x:e,y:t},i,s=-1){var r;let o=n.contentDOM.getBoundingClientRect(),l=o.top+n.viewState.paddingTop,a,{docHeight:h}=n.viewState,c=t-l;if(c<0)return 0;if(c>h)return n.state.doc.length;for(let b=n.defaultLineHeight/2,v=!1;a=n.elementAtHeight(c),a.type!=W.Text;)for(;c=s>0?a.bottom+b:a.top-b,!(c>=0&&c<=h);){if(v)return i?null:0;v=!0,s=-s}t=l+c;let f=a.from;if(fn.viewport.to)return n.viewport.to==n.state.doc.length?n.state.doc.length:i?null:Bo(n,o,a,e,t);let u=n.dom.ownerDocument,d=n.root.elementFromPoint?n.root:u,p=d.elementFromPoint(e,t);p&&!n.contentDOM.contains(p)&&(p=null),p||(e=Math.max(o.left+1,Math.min(o.right-1,e)),p=d.elementFromPoint(e,t),p&&!n.contentDOM.contains(p)&&(p=null));let g,y=-1;if(p&&((r=n.docView.nearest(p))===null||r===void 0?void 0:r.isEditable)!=!1){if(u.caretPositionFromPoint){let b=u.caretPositionFromPoint(e,t);b&&({offsetNode:g,offset:y}=b)}else 
if(u.caretRangeFromPoint){let b=u.caretRangeFromPoint(e,t);b&&({startContainer:g,startOffset:y}=b,(!n.contentDOM.contains(g)||A.safari&&Qf(g,y,e)||A.chrome&&eu(g,y,e))&&(g=void 0))}}if(!g||!n.docView.dom.contains(g)){let b=ke.find(n.docView,f);if(!b)return c>a.top+a.height/2?a.to:a.from;({node:g,offset:y}=er(b.dom,e,t))}return n.docView.posFromDOM(g,y)}function Bo(n,e,t,i,s){let r=Math.round((i-e.left)*n.defaultCharacterWidth);if(n.lineWrapping&&t.height>n.defaultLineHeight*1.5){let l=Math.floor((s-t.top)/n.defaultLineHeight);r+=l*n.viewState.heightOracle.lineLength}let o=n.state.sliceDoc(t.from,t.to);return t.from+Hs(o,r,n.state.tabSize)}function Qf(n,e,t){let i;if(n.nodeType!=3||e!=(i=n.nodeValue.length))return!1;for(let s=n.nextSibling;s;s=s.nextSibling)if(s.nodeType!=1||s.nodeName!="BR")return!1;return Zt(n,i-1,i).getBoundingClientRect().left>t}function eu(n,e,t){if(e!=0)return!1;for(let s=n;;){let r=s.parentNode;if(!r||r.nodeType!=1||r.firstChild!=s)return!1;if(r.classList.contains("cm-line"))break;s=r}let i=n.nodeType==1?n.getBoundingClientRect():Zt(n,0,Math.max(n.nodeValue.length,1)).getBoundingClientRect();return t-i.left>5}function tu(n,e,t,i){let s=n.state.doc.lineAt(e.head),r=!i||!n.lineWrapping?null:n.coordsAtPos(e.assoc<0&&e.head>s.from?e.head-1:e.head);if(r){let a=n.dom.getBoundingClientRect(),h=n.textDirectionAt(s.from),c=n.posAtCoords({x:t==(h==Z.LTR)?a.right-1:a.left+1,y:(r.top+r.bottom)/2});if(c!=null)return w.cursor(c,t?-1:1)}let o=ke.find(n.docView,e.head),l=o?t?o.posAtEnd:o.posAtStart:t?s.to:s.from;return w.cursor(l,t?-1:1)}function Po(n,e,t,i){let s=n.state.doc.lineAt(e.head),r=n.bidiSpans(s),o=n.textDirectionAt(s.from);for(let l=e,a=null;;){let h=zf(s,r,o,l,t),c=sh;if(!h){if(s.number==(t?n.state.doc.lines:1))return l;c=` -`,s=n.state.doc.line(s.number+(t?1:-1)),r=n.bidiSpans(s),h=w.cursor(t?s.from:s.to)}if(a){if(!a(c))return l}else{if(!i)return h;a=i(c)}l=h}}function iu(n,e,t){let i=n.state.charCategorizer(e),s=i(t);return r=>{let o=i(r);return s==Re.Space&&(s=o),s==o}}function nu(n,e,t,i){let s=e.head,r=t?1:-1;if(s==(t?n.state.doc.length:0))return w.cursor(s,e.assoc);let o=e.goalColumn,l,a=n.contentDOM.getBoundingClientRect(),h=n.coordsAtPos(s),c=n.documentTop;if(h)o==null&&(o=h.left-a.left),l=r<0?h.top:h.bottom;else{let d=n.viewState.lineBlockAt(s);o==null&&(o=Math.min(a.right-a.left,n.defaultCharacterWidth*(s-d.from))),l=(r<0?d.top:d.bottom)+c}let f=a.left+o,u=i??n.defaultLineHeight>>1;for(let d=0;;d+=10){let p=l+(u+d)*r,g=ah(n,{x:f,y:p},!1,r);if(pa.bottom||(r<0?gs))return w.cursor(g,e.assoc,void 0,o)}}function rs(n,e,t){let i=n.state.facet(eh).map(s=>s(n));for(;;){let s=!1;for(let r of i)r.between(t.from-1,t.from+1,(o,l,a)=>{t.from>o&&t.fromt.from?w.cursor(o,1):w.cursor(l,-1),s=!0)});if(!s)return t}}class su{constructor(e){this.lastKeyCode=0,this.lastKeyTime=0,this.lastTouchTime=0,this.lastFocusTime=0,this.lastScrollTop=0,this.lastScrollLeft=0,this.chromeScrollHack=-1,this.pendingIOSKey=void 0,this.lastSelectionOrigin=null,this.lastSelectionTime=0,this.lastEscPress=0,this.lastContextMenu=0,this.scrollHandlers=[],this.registeredEvents=[],this.customHandlers=[],this.composing=-1,this.compositionFirstChange=null,this.compositionEndedAt=0,this.mouseSelection=null;for(let t in oe){let 
i=oe[t];e.contentDOM.addEventListener(t,s=>{!Eo(e,s)||this.ignoreDuringComposition(s)||t=="keydown"&&this.keydown(e,s)||(this.mustFlushObserver(s)&&e.observer.forceFlush(),this.runCustomHandlers(t,e,s)?s.preventDefault():i(e,s))},tr[t]),this.registeredEvents.push(t)}A.chrome&&A.chrome_version==102&&e.scrollDOM.addEventListener("wheel",()=>{this.chromeScrollHack<0?e.contentDOM.style.pointerEvents="none":window.clearTimeout(this.chromeScrollHack),this.chromeScrollHack=setTimeout(()=>{this.chromeScrollHack=-1,e.contentDOM.style.pointerEvents=""},100)},{passive:!0}),this.notifiedFocused=e.hasFocus,A.safari&&e.contentDOM.addEventListener("input",()=>null)}setSelectionOrigin(e){this.lastSelectionOrigin=e,this.lastSelectionTime=Date.now()}ensureHandlers(e,t){var i;let s;this.customHandlers=[];for(let r of t)if(s=(i=r.update(e).spec)===null||i===void 0?void 0:i.domEventHandlers){this.customHandlers.push({plugin:r.value,handlers:s});for(let o in s)this.registeredEvents.indexOf(o)<0&&o!="scroll"&&(this.registeredEvents.push(o),e.contentDOM.addEventListener(o,l=>{Eo(e,l)&&this.runCustomHandlers(o,e,l)&&l.preventDefault()}))}}runCustomHandlers(e,t,i){for(let s of this.customHandlers){let r=s.handlers[e];if(r)try{if(r.call(s.plugin,i,t)||i.defaultPrevented)return!0}catch(o){He(t.state,o)}}return!1}runScrollHandlers(e,t){this.lastScrollTop=e.scrollDOM.scrollTop,this.lastScrollLeft=e.scrollDOM.scrollLeft;for(let i of this.customHandlers){let s=i.handlers.scroll;if(s)try{s.call(i.plugin,t,e)}catch(r){He(e.state,r)}}}keydown(e,t){if(this.lastKeyCode=t.keyCode,this.lastKeyTime=Date.now(),t.keyCode==9&&Date.now()s.keyCode==t.keyCode))&&!t.ctrlKey||ru.indexOf(t.key)>-1&&t.ctrlKey&&!t.shiftKey)?(this.pendingIOSKey=i||t,setTimeout(()=>this.flushIOSKey(e),250),!0):!1}flushIOSKey(e){let t=this.pendingIOSKey;return t?(this.pendingIOSKey=void 0,$t(e.contentDOM,t.key,t.keyCode)):!1}ignoreDuringComposition(e){return/^key/.test(e.type)?this.composing>0?!0:A.safari&&!A.ios&&Date.now()-this.compositionEndedAt<100?(this.compositionEndedAt=0,!0):!1:!1}mustFlushObserver(e){return e.type=="keydown"&&e.keyCode!=229}startMouseSelection(e){this.mouseSelection&&this.mouseSelection.destroy(),this.mouseSelection=e}update(e){this.mouseSelection&&this.mouseSelection.update(e),e.transactions.length&&(this.lastKeyCode=this.lastSelectionTime=0)}destroy(){this.mouseSelection&&this.mouseSelection.destroy()}}const hh=[{key:"Backspace",keyCode:8,inputType:"deleteContentBackward"},{key:"Enter",keyCode:13,inputType:"insertParagraph"},{key:"Delete",keyCode:46,inputType:"deleteContentForward"}],ru="dthko",ch=[16,17,18,20,91,92,224,225];class ou{constructor(e,t,i,s){this.view=e,this.style=i,this.mustSelect=s,this.lastEvent=t;let r=e.contentDOM.ownerDocument;r.addEventListener("mousemove",this.move=this.move.bind(this)),r.addEventListener("mouseup",this.up=this.up.bind(this)),this.extend=t.shiftKey,this.multiple=e.state.facet(N.allowMultipleSelections)&&lu(e,t),this.dragMove=au(e,t),this.dragging=hu(e,t)&&ph(t)==1?null:!1,this.dragging===!1&&(t.preventDefault(),this.select(t))}move(e){if(e.buttons==0)return this.destroy();this.dragging===!1&&this.select(this.lastEvent=e)}up(e){this.dragging==null&&this.select(this.lastEvent),this.dragging||e.preventDefault(),this.destroy()}destroy(){let e=this.view.contentDOM.ownerDocument;e.removeEventListener("mousemove",this.move),e.removeEventListener("mouseup",this.up),this.view.inputState.mouseSelection=null}select(e){let 
t=this.style.get(e,this.extend,this.multiple);(this.mustSelect||!t.eq(this.view.state.selection)||t.main.assoc!=this.view.state.selection.main.assoc)&&this.view.dispatch({selection:t,userEvent:"select.pointer",scrollIntoView:!0}),this.mustSelect=!1}update(e){e.docChanged&&this.dragging&&(this.dragging=this.dragging.map(e.changes)),this.style.update(e)&&setTimeout(()=>this.select(this.lastEvent),20)}}function lu(n,e){let t=n.state.facet(Ka);return t.length?t[0](e):A.mac?e.metaKey:e.ctrlKey}function au(n,e){let t=n.state.facet(Ua);return t.length?t[0](e):A.mac?!e.altKey:!e.ctrlKey}function hu(n,e){let{main:t}=n.state.selection;if(t.empty)return!1;let i=xn(n.root);if(!i||i.rangeCount==0)return!0;let s=i.getRangeAt(0).getClientRects();for(let r=0;r=e.clientX&&o.top<=e.clientY&&o.bottom>=e.clientY)return!0}return!1}function Eo(n,e){if(!e.bubbles)return!0;if(e.defaultPrevented)return!1;for(let t=e.target,i;t!=n.contentDOM;t=t.parentNode)if(!t||t.nodeType==11||(i=K.get(t))&&i.ignoreEvent(e))return!1;return!0}const oe=Object.create(null),tr=Object.create(null),fh=A.ie&&A.ie_version<15||A.ios&&A.webkit_version<604;function cu(n){let e=n.dom.parentNode;if(!e)return;let t=e.appendChild(document.createElement("textarea"));t.style.cssText="position: fixed; left: -10000px; top: 10px",t.focus(),setTimeout(()=>{n.focus(),t.remove(),uh(n,t.value)},50)}function uh(n,e){let{state:t}=n,i,s=1,r=t.toText(e),o=r.lines==t.selection.ranges.length;if(ir!=null&&t.selection.ranges.every(a=>a.empty)&&ir==r.toString()){let a=-1;i=t.changeByRange(h=>{let c=t.doc.lineAt(h.from);if(c.from==a)return{range:h};a=c.from;let f=t.toText((o?r.line(s++).text:e)+t.lineBreak);return{changes:{from:c.from,insert:f},range:w.cursor(h.from+f.length)}})}else o?i=t.changeByRange(a=>{let h=r.line(s++);return{changes:{from:a.from,to:a.to,insert:h.text},range:w.cursor(a.from+h.length)}}):i=t.replaceSelection(r);n.dispatch(i,{userEvent:"input.paste",scrollIntoView:!0})}oe.keydown=(n,e)=>{n.inputState.setSelectionOrigin("select"),e.keyCode==27?n.inputState.lastEscPress=Date.now():ch.indexOf(e.keyCode)<0&&(n.inputState.lastEscPress=0)};oe.touchstart=(n,e)=>{n.inputState.lastTouchTime=Date.now(),n.inputState.setSelectionOrigin("select.pointer")};oe.touchmove=n=>{n.inputState.setSelectionOrigin("select.pointer")};tr.touchstart=tr.touchmove={passive:!0};oe.mousedown=(n,e)=>{if(n.observer.flush(),n.inputState.lastTouchTime>Date.now()-2e3)return;let t=null;for(let i of n.state.facet(Ga))if(t=i(n,e),t)break;if(!t&&e.button==0&&(t=du(n,e)),t){let i=n.root.activeElement!=n.contentDOM;i&&n.observer.ignore(()=>Ea(n.contentDOM)),n.inputState.startMouseSelection(new ou(n,e,t,i))}};function Ro(n,e,t,i){if(i==1)return w.cursor(e,t);if(i==2)return Yf(n.state,e,t);{let s=ke.find(n.docView,e),r=n.state.doc.lineAt(s?s.posAtEnd:e),o=s?s.posAtStart:r.from,l=s?s.posAtEnd:r.to;return ln>=e.top&&n<=e.bottom,Lo=(n,e,t)=>dh(e,t)&&n>=t.left&&n<=t.right;function fu(n,e,t,i){let s=ke.find(n.docView,e);if(!s)return 1;let r=e-s.posAtStart;if(r==0)return 1;if(r==s.length)return-1;let o=s.coordsAt(r,-1);if(o&&Lo(t,i,o))return-1;let l=s.coordsAt(r,1);return l&&Lo(t,i,l)?1:o&&dh(i,o)?-1:1}function Io(n,e){let t=n.posAtCoords({x:e.clientX,y:e.clientY},!1);return{pos:t,bias:fu(n,t,e.clientX,e.clientY)}}const uu=A.ie&&A.ie_version<=11;let No=null,_o=0,Vo=0;function ph(n){if(!uu)return n.detail;let e=No,t=Vo;return No=n,Vo=Date.now(),_o=!e||t>Date.now()-400&&Math.abs(e.clientX-n.clientX)<2&&Math.abs(e.clientY-n.clientY)<2?(_o+1)%3:1}function du(n,e){let 
t=Io(n,e),i=ph(e),s=n.state.selection,r=t,o=e;return{update(l){l.docChanged&&(t.pos=l.changes.mapPos(t.pos),s=s.map(l.changes),o=null)},get(l,a,h){let c;o&&l.clientX==o.clientX&&l.clientY==o.clientY?c=r:(c=r=Io(n,l),o=l);let f=Ro(n,c.pos,c.bias,i);if(t.pos!=c.pos&&!a){let u=Ro(n,t.pos,t.bias,i),d=Math.min(u.from,f.from),p=Math.max(u.to,f.to);f=d1&&s.ranges.some(u=>u.eq(f))?pu(s,f):h?s.addRange(f):w.create([f])}}}function pu(n,e){for(let t=0;;t++)if(n.ranges[t].eq(e))return w.create(n.ranges.slice(0,t).concat(n.ranges.slice(t+1)),n.mainIndex==t?0:n.mainIndex-(n.mainIndex>t?1:0))}oe.dragstart=(n,e)=>{let{selection:{main:t}}=n.state,{mouseSelection:i}=n.inputState;i&&(i.dragging=t),e.dataTransfer&&(e.dataTransfer.setData("Text",n.state.sliceDoc(t.from,t.to)),e.dataTransfer.effectAllowed="copyMove")};function Fo(n,e,t,i){if(!t)return;let s=n.posAtCoords({x:e.clientX,y:e.clientY},!1);e.preventDefault();let{mouseSelection:r}=n.inputState,o=i&&r&&r.dragging&&r.dragMove?{from:r.dragging.from,to:r.dragging.to}:null,l={from:s,insert:t},a=n.state.changes(o?[o,l]:l);n.focus(),n.dispatch({changes:a,selection:{anchor:a.mapPos(s,-1),head:a.mapPos(s,1)},userEvent:o?"move.drop":"input.drop"})}oe.drop=(n,e)=>{if(!e.dataTransfer)return;if(n.state.readOnly)return e.preventDefault();let t=e.dataTransfer.files;if(t&&t.length){e.preventDefault();let i=Array(t.length),s=0,r=()=>{++s==t.length&&Fo(n,e,i.filter(o=>o!=null).join(n.state.lineBreak),!1)};for(let o=0;o{/[\x00-\x08\x0e-\x1f]{2}/.test(l.result)||(i[o]=l.result),r()},l.readAsText(t[o])}}else Fo(n,e,e.dataTransfer.getData("Text"),!0)};oe.paste=(n,e)=>{if(n.state.readOnly)return e.preventDefault();n.observer.flush();let t=fh?null:e.clipboardData;t?(uh(n,t.getData("text/plain")),e.preventDefault()):cu(n)};function mu(n,e){let t=n.dom.parentNode;if(!t)return;let i=t.appendChild(document.createElement("textarea"));i.style.cssText="position: fixed; left: -10000px; top: 10px",i.value=e,i.focus(),i.selectionEnd=e.length,i.selectionStart=0,setTimeout(()=>{i.remove(),n.focus()},50)}function gu(n){let e=[],t=[],i=!1;for(let s of n.selection.ranges)s.empty||(e.push(n.sliceDoc(s.from,s.to)),t.push(s));if(!e.length){let s=-1;for(let{from:r}of n.selection.ranges){let o=n.doc.lineAt(r);o.number>s&&(e.push(o.text),t.push({from:o.from,to:Math.min(n.doc.length,o.to+1)})),s=o.number}i=!0}return{text:e.join(n.lineBreak),ranges:t,linewise:i}}let ir=null;oe.copy=oe.cut=(n,e)=>{let{text:t,ranges:i,linewise:s}=gu(n.state);if(!t&&!s)return;ir=s?t:null;let r=fh?null:e.clipboardData;r?(e.preventDefault(),r.clearData(),r.setData("text/plain",t)):mu(n,t),e.type=="cut"&&!n.state.readOnly&&n.dispatch({changes:i,scrollIntoView:!0,userEvent:"delete.cut"})};function 
mh(n){setTimeout(()=>{n.hasFocus!=n.inputState.notifiedFocused&&n.update([])},10)}oe.focus=n=>{n.inputState.lastFocusTime=Date.now(),!n.scrollDOM.scrollTop&&(n.inputState.lastScrollTop||n.inputState.lastScrollLeft)&&(n.scrollDOM.scrollTop=n.inputState.lastScrollTop,n.scrollDOM.scrollLeft=n.inputState.lastScrollLeft),mh(n)};oe.blur=n=>{n.observer.clearSelectionRange(),mh(n)};oe.compositionstart=oe.compositionupdate=n=>{n.inputState.compositionFirstChange==null&&(n.inputState.compositionFirstChange=!0),n.inputState.composing<0&&(n.inputState.composing=0)};oe.compositionend=n=>{n.inputState.composing=-1,n.inputState.compositionEndedAt=Date.now(),n.inputState.compositionFirstChange=null,A.chrome&&A.android&&n.observer.flushSoon(),setTimeout(()=>{n.inputState.composing<0&&n.docView.compositionDeco.size&&n.update([])},50)};oe.contextmenu=n=>{n.inputState.lastContextMenu=Date.now()};oe.beforeinput=(n,e)=>{var t;let i;if(A.chrome&&A.android&&(i=hh.find(s=>s.inputType==e.inputType))&&(n.observer.delayAndroidKey(i.key,i.keyCode),i.key=="Backspace"||i.key=="Delete")){let s=((t=window.visualViewport)===null||t===void 0?void 0:t.height)||0;setTimeout(()=>{var r;(((r=window.visualViewport)===null||r===void 0?void 0:r.height)||0)>s+10&&n.hasFocus&&(n.contentDOM.blur(),n.focus())},100)}};const Ho=["pre-wrap","normal","pre-line","break-spaces"];class yu{constructor(){this.doc=_.empty,this.lineWrapping=!1,this.heightSamples={},this.lineHeight=14,this.charWidth=7,this.lineLength=30,this.heightChanged=!1}heightForGap(e,t){let i=this.doc.lineAt(t).number-this.doc.lineAt(e).number+1;return this.lineWrapping&&(i+=Math.ceil((t-e-i*this.lineLength*.5)/this.lineLength)),this.lineHeight*i}heightForLine(e){return this.lineWrapping?(1+Math.max(0,Math.ceil((e-this.lineLength)/(this.lineLength-5))))*this.lineHeight:this.lineHeight}setDoc(e){return this.doc=e,this}mustRefreshForWrapping(e){return Ho.indexOf(e)>-1!=this.lineWrapping}mustRefreshForHeights(e){let t=!1;for(let i=0;i-1,l=Math.round(t)!=Math.round(this.lineHeight)||this.lineWrapping!=o;if(this.lineWrapping=o,this.lineHeight=t,this.charWidth=i,this.lineLength=s,l){this.heightSamples={};for(let a=0;a0}set outdated(e){this.flags=(e?2:0)|this.flags&-3}setHeight(e,t){this.height!=t&&(Math.abs(this.height-t)>pn&&(e.heightChanged=!0),this.height=t)}replace(e,t,i){return ve.of(i)}decomposeLeft(e,t){t.push(this)}decomposeRight(e,t){t.push(this)}applyChanges(e,t,i,s){let r=this;for(let o=s.length-1;o>=0;o--){let{fromA:l,toA:a,fromB:h,toB:c}=s[o],f=r.lineAt(l,q.ByPosNoHeight,t,0,0),u=f.to>=a?f:r.lineAt(a,q.ByPosNoHeight,t,0,0);for(c+=u.to-a,a=u.to;o>0&&f.from<=s[o-1].toA;)l=s[o-1].fromA,h=s[o-1].fromB,o--,lr*2){let l=e[t-1];l.break?e.splice(--t,1,l.left,null,l.right):e.splice(--t,1,l.left,l.right),i+=1+l.break,s-=l.size}else if(r>s*2){let l=e[i];l.break?e.splice(i,1,l.left,null,l.right):e.splice(i,1,l.left,l.right),i+=2+l.break,r-=l.size}else break;else if(s=r&&o(this.blockAt(0,i,s,r))}updateHeight(e,t=0,i=!1,s){return s&&s.from<=t&&s.more&&this.setHeight(e,s.heights[s.index++]),this.outdated=!1,this}toString(){return`block(${this.length})`}}class De extends gh{constructor(e,t){super(e,t,W.Text),this.collapsed=0,this.widgetHeight=0}replace(e,t,i){let s=i[0];return i.length==1&&(s instanceof De||s instanceof ae&&s.flags&4)&&Math.abs(this.length-s.length)<10?(s instanceof ae?s=new De(s.length,this.height):s.height=this.height,this.outdated||(s.outdated=!1),s):ve.of(i)}updateHeight(e,t=0,i=!1,s){return 
s&&s.from<=t&&s.more?this.setHeight(e,s.heights[s.index++]):(i||this.outdated)&&this.setHeight(e,Math.max(this.widgetHeight,e.heightForLine(this.length-this.collapsed))),this.outdated=!1,this}toString(){return`line(${this.length}${this.collapsed?-this.collapsed:""}${this.widgetHeight?":"+this.widgetHeight:""})`}}class ae extends ve{constructor(e){super(e,0)}lines(e,t){let i=e.lineAt(t).number,s=e.lineAt(t+this.length).number;return{firstLine:i,lastLine:s,lineHeight:this.height/(s-i+1)}}blockAt(e,t,i,s){let{firstLine:r,lastLine:o,lineHeight:l}=this.lines(t,s),a=Math.max(0,Math.min(o-r,Math.floor((e-i)/l))),{from:h,length:c}=t.line(r+a);return new ut(h,c,i+l*a,l,W.Text)}lineAt(e,t,i,s,r){if(t==q.ByHeight)return this.blockAt(e,i,s,r);if(t==q.ByPosNoHeight){let{from:f,to:u}=i.lineAt(e);return new ut(f,u-f,0,0,W.Text)}let{firstLine:o,lineHeight:l}=this.lines(i,r),{from:a,length:h,number:c}=i.lineAt(e);return new ut(a,h,s+l*(c-o),l,W.Text)}forEachLine(e,t,i,s,r,o){let{firstLine:l,lineHeight:a}=this.lines(i,r);for(let h=Math.max(e,r),c=Math.min(r+this.length,t);h<=c;){let f=i.lineAt(h);h==e&&(s+=a*(f.number-l)),o(new ut(f.from,f.length,s,a,W.Text)),s+=a,h=f.to+1}}replace(e,t,i){let s=this.length-t;if(s>0){let r=i[i.length-1];r instanceof ae?i[i.length-1]=new ae(r.length+s):i.push(null,new ae(s-1))}if(e>0){let r=i[0];r instanceof ae?i[0]=new ae(e+r.length):i.unshift(new ae(e-1),null)}return ve.of(i)}decomposeLeft(e,t){t.push(new ae(e-1),null)}decomposeRight(e,t){t.push(null,new ae(this.length-e-1))}updateHeight(e,t=0,i=!1,s){let r=t+this.length;if(s&&s.from<=t+this.length&&s.more){let o=[],l=Math.max(t,s.from),a=-1,h=e.heightChanged;for(s.from>t&&o.push(new ae(s.from-t-1).updateHeight(e,t));l<=r&&s.more;){let f=e.doc.lineAt(l).length;o.length&&o.push(null);let u=s.heights[s.index++];a==-1?a=u:Math.abs(u-a)>=pn&&(a=-2);let d=new De(f,u);d.outdated=!1,o.push(d),l+=f+1}l<=r&&o.push(null,new ae(r-l).updateHeight(e,l));let c=ve.of(o);return e.heightChanged=h||a<0||Math.abs(c.height-this.height)>=pn||Math.abs(a-this.lines(e.doc,t).lineHeight)>=pn,c}else(i||this.outdated)&&(this.setHeight(e,e.heightForGap(t,t+this.length)),this.outdated=!1);return this}toString(){return`gap(${this.length})`}}class wu extends ve{constructor(e,t,i){super(e.length+t+i.length,e.height+i.height,t|(e.outdated||i.outdated?2:0)),this.left=e,this.right=i,this.size=e.size+i.size}get break(){return this.flags&1}blockAt(e,t,i,s){let r=i+this.left.height;return el))return h;let c=t==q.ByPosNoHeight?q.ByPosNoHeight:q.ByPos;return a?h.join(this.right.lineAt(l,c,i,o,l)):this.left.lineAt(l,c,i,s,r).join(h)}forEachLine(e,t,i,s,r,o){let l=s+this.left.height,a=r+this.left.length+this.break;if(this.break)e=a&&this.right.forEachLine(e,t,i,l,a,o);else{let h=this.lineAt(a,q.ByPos,i,s,r);e=e&&h.from<=t&&o(h),t>h.to&&this.right.forEachLine(h.to+1,t,i,l,a,o)}}replace(e,t,i){let s=this.left.length+this.break;if(tthis.left.length)return this.balanced(this.left,this.right.replace(e-s,t-s,i));let r=[];e>0&&this.decomposeLeft(e,r);let o=r.length;for(let l of i)r.push(l);if(e>0&&Wo(r,o-1),t=i&&t.push(null)),e>i&&this.right.decomposeLeft(e-i,t)}decomposeRight(e,t){let i=this.left.length,s=i+this.break;if(e>=s)return 
this.right.decomposeRight(e-s,t);e2*t.size||t.size>2*e.size?ve.of(this.break?[e,null,t]:[e,t]):(this.left=e,this.right=t,this.height=e.height+t.height,this.outdated=e.outdated||t.outdated,this.size=e.size+t.size,this.length=e.length+this.break+t.length,this)}updateHeight(e,t=0,i=!1,s){let{left:r,right:o}=this,l=t+r.length+this.break,a=null;return s&&s.from<=t+r.length&&s.more?a=r=r.updateHeight(e,t,i,s):r.updateHeight(e,t,i),s&&s.from<=l+o.length&&s.more?a=o=o.updateHeight(e,l,i,s):o.updateHeight(e,l,i),a?this.balanced(r,o):(this.height=this.left.height+this.right.height,this.outdated=!1,this)}toString(){return this.left+(this.break?" ":"-")+this.right}}function Wo(n,e){let t,i;n[e]==null&&(t=n[e-1])instanceof ae&&(i=n[e+1])instanceof ae&&n.splice(e-1,3,new ae(t.length+1+i.length))}const ku=5;class Pr{constructor(e,t){this.pos=e,this.oracle=t,this.nodes=[],this.lineStart=-1,this.lineEnd=-1,this.covering=null,this.writtenTo=e}get isCovered(){return this.covering&&this.nodes[this.nodes.length-1]==this.covering}span(e,t){if(this.lineStart>-1){let i=Math.min(t,this.lineEnd),s=this.nodes[this.nodes.length-1];s instanceof De?s.length+=i-this.pos:(i>this.pos||!this.isCovered)&&this.nodes.push(new De(i-this.pos,-1)),this.writtenTo=i,t>i&&(this.nodes.push(null),this.writtenTo++,this.lineStart=-1)}this.pos=t}point(e,t,i){if(e=ku)&&this.addLineDeco(s,r)}else t>e&&this.span(e,t);this.lineEnd>-1&&this.lineEnd-1)return;let{from:e,to:t}=this.oracle.doc.lineAt(this.pos);this.lineStart=e,this.lineEnd=t,this.writtenToe&&this.nodes.push(new De(this.pos-e,-1)),this.writtenTo=this.pos}blankContent(e,t){let i=new ae(t-e);return this.oracle.doc.lineAt(e).to==t&&(i.flags|=4),i}ensureLine(){this.enterLine();let e=this.nodes.length?this.nodes[this.nodes.length-1]:null;if(e instanceof De)return e;let t=new De(0,-1);return this.nodes.push(t),t}addBlock(e){this.enterLine(),e.type==W.WidgetAfter&&!this.isCovered&&this.ensureLine(),this.nodes.push(e),this.writtenTo=this.pos=this.pos+e.length,e.type!=W.WidgetBefore&&(this.covering=e)}addLineDeco(e,t){let i=this.ensureLine();i.length+=t,i.collapsed+=t,i.widgetHeight=Math.max(i.widgetHeight,e),this.writtenTo=this.pos=this.pos+t}finish(e){let t=this.nodes.length==0?null:this.nodes[this.nodes.length-1];this.lineStart>-1&&!(t instanceof De)&&!this.isCovered?this.nodes.push(new De(0,-1)):(this.writtenToc.clientHeight||c.scrollWidth>c.clientWidth)&&f.overflow!="visible"){let u=c.getBoundingClientRect();r=Math.max(r,u.left),o=Math.min(o,u.right),l=Math.max(l,u.top),a=h==n.parentNode?u.bottom:Math.min(a,u.bottom)}h=f.position=="absolute"||f.position=="fixed"?c.offsetParent:c.parentNode}else if(h.nodeType==11)h=h.host;else break;return{left:r-t.left,right:Math.max(r,o)-t.left,top:l-(t.top+e),bottom:Math.max(l,a)-(t.top+e)}}function Cu(n,e){let t=n.getBoundingClientRect();return{left:0,right:t.right-t.left,top:e,bottom:t.bottom-(t.top+e)}}class os{constructor(e,t,i){this.from=e,this.to=t,this.size=i}static same(e,t){if(e.length!=t.length)return!1;for(let i=0;itypeof t!="function"),this.heightMap=ve.empty().applyChanges(this.stateDeco,_.empty,this.heightOracle.setDoc(e.doc),[new Qe(0,0,0,e.doc.length)]),this.viewport=this.getViewport(0,null),this.updateViewportLines(),this.updateForViewport(),this.lineGaps=this.ensureLineGaps([]),this.lineGapDeco=E.set(this.lineGaps.map(t=>t.draw(!1))),this.computeVisibleRanges()}updateForViewport(){let e=[this.viewport],{main:t}=this.state.selection;for(let i=0;i<=1;i++){let 
s=i?t.head:t.anchor;if(!e.some(({from:r,to:o})=>s>=r&&s<=o)){let{from:r,to:o}=this.lineBlockAt(s);e.push(new Ji(r,o))}}this.viewports=e.sort((i,s)=>i.from-s.from),this.scaler=this.heightMap.height<=7e6?qo:new Tu(this.heightOracle.doc,this.heightMap,this.viewports)}updateViewportLines(){this.viewportLines=[],this.heightMap.forEachLine(this.viewport.from,this.viewport.to,this.state.doc,0,0,e=>{this.viewportLines.push(this.scaler.scale==1?e:wi(e,this.scaler))})}update(e,t=null){this.state=e.state;let i=this.stateDeco;this.stateDeco=this.state.facet(Ei).filter(h=>typeof h!="function");let s=e.changedRanges,r=Qe.extendWithRanges(s,vu(i,this.stateDeco,e?e.changes:ne.empty(this.state.doc.length))),o=this.heightMap.height;this.heightMap=this.heightMap.applyChanges(this.stateDeco,e.startState.doc,this.heightOracle.setDoc(this.state.doc),r),this.heightMap.height!=o&&(e.flags|=2);let l=r.length?this.mapViewport(this.viewport,e.changes):this.viewport;(t&&(t.range.headl.to)||!this.viewportIsAppropriate(l))&&(l=this.getViewport(0,t));let a=!e.changes.empty||e.flags&2||l.from!=this.viewport.from||l.to!=this.viewport.to;this.viewport=l,this.updateForViewport(),a&&this.updateViewportLines(),(this.lineGaps.length||this.viewport.to-this.viewport.from>2e3<<1)&&this.updateLineGaps(this.ensureLineGaps(this.mapLineGaps(this.lineGaps,e.changes))),e.flags|=this.computeVisibleRanges(),t&&(this.scrollTarget=t),!this.mustEnforceCursorAssoc&&e.selectionSet&&e.view.lineWrapping&&e.state.selection.main.empty&&e.state.selection.main.assoc&&!e.state.facet(Xa)&&(this.mustEnforceCursorAssoc=!0)}measure(e){let t=e.contentDOM,i=window.getComputedStyle(t),s=this.heightOracle,r=i.whiteSpace;this.defaultTextDirection=i.direction=="rtl"?Z.RTL:Z.LTR;let o=this.heightOracle.mustRefreshForWrapping(r),l=o||this.mustMeasureContent||this.contentDOMHeight!=t.clientHeight;this.contentDOMHeight=t.clientHeight,this.mustMeasureContent=!1;let a=0,h=0,c=parseInt(i.paddingTop)||0,f=parseInt(i.paddingBottom)||0;(this.paddingTop!=c||this.paddingBottom!=f)&&(this.paddingTop=c,this.paddingBottom=f,a|=10),this.editorWidth!=e.scrollDOM.clientWidth&&(s.lineWrapping&&(l=!0),this.editorWidth=e.scrollDOM.clientWidth,a|=8);let u=(this.printing?Cu:Su)(t,this.paddingTop),d=u.top-this.pixelViewport.top,p=u.bottom-this.pixelViewport.bottom;this.pixelViewport=u;let g=this.pixelViewport.bottom>this.pixelViewport.top&&this.pixelViewport.right>this.pixelViewport.left;if(g!=this.inView&&(this.inView=g,g&&(l=!0)),!this.inView&&!this.scrollTarget)return 0;let y=t.clientWidth;if((this.contentDOMWidth!=y||this.editorHeight!=e.scrollDOM.clientHeight)&&(this.contentDOMWidth=y,this.editorHeight=e.scrollDOM.clientHeight,a|=8),l){let v=e.docView.measureVisibleLineHeights(this.viewport);if(s.mustRefreshForHeights(v)&&(o=!0),o||s.lineWrapping&&Math.abs(y-this.contentDOMWidth)>s.charWidth){let{lineHeight:S,charWidth:k}=e.docView.measureTextSize();o=S>0&&s.refresh(r,S,k,y/k,v),o&&(e.docView.minWidth=0,a|=8)}d>0&&p>0?h=Math.max(d,p):d<0&&p<0&&(h=Math.min(d,p)),s.heightChanged=!1;for(let S of this.viewports){let k=S.from==this.viewport.from?v:e.docView.measureVisibleLineHeights(S);this.heightMap=o?ve.empty().applyChanges(this.stateDeco,_.empty,this.heightOracle,[new Qe(0,0,0,e.state.doc.length)]):this.heightMap.updateHeight(s,0,o,new bu(S.from,k))}s.heightChanged&&(a|=2)}let b=!this.viewportIsAppropriate(this.viewport,h)||this.scrollTarget&&(this.scrollTarget.range.headthis.viewport.to);return 
b&&(this.viewport=this.getViewport(h,this.scrollTarget)),this.updateForViewport(),(a&2||b)&&this.updateViewportLines(),(this.lineGaps.length||this.viewport.to-this.viewport.from>2e3<<1)&&this.updateLineGaps(this.ensureLineGaps(o?[]:this.lineGaps,e)),a|=this.computeVisibleRanges(),this.mustEnforceCursorAssoc&&(this.mustEnforceCursorAssoc=!1,e.docView.enforceCursorAssoc()),a}get visibleTop(){return this.scaler.fromDOM(this.pixelViewport.top)}get visibleBottom(){return this.scaler.fromDOM(this.pixelViewport.bottom)}getViewport(e,t){let i=.5-Math.max(-.5,Math.min(.5,e/1e3/2)),s=this.heightMap,r=this.state.doc,{visibleTop:o,visibleBottom:l}=this,a=new Ji(s.lineAt(o-i*1e3,q.ByHeight,r,0,0).from,s.lineAt(l+(1-i)*1e3,q.ByHeight,r,0,0).to);if(t){let{head:h}=t.range;if(ha.to){let c=Math.min(this.editorHeight,this.pixelViewport.bottom-this.pixelViewport.top),f=s.lineAt(h,q.ByPos,r,0,0),u;t.y=="center"?u=(f.top+f.bottom)/2-c/2:t.y=="start"||t.y=="nearest"&&h=l+Math.max(10,Math.min(i,250)))&&s>o-2*1e3&&r>1,o=s<<1;if(this.defaultTextDirection!=Z.LTR&&!i)return[];let l=[],a=(h,c,f,u)=>{if(c-hh&&yy.from>=f.from&&y.to<=f.to&&Math.abs(y.from-h)y.fromb));if(!g){if(cy.from<=c&&y.to>=c)){let y=t.moveToLineBoundary(w.cursor(c),!1,!0).head;y>h&&(c=y)}g=new os(h,c,this.gapSize(f,h,c,u))}l.push(g)};for(let h of this.viewportLines){if(h.lengthh.from&&a(h.from,u,h,c),dt.draw(this.heightOracle.lineWrapping))))}computeVisibleRanges(){let e=this.stateDeco;this.lineGaps.length&&(e=e.concat(this.lineGapDeco));let t=[];F.spans(e,this.viewport.from,this.viewport.to,{span(s,r){t.push({from:s,to:r})},point(){}},20);let i=t.length!=this.visibleRanges.length||this.visibleRanges.some((s,r)=>s.from!=t[r].from||s.to!=t[r].to);return this.visibleRanges=t,i?4:0}lineBlockAt(e){return e>=this.viewport.from&&e<=this.viewport.to&&this.viewportLines.find(t=>t.from<=e&&t.to>=e)||wi(this.heightMap.lineAt(e,q.ByPos,this.state.doc,0,0),this.scaler)}lineBlockAtHeight(e){return wi(this.heightMap.lineAt(this.scaler.fromDOM(e),q.ByHeight,this.state.doc,0,0),this.scaler)}elementAtHeight(e){return wi(this.heightMap.blockAt(this.scaler.fromDOM(e),this.state.doc,0,0),this.scaler)}get docHeight(){return this.scaler.toDOM(this.heightMap.height)}get contentHeight(){return this.docHeight+this.paddingTop+this.paddingBottom}}class Ji{constructor(e,t){this.from=e,this.to=t}}function Mu(n,e,t){let i=[],s=n,r=0;return F.spans(t,n,e,{span(){},point(o,l){o>s&&(i.push({from:s,to:o}),r+=o-s),s=l}},20),s=1)return e[e.length-1].to;let i=Math.floor(n*t);for(let s=0;;s++){let{from:r,to:o}=e[s],l=o-r;if(i<=l)return r+i;i-=l}}function Xi(n,e){let t=0;for(let{from:i,to:s}of n.ranges){if(e<=s){t+=e-i;break}t+=s-i}return t/n.total}function Du(n,e){for(let t of n)if(e(t))return t}const qo={toDOM(n){return n},fromDOM(n){return n},scale:1};class Tu{constructor(e,t,i){let s=0,r=0,o=0;this.viewports=i.map(({from:l,to:a})=>{let h=t.lineAt(l,q.ByPos,e,0,0).top,c=t.lineAt(a,q.ByPos,e,0,0).bottom;return s+=c-h,{from:l,to:a,top:h,bottom:c,domTop:0,domBottom:0}}),this.scale=(7e6-s)/(t.height-s);for(let l of this.viewports)l.domTop=o+(l.top-r)*this.scale,o=l.domBottom=l.domTop+(l.bottom-l.top),r=l.bottom}toDOM(e){for(let t=0,i=0,s=0;;t++){let r=twi(s,e)):n.type)}const Zi=D.define({combine:n=>n.join(" ")}),nr=D.define({combine:n=>n.indexOf(!0)>-1}),sr=mt.newName(),yh=mt.newName(),bh=mt.newName(),wh={"&light":"."+yh,"&dark":"."+bh};function rr(n,e,t){return new mt(e,{finish(i){return/&/.test(i)?i.replace(/&\w*/,s=>{if(s=="&")return n;if(!t||!t[s])throw new RangeError(`Unsupported 
selector: ${s}`);return t[s]}):n+" "+i}})}const Ou=rr("."+sr,{"&.cm-editor":{position:"relative !important",boxSizing:"border-box","&.cm-focused":{outline:"1px dotted #212121"},display:"flex !important",flexDirection:"column"},".cm-scroller":{display:"flex !important",alignItems:"flex-start !important",fontFamily:"monospace",lineHeight:1.4,height:"100%",overflowX:"auto",position:"relative",zIndex:0},".cm-content":{margin:0,flexGrow:2,flexShrink:0,minHeight:"100%",display:"block",whiteSpace:"pre",wordWrap:"normal",boxSizing:"border-box",padding:"4px 0",outline:"none","&[contenteditable=true]":{WebkitUserModify:"read-write-plaintext-only"}},".cm-lineWrapping":{whiteSpace_fallback:"pre-wrap",whiteSpace:"break-spaces",wordBreak:"break-word",overflowWrap:"anywhere",flexShrink:1},"&light .cm-content":{caretColor:"black"},"&dark .cm-content":{caretColor:"white"},".cm-line":{display:"block",padding:"0 2px 0 4px"},".cm-selectionLayer":{zIndex:-1,contain:"size style"},".cm-selectionBackground":{position:"absolute"},"&light .cm-selectionBackground":{background:"#d9d9d9"},"&dark .cm-selectionBackground":{background:"#222"},"&light.cm-focused .cm-selectionBackground":{background:"#d7d4f0"},"&dark.cm-focused .cm-selectionBackground":{background:"#233"},".cm-cursorLayer":{zIndex:100,contain:"size style",pointerEvents:"none"},"&.cm-focused .cm-cursorLayer":{animation:"steps(1) cm-blink 1.2s infinite"},"@keyframes cm-blink":{"0%":{},"50%":{opacity:0},"100%":{}},"@keyframes cm-blink2":{"0%":{},"50%":{opacity:0},"100%":{}},".cm-cursor, .cm-dropCursor":{position:"absolute",borderLeft:"1.2px solid black",marginLeft:"-0.6px",pointerEvents:"none"},".cm-cursor":{display:"none"},"&dark .cm-cursor":{borderLeftColor:"#444"},"&.cm-focused .cm-cursor":{display:"block"},"&light .cm-activeLine":{backgroundColor:"#cceeff44"},"&dark .cm-activeLine":{backgroundColor:"#99eeff33"},"&light .cm-specialChar":{color:"red"},"&dark .cm-specialChar":{color:"#f78"},".cm-gutters":{flexShrink:0,display:"flex",height:"100%",boxSizing:"border-box",left:0,zIndex:200},"&light .cm-gutters":{backgroundColor:"#f5f5f5",color:"#6c6c6c",borderRight:"1px solid #ddd"},"&dark .cm-gutters":{backgroundColor:"#333338",color:"#ccc"},".cm-gutter":{display:"flex !important",flexDirection:"column",flexShrink:0,boxSizing:"border-box",minHeight:"100%",overflow:"hidden"},".cm-gutterElement":{boxSizing:"border-box"},".cm-lineNumbers .cm-gutterElement":{padding:"0 3px 0 5px",minWidth:"20px",textAlign:"right",whiteSpace:"nowrap"},"&light .cm-activeLineGutter":{backgroundColor:"#e2f2ff"},"&dark .cm-activeLineGutter":{backgroundColor:"#222227"},".cm-panels":{boxSizing:"border-box",position:"sticky",left:0,right:0},"&light .cm-panels":{backgroundColor:"#f5f5f5",color:"black"},"&light .cm-panels-top":{borderBottom:"1px solid #ddd"},"&light .cm-panels-bottom":{borderTop:"1px solid #ddd"},"&dark .cm-panels":{backgroundColor:"#333338",color:"white"},".cm-tab":{display:"inline-block",overflow:"hidden",verticalAlign:"bottom"},".cm-widgetBuffer":{verticalAlign:"text-top",height:"1em",width:0,display:"inline"},".cm-placeholder":{color:"#888",display:"inline-block",verticalAlign:"top"},".cm-button":{verticalAlign:"middle",color:"inherit",fontSize:"70%",padding:".2em 1em",borderRadius:"1px"},"&light .cm-button":{backgroundImage:"linear-gradient(#eff1f5, #d9d9df)",border:"1px solid #888","&:active":{backgroundImage:"linear-gradient(#b4b4b4, #d0d3d6)"}},"&dark .cm-button":{backgroundImage:"linear-gradient(#393939, #111)",border:"1px solid 
#888","&:active":{backgroundImage:"linear-gradient(#111, #333)"}},".cm-textfield":{verticalAlign:"middle",color:"inherit",fontSize:"70%",border:"1px solid silver",padding:".2em .5em"},"&light .cm-textfield":{backgroundColor:"white"},"&dark .cm-textfield":{border:"1px solid #555",backgroundColor:"inherit"}},wh);class Bu{constructor(e,t,i,s){this.typeOver=s,this.bounds=null,this.text="";let{impreciseHead:r,impreciseAnchor:o}=e.docView;if(t>-1&&!e.state.readOnly&&(this.bounds=e.docView.domBoundsAround(t,i,0))){let l=r||o?[]:Eu(e),a=new rh(l,e.state);a.readRange(this.bounds.startDOM,this.bounds.endDOM),this.text=a.text,this.newSel=Ru(l,this.bounds.from)}else{let l=e.observer.selectionRange,a=r&&r.node==l.focusNode&&r.offset==l.focusOffset||!Xt(e.contentDOM,l.focusNode)?e.state.selection.main.head:e.docView.posFromDOM(l.focusNode,l.focusOffset),h=o&&o.node==l.anchorNode&&o.offset==l.anchorOffset||!Xt(e.contentDOM,l.anchorNode)?e.state.selection.main.anchor:e.docView.posFromDOM(l.anchorNode,l.anchorOffset);this.newSel=w.single(h,a)}}}function kh(n,e){let t,{newSel:i}=e,s=n.state.selection.main;if(e.bounds){let{from:r,to:o}=e.bounds,l=s.from,a=null;(n.inputState.lastKeyCode===8&&n.inputState.lastKeyTime>Date.now()-100||A.android&&e.text.length=s.from&&t.to<=s.to&&(t.from!=s.from||t.to!=s.to)&&s.to-s.from-(t.to-t.from)<=4?t={from:s.from,to:s.to,insert:n.state.doc.slice(s.from,t.from).append(t.insert).append(n.state.doc.slice(t.to,s.to))}:(A.mac||A.android)&&t&&t.from==t.to&&t.from==s.head-1&&/^\. ?$/.test(t.insert.toString())?(i&&t.insert.length==2&&(i=w.single(i.main.anchor-1,i.main.head-1)),t={from:s.from,to:s.to,insert:_.of([" "])}):A.chrome&&t&&t.from==t.to&&t.from==s.head&&t.insert.toString()==` - `&&n.lineWrapping&&(i&&(i=w.single(i.main.anchor-1,i.main.head-1)),t={from:s.from,to:s.to,insert:_.of([" "])}),t){let r=n.state;if(A.ios&&n.inputState.flushIOSKey(n)||A.android&&(t.from==s.from&&t.to==s.to&&t.insert.length==1&&t.insert.lines==2&&$t(n.contentDOM,"Enter",13)||t.from==s.from-1&&t.to==s.to&&t.insert.length==0&&$t(n.contentDOM,"Backspace",8)||t.from==s.from&&t.to==s.to+1&&t.insert.length==0&&$t(n.contentDOM,"Delete",46)))return!0;let o=t.insert.toString();if(n.state.facet(Ja).some(h=>h(n,t.from,t.to,o)))return!0;n.inputState.composing>=0&&n.inputState.composing++;let l;if(t.from>=s.from&&t.to<=s.to&&t.to-t.from>=(s.to-s.from)/3&&(!i||i.main.empty&&i.main.from==t.from+t.insert.length)&&n.inputState.composing<0){let h=s.fromt.to?r.sliceDoc(t.to,s.to):"";l=r.replaceSelection(n.state.toText(h+t.insert.sliceString(0,void 0,n.state.lineBreak)+c))}else{let h=r.changes(t),c=i&&!r.selection.main.eq(i.main)&&i.main.to<=h.newLength?i.main:void 0;if(r.selection.ranges.length>1&&n.inputState.composing>=0&&t.to<=s.to&&t.to>=s.to-10){let f=n.state.sliceDoc(t.from,t.to),u=oh(n)||n.state.doc.lineAt(s.head),d=s.to-t.to,p=s.to-s.from;l=r.changeByRange(g=>{if(g.from==s.from&&g.to==s.to)return{changes:h,range:c||g.map(h)};let y=g.to-d,b=y-f.length;if(g.to-g.from!=p||n.state.sliceDoc(b,y)!=f||u&&g.to>=u.from&&g.from<=u.to)return{range:g};let v=r.changes({from:b,to:y,insert:t.insert}),S=g.to-s.to;return{changes:v,range:c?w.range(Math.max(0,c.anchor+S),Math.max(0,c.head+S)):g.map(v)}})}else l={changes:h,selection:c&&r.selection.replaceRange(c)}}let a="input.type";return n.composing&&(a+=".compose",n.inputState.compositionFirstChange&&(a+=".start",n.inputState.compositionFirstChange=!1)),n.dispatch(l,{scrollIntoView:!0,userEvent:a}),!0}else if(i&&!i.main.eq(s)){let r=!1,o="select";return 
n.inputState.lastSelectionTime>Date.now()-50&&(n.inputState.lastSelectionOrigin=="select"&&(r=!0),o=n.inputState.lastSelectionOrigin),n.dispatch({selection:i,scrollIntoView:r,userEvent:o}),!0}else return!1}function Pu(n,e,t,i){let s=Math.min(n.length,e.length),r=0;for(;r0&&l>0&&n.charCodeAt(o-1)==e.charCodeAt(l-1);)o--,l--;if(i=="end"){let a=Math.max(0,r-Math.min(o,l));t-=o+a-r}if(o=o?r-t:0;r-=a,l=r+(l-o),o=r}else if(l=l?r-t:0;r-=a,o=r+(o-l),l=r}return{from:r,toA:o,toB:l}}function Eu(n){let e=[];if(n.root.activeElement!=n.contentDOM)return e;let{anchorNode:t,anchorOffset:i,focusNode:s,focusOffset:r}=n.observer.selectionRange;return t&&(e.push(new Co(t,i)),(s!=t||r!=i)&&e.push(new Co(s,r))),e}function Ru(n,e){if(n.length==0)return null;let t=n[0].pos,i=n.length==2?n[1].pos:t;return t>-1&&i>-1?w.single(t+e,i+e):null}const Lu={childList:!0,characterData:!0,subtree:!0,attributes:!0,characterDataOldValue:!0},ls=A.ie&&A.ie_version<=11;class Iu{constructor(e){this.view=e,this.active=!1,this.selectionRange=new Tf,this.selectionChanged=!1,this.delayedFlush=-1,this.resizeTimeout=-1,this.queue=[],this.delayedAndroidKey=null,this.flushingAndroidKey=-1,this.lastChange=0,this.scrollTargets=[],this.intersection=null,this.resize=null,this.intersecting=!1,this.gapIntersection=null,this.gaps=[],this.parentCheck=-1,this.dom=e.contentDOM,this.observer=new MutationObserver(t=>{for(let i of t)this.queue.push(i);(A.ie&&A.ie_version<=11||A.ios&&e.composing)&&t.some(i=>i.type=="childList"&&i.removedNodes.length||i.type=="characterData"&&i.oldValue.length>i.target.nodeValue.length)?this.flushSoon():this.flush()}),ls&&(this.onCharData=t=>{this.queue.push({target:t.target,type:"characterData",oldValue:t.prevValue}),this.flushSoon()}),this.onSelectionChange=this.onSelectionChange.bind(this),this.onResize=this.onResize.bind(this),this.onPrint=this.onPrint.bind(this),this.onScroll=this.onScroll.bind(this),typeof ResizeObserver=="function"&&(this.resize=new ResizeObserver(()=>{var t;((t=this.view.docView)===null||t===void 0?void 0:t.lastUpdate){this.parentCheck<0&&(this.parentCheck=setTimeout(this.listenForScroll.bind(this),1e3)),t.length>0&&t[t.length-1].intersectionRatio>0!=this.intersecting&&(this.intersecting=!this.intersecting,this.intersecting!=this.view.inView&&this.onScrollChanged(document.createEvent("Event")))},{}),this.intersection.observe(this.dom),this.gapIntersection=new IntersectionObserver(t=>{t.length>0&&t[t.length-1].intersectionRatio>0&&this.onScrollChanged(document.createEvent("Event"))},{})),this.listenForScroll(),this.readSelectionRange()}onScrollChanged(e){this.view.inputState.runScrollHandlers(this.view,e),this.intersecting&&this.view.measure()}onScroll(e){this.intersecting&&this.flush(!1),this.onScrollChanged(e)}onResize(){this.resizeTimeout<0&&(this.resizeTimeout=setTimeout(()=>{this.resizeTimeout=-1,this.view.requestMeasure()},50))}onPrint(){this.view.viewState.printing=!0,this.view.measure(),setTimeout(()=>{this.view.viewState.printing=!1,this.view.requestMeasure()},500)}updateGaps(e){if(this.gapIntersection&&(e.length!=this.gaps.length||this.gaps.some((t,i)=>t!=e[i]))){this.gapIntersection.disconnect();for(let t of e)this.gapIntersection.observe(t);this.gaps=e}}onSelectionChange(e){let t=this.selectionChanged;if(!this.readSelectionRange()||this.delayedAndroidKey)return;let{view:i}=this,s=this.selectionRange;if(i.state.facet(zn)?i.root.activeElement!=this.dom:!dn(i.dom,s))return;let 
r=s.anchorNode&&i.docView.nearest(s.anchorNode);if(r&&r.ignoreEvent(e)){t||(this.selectionChanged=!1);return}(A.ie&&A.ie_version<=11||A.android&&A.chrome)&&!i.state.selection.main.empty&&s.focusNode&&Sn(s.focusNode,s.focusOffset,s.anchorNode,s.anchorOffset)?this.flushSoon():this.flush(!1)}readSelectionRange(){let{view:e}=this,t=A.safari&&e.root.nodeType==11&&Af(this.dom.ownerDocument)==this.dom&&Nu(this.view)||xn(e.root);if(!t||this.selectionRange.eq(t))return!1;let i=dn(this.dom,t);return i&&!this.selectionChanged&&e.inputState.lastFocusTime>Date.now()-200&&e.inputState.lastTouchTime{let r=this.delayedAndroidKey;r&&(this.clearDelayedAndroidKey(),!this.flush()&&r.force&&$t(this.dom,r.key,r.keyCode))};this.flushingAndroidKey=this.view.win.requestAnimationFrame(s)}(!this.delayedAndroidKey||e=="Enter")&&(this.delayedAndroidKey={key:e,keyCode:t,force:this.lastChange{this.delayedFlush=-1,this.flush()}))}forceFlush(){this.delayedFlush>=0&&(this.view.win.cancelAnimationFrame(this.delayedFlush),this.delayedFlush=-1),this.flush()}processRecords(){let e=this.queue;for(let r of this.observer.takeRecords())e.push(r);e.length&&(this.queue=[]);let t=-1,i=-1,s=!1;for(let r of e){let o=this.readMutation(r);o&&(o.typeOver&&(s=!0),t==-1?{from:t,to:i}=o:(t=Math.min(o.from,t),i=Math.max(o.to,i)))}return{from:t,to:i,typeOver:s}}readChange(){let{from:e,to:t,typeOver:i}=this.processRecords(),s=this.selectionChanged&&dn(this.dom,this.selectionRange);return e<0&&!s?null:(e>-1&&(this.lastChange=Date.now()),this.view.inputState.lastFocusTime=0,this.selectionChanged=!1,new Bu(this.view,e,t,i))}flush(e=!0){if(this.delayedFlush>=0||this.delayedAndroidKey)return!1;e&&this.readSelectionRange();let t=this.readChange();if(!t)return!1;let i=this.view.state,s=kh(this.view,t);return this.view.state==i&&this.view.update([]),s}readMutation(e){let t=this.view.docView.nearest(e.target);if(!t||t.ignoreMutation(e))return null;if(t.markDirty(e.type=="attributes"),e.type=="attributes"&&(t.dirty|=4),e.type=="childList"){let i=jo(t,e.previousSibling||e.target.previousSibling,-1),s=jo(t,e.nextSibling||e.target.nextSibling,1);return{from:i?t.posAfter(i):t.posAtStart,to:s?t.posBefore(s):t.posAtEnd,typeOver:!1}}else return e.type=="characterData"?{from:t.posAtStart,to:t.posAtEnd,typeOver:e.target.nodeValue==e.oldValue}:null}setWindow(e){e!=this.win&&(this.removeWindowListeners(this.win),this.win=e,this.addWindowListeners(this.win))}addWindowListeners(e){e.addEventListener("resize",this.onResize),e.addEventListener("beforeprint",this.onPrint),e.addEventListener("scroll",this.onScroll),e.document.addEventListener("selectionchange",this.onSelectionChange)}removeWindowListeners(e){e.removeEventListener("scroll",this.onScroll),e.removeEventListener("resize",this.onResize),e.removeEventListener("beforeprint",this.onPrint),e.document.removeEventListener("selectionchange",this.onSelectionChange)}destroy(){var e,t,i;this.stop(),(e=this.intersection)===null||e===void 0||e.disconnect(),(t=this.gapIntersection)===null||t===void 0||t.disconnect(),(i=this.resize)===null||i===void 0||i.disconnect();for(let s of this.scrollTargets)s.removeEventListener("scroll",this.onScroll);this.removeWindowListeners(this.win),clearTimeout(this.parentCheck),clearTimeout(this.resizeTimeout),this.win.cancelAnimationFrame(this.delayedFlush),this.win.cancelAnimationFrame(this.flushingAndroidKey)}}function jo(n,e,t){for(;e;){let i=K.get(e);if(i&&i.parent==n)return i;let s=e.parentNode;e=s!=n.dom?s:t>0?e.nextSibling:e.previousSibling}return null}function Nu(n){let 
e=null;function t(a){a.preventDefault(),a.stopImmediatePropagation(),e=a.getTargetRanges()[0]}if(n.contentDOM.addEventListener("beforeinput",t,!0),n.dom.ownerDocument.execCommand("indent"),n.contentDOM.removeEventListener("beforeinput",t,!0),!e)return null;let i=e.startContainer,s=e.startOffset,r=e.endContainer,o=e.endOffset,l=n.docView.domAtPos(n.state.selection.main.anchor);return Sn(l.node,l.offset,r,o)&&([i,s,r,o]=[r,o,i,s]),{anchorNode:i,anchorOffset:s,focusNode:r,focusOffset:o}}class O{constructor(e={}){this.plugins=[],this.pluginMap=new Map,this.editorAttrs={},this.contentAttrs={},this.bidiCache=[],this.destroyed=!1,this.updateState=2,this.measureScheduled=-1,this.measureRequests=[],this.contentDOM=document.createElement("div"),this.scrollDOM=document.createElement("div"),this.scrollDOM.tabIndex=-1,this.scrollDOM.className="cm-scroller",this.scrollDOM.appendChild(this.contentDOM),this.announceDOM=document.createElement("div"),this.announceDOM.style.cssText="position: absolute; top: -10000px",this.announceDOM.setAttribute("aria-live","polite"),this.dom=document.createElement("div"),this.dom.appendChild(this.announceDOM),this.dom.appendChild(this.scrollDOM),this._dispatch=e.dispatch||(t=>this.update([t])),this.dispatch=this.dispatch.bind(this),this._root=e.root||Of(e.parent)||document,this.viewState=new zo(e.state||N.create(e)),this.plugins=this.state.facet(yi).map(t=>new ns(t));for(let t of this.plugins)t.update(this);this.observer=new Iu(this),this.inputState=new su(this),this.inputState.ensureHandlers(this,this.plugins),this.docView=new Ao(this),this.mountStyles(),this.updateAttrs(),this.updateState=0,this.requestMeasure(),e.parent&&e.parent.appendChild(this.dom)}get state(){return this.viewState.state}get viewport(){return this.viewState.viewport}get visibleRanges(){return this.viewState.visibleRanges}get inView(){return this.viewState.inView}get composing(){return this.inputState.composing>0}get compositionStarted(){return this.inputState.composing>=0}get root(){return this._root}get win(){return this.dom.ownerDocument.defaultView||window}dispatch(...e){this._dispatch(e.length==1&&e[0]instanceof re?e[0]:this.state.update(...e))}update(e){if(this.updateState!=0)throw new Error("Calls to EditorView.update are not allowed while an update is in progress");let t=!1,i=!1,s,r=this.state;for(let h of e){if(h.startState!=r)throw new RangeError("Trying to update state with a transaction that doesn't start from the previous state.");r=h.state}if(this.destroyed){this.viewState.state=r;return}let o=this.observer.delayedAndroidKey,l=null;if(o?(this.observer.clearDelayedAndroidKey(),l=this.observer.readChange(),(l&&!this.state.doc.eq(r.doc)||!this.state.selection.eq(r.selection))&&(l=null)):this.observer.clear(),r.facet(N.phrases)!=this.state.facet(N.phrases))return this.setState(r);s=Mn.create(this,r,e);let a=this.viewState.scrollTarget;try{this.updateState=2;for(let h of e){if(a&&(a=a.map(h.changes)),h.scrollIntoView){let{main:c}=h.state.selection;a=new An(c.empty?c:w.cursor(c.head,c.head>c.anchor?-1:1))}for(let c of 
h.effects)c.is(xo)&&(a=c.value)}this.viewState.update(s,a),this.bidiCache=Dn.update(this.bidiCache,s.changes),s.empty||(this.updatePlugins(s),this.inputState.update(s)),t=this.docView.update(s),this.state.facet(bi)!=this.styleModules&&this.mountStyles(),i=this.updateAttrs(),this.showAnnouncements(e),this.docView.updateSelection(t,e.some(h=>h.isUserEvent("select.pointer")))}finally{this.updateState=0}if(s.startState.facet(Zi)!=s.state.facet(Zi)&&(this.viewState.mustMeasureContent=!0),(t||i||a||this.viewState.mustEnforceCursorAssoc||this.viewState.mustMeasureContent)&&this.requestMeasure(),!s.empty)for(let h of this.state.facet(Xs))h(s);l&&!kh(this,l)&&o.force&&$t(this.contentDOM,o.key,o.keyCode)}setState(e){if(this.updateState!=0)throw new Error("Calls to EditorView.setState are not allowed while an update is in progress");if(this.destroyed){this.viewState.state=e;return}this.updateState=2;let t=this.hasFocus;try{for(let i of this.plugins)i.destroy(this);this.viewState=new zo(e),this.plugins=e.facet(yi).map(i=>new ns(i)),this.pluginMap.clear();for(let i of this.plugins)i.update(this);this.docView=new Ao(this),this.inputState.ensureHandlers(this,this.plugins),this.mountStyles(),this.updateAttrs(),this.bidiCache=[]}finally{this.updateState=0}t&&this.focus(),this.requestMeasure()}updatePlugins(e){let t=e.startState.facet(yi),i=e.state.facet(yi);if(t!=i){let s=[];for(let r of i){let o=t.indexOf(r);if(o<0)s.push(new ns(r));else{let l=this.plugins[o];l.mustUpdate=e,s.push(l)}}for(let r of this.plugins)r.mustUpdate!=e&&r.destroy(this);this.plugins=s,this.pluginMap.clear(),this.inputState.ensureHandlers(this,this.plugins)}else for(let s of this.plugins)s.mustUpdate=e;for(let s=0;s-1&&cancelAnimationFrame(this.measureScheduled),this.measureScheduled=0,e&&this.observer.forceFlush();let t=null,{scrollHeight:i,scrollTop:s,clientHeight:r}=this.scrollDOM,o=s>i-r-4?i:s;try{for(let l=0;;l++){this.updateState=1;let a=this.viewport,h=this.viewState.lineBlockAtHeight(o),c=this.viewState.measure(this);if(!c&&!this.measureRequests.length&&this.viewState.scrollTarget==null)break;if(l>5){console.warn(this.measureRequests.length?"Measure loop restarted more than 5 times":"Viewport failed to stabilize");break}let f=[];c&4||([this.measureRequests,f]=[f,this.measureRequests]);let u=f.map(y=>{try{return y.read(this)}catch(b){return He(this.state,b),Ko}}),d=Mn.create(this,this.state,[]),p=!1,g=!1;d.flags|=c,t?t.flags|=c:t=d,this.updateState=2,d.empty||(this.updatePlugins(d),this.inputState.update(d),this.updateAttrs(),p=this.docView.update(d));for(let y=0;y1||y<-1)&&(this.scrollDOM.scrollTop+=y,g=!0)}if(p&&this.docView.updateSelection(!0),this.viewport.from==a.from&&this.viewport.to==a.to&&!g&&this.measureRequests.length==0)break}}finally{this.updateState=0,this.measureScheduled=-1}if(t&&!t.empty)for(let l of this.state.facet(Xs))l(t)}get themeClasses(){return sr+" "+(this.state.facet(nr)?bh:yh)+" "+this.state.facet(Zi)}updateAttrs(){let e=Uo(this,Za,{class:"cm-editor"+(this.hasFocus?" 
cm-focused ":" ")+this.themeClasses}),t={spellcheck:"false",autocorrect:"off",autocapitalize:"off",translate:"no",contenteditable:this.state.facet(zn)?"true":"false",class:"cm-content",style:`${A.tabSize}: ${this.state.tabSize}`,role:"textbox","aria-multiline":"true"};this.state.readOnly&&(t["aria-readonly"]="true"),Uo(this,Qa,t);let i=this.observer.ignore(()=>{let s=Js(this.contentDOM,this.contentAttrs,t),r=Js(this.dom,this.editorAttrs,e);return s||r});return this.editorAttrs=e,this.contentAttrs=t,i}showAnnouncements(e){let t=!0;for(let i of e)for(let s of i.effects)if(s.is(O.announce)){t&&(this.announceDOM.textContent=""),t=!1;let r=this.announceDOM.appendChild(document.createElement("div"));r.textContent=s.value}}mountStyles(){this.styleModules=this.state.facet(bi),mt.mount(this.root,this.styleModules.concat(Ou).reverse())}readMeasured(){if(this.updateState==2)throw new Error("Reading the editor layout isn't allowed during an update");this.updateState==0&&this.measureScheduled>-1&&this.measure(!1)}requestMeasure(e){if(this.measureScheduled<0&&(this.measureScheduled=this.win.requestAnimationFrame(()=>this.measure())),e){if(e.key!=null){for(let t=0;ti.spec==e)||null),t&&t.update(this).value}get documentTop(){return this.contentDOM.getBoundingClientRect().top+this.viewState.paddingTop}get documentPadding(){return{top:this.viewState.paddingTop,bottom:this.viewState.paddingBottom}}elementAtHeight(e){return this.readMeasured(),this.viewState.elementAtHeight(e)}lineBlockAtHeight(e){return this.readMeasured(),this.viewState.lineBlockAtHeight(e)}get viewportLineBlocks(){return this.viewState.viewportLines}lineBlockAt(e){return this.viewState.lineBlockAt(e)}get contentHeight(){return this.viewState.contentHeight}moveByChar(e,t,i){return rs(this,e,Po(this,e,t,i))}moveByGroup(e,t){return rs(this,e,Po(this,e,t,i=>iu(this,e.head,i)))}moveToLineBoundary(e,t,i=!0){return tu(this,e,t,i)}moveVertically(e,t,i){return rs(this,e,nu(this,e,t,i))}domAtPos(e){return this.docView.domAtPos(e)}posAtDOM(e,t=0){return this.docView.posFromDOM(e,t)}posAtCoords(e,t=!0){return this.readMeasured(),ah(this,e,t)}coordsAtPos(e,t=1){this.readMeasured();let i=this.docView.coordsAt(e,t);if(!i||i.left==i.right)return i;let s=this.state.doc.lineAt(e),r=this.bidiSpans(s),o=r[Jt.find(r,e-s.from,-1,t)];return Dr(i,o.dir==Z.LTR==t>0)}get defaultCharacterWidth(){return this.viewState.heightOracle.charWidth}get defaultLineHeight(){return this.viewState.heightOracle.lineHeight}get textDirection(){return this.viewState.defaultTextDirection}textDirectionAt(e){return!this.state.facet(Ya)||ethis.viewport.to?this.textDirection:(this.readMeasured(),this.docView.textDirectionAt(e))}get lineWrapping(){return this.viewState.heightOracle.lineWrapping}bidiSpans(e){if(e.length>_u)return nh(e.length);let t=this.textDirectionAt(e.from);for(let s of this.bidiCache)if(s.from==e.from&&s.dir==t)return s.order;let i=Wf(e.text,t);return this.bidiCache.push(new Dn(e.from,e.to,t,i)),i}get hasFocus(){var e;return(this.dom.ownerDocument.hasFocus()||A.safari&&((e=this.inputState)===null||e===void 0?void 0:e.lastContextMenu)>Date.now()-3e4)&&this.root.activeElement==this.contentDOM}focus(){this.observer.ignore(()=>{Ea(this.contentDOM),this.docView.updateSelection()})}setRoot(e){this._root!=e&&(this._root=e,this.observer.setWindow((e.nodeType==9?e:e.ownerDocument).defaultView||window),this.mountStyles())}destroy(){for(let e of 
this.plugins)e.destroy(this);this.plugins=[],this.inputState.destroy(),this.dom.remove(),this.observer.destroy(),this.measureScheduled>-1&&cancelAnimationFrame(this.measureScheduled),this.destroyed=!0}static scrollIntoView(e,t={}){return xo.of(new An(typeof e=="number"?w.cursor(e):e,t.y,t.x,t.yMargin,t.xMargin))}static domEventHandlers(e){return be.define(()=>({}),{eventHandlers:e})}static theme(e,t){let i=mt.newName(),s=[Zi.of(i),bi.of(rr(`.${i}`,e))];return t&&t.dark&&s.push(nr.of(!0)),s}static baseTheme(e){return Vi.lowest(bi.of(rr("."+sr,e,wh)))}static findFromDOM(e){var t;let i=e.querySelector(".cm-content"),s=i&&K.get(i)||K.get(e);return((t=s?.rootView)===null||t===void 0?void 0:t.view)||null}}O.styleModule=bi;O.inputHandler=Ja;O.perLineTextDirection=Ya;O.exceptionSink=$a;O.updateListener=Xs;O.editable=zn;O.mouseSelectionStyle=Ga;O.dragMovesSelection=Ua;O.clickAddsSelectionRange=Ka;O.decorations=Ei;O.atomicRanges=eh;O.scrollMargins=th;O.darkTheme=nr;O.contentAttributes=Qa;O.editorAttributes=Za;O.lineWrapping=O.contentAttributes.of({class:"cm-lineWrapping"});O.announce=R.define();const _u=4096,Ko={};class Dn{constructor(e,t,i,s){this.from=e,this.to=t,this.dir=i,this.order=s}static update(e,t){if(t.empty)return e;let i=[],s=e.length?e[e.length-1].dir:Z.LTR;for(let r=Math.max(0,e.length-10);r=0;s--){let r=i[s],o=typeof r=="function"?r(n):r;o&&$s(o,t)}return t}const Vu=A.mac?"mac":A.windows?"win":A.linux?"linux":"key";function Fu(n,e){const t=n.split(/-(?!$)/);let i=t[t.length-1];i=="Space"&&(i=" ");let s,r,o,l;for(let a=0;ai.concat(s),[]))),t}let at=null;const zu=4e3;function qu(n,e=Vu){let t=Object.create(null),i=Object.create(null),s=(o,l)=>{let a=i[o];if(a==null)i[o]=l;else if(a!=l)throw new Error("Key binding "+o+" is used both as a regular binding and as a multi-stroke prefix")},r=(o,l,a,h)=>{var c,f;let u=t[o]||(t[o]=Object.create(null)),d=l.split(/ (?!$)/).map(y=>Fu(y,e));for(let y=1;y{let S=at={view:v,prefix:b,scope:o};return setTimeout(()=>{at==S&&(at=null)},zu),!0}]})}let p=d.join(" ");s(p,!1);let g=u[p]||(u[p]={preventDefault:!1,run:((f=(c=u._any)===null||c===void 0?void 0:c.run)===null||f===void 0?void 0:f.slice())||[]});a&&g.run.push(a),h&&(g.preventDefault=!0)};for(let o of n){let l=o.scope?o.scope.split(" "):["editor"];if(o.any)for(let h of l){let c=t[h]||(t[h]=Object.create(null));c._any||(c._any={preventDefault:!1,run:[]});for(let f in c)c[f].run.push(o.any)}let a=o[e]||o.key;if(a)for(let h of l)r(h,a,o.run,o.preventDefault),o.shift&&r(h,"Shift-"+a,o.shift,o.preventDefault)}return t}function ju(n,e,t,i){let s=Cf(e),r=ge(s,0),o=Ee(r)==s.length&&s!=" ",l="",a=!1;at&&at.view==t&&at.scope==i&&(l=at.prefix+" ",(a=ch.indexOf(e.keyCode)<0)&&(at=null));let h=new Set,c=p=>{if(p){for(let g of p.run)if(!h.has(g)&&(h.add(g),g(t,e)))return!0;p.preventDefault&&(a=!0)}return!1},f=n[i],u,d;if(f){if(c(f[l+Qi(s,e,!o)]))return!0;if(o&&(e.shiftKey||e.altKey||e.metaKey||r>127)&&(u=gt[e.keyCode])&&u!=s){if(c(f[l+Qi(u,e,!0)]))return!0;if(e.shiftKey&&(d=Oi[e.keyCode])!=s&&d!=u&&c(f[l+Qi(d,e,!1)]))return!0}else if(o&&e.shiftKey&&c(f[l+Qi(s,e,!0)]))return!0;if(c(f._any))return!0}return a}const vh=!A.ios,ki=D.define({combine(n){return _t(n,{cursorBlinkRate:1200,drawRangeCursor:!0},{cursorBlinkRate:(e,t)=>Math.min(e,t),drawRangeCursor:(e,t)=>e||t})}});function Ku(n={}){return[ki.of(n),Uu,Gu,Xa.of(!0)]}class xh{constructor(e,t,i,s,r){this.left=e,this.top=t,this.width=i,this.height=s,this.className=r}draw(){let e=document.createElement("div");return 
e.className=this.className,this.adjust(e),e}adjust(e){e.style.left=this.left+"px",e.style.top=this.top+"px",this.width>=0&&(e.style.width=this.width+"px"),e.style.height=this.height+"px"}eq(e){return this.left==e.left&&this.top==e.top&&this.width==e.width&&this.height==e.height&&this.className==e.className}}const Uu=be.fromClass(class{constructor(n){this.view=n,this.rangePieces=[],this.cursors=[],this.measureReq={read:this.readPos.bind(this),write:this.drawSel.bind(this)},this.selectionLayer=n.scrollDOM.appendChild(document.createElement("div")),this.selectionLayer.className="cm-selectionLayer",this.selectionLayer.setAttribute("aria-hidden","true"),this.cursorLayer=n.scrollDOM.appendChild(document.createElement("div")),this.cursorLayer.className="cm-cursorLayer",this.cursorLayer.setAttribute("aria-hidden","true"),n.requestMeasure(this.measureReq),this.setBlinkRate()}setBlinkRate(){this.cursorLayer.style.animationDuration=this.view.state.facet(ki).cursorBlinkRate+"ms"}update(n){let e=n.startState.facet(ki)!=n.state.facet(ki);(e||n.selectionSet||n.geometryChanged||n.viewportChanged)&&this.view.requestMeasure(this.measureReq),n.transactions.some(t=>t.scrollIntoView)&&(this.cursorLayer.style.animationName=this.cursorLayer.style.animationName=="cm-blink"?"cm-blink2":"cm-blink"),e&&this.setBlinkRate()}readPos(){let{state:n}=this.view,e=n.facet(ki),t=n.selection.ranges.map(s=>s.empty?[]:$u(this.view,s)).reduce((s,r)=>s.concat(r)),i=[];for(let s of n.selection.ranges){let r=s==n.selection.main;if(s.empty?!r||vh:e.drawRangeCursor){let o=Ju(this.view,s,r);o&&i.push(o)}}return{rangePieces:t,cursors:i}}drawSel({rangePieces:n,cursors:e}){if(n.length!=this.rangePieces.length||n.some((t,i)=>!t.eq(this.rangePieces[i]))){this.selectionLayer.textContent="";for(let t of n)this.selectionLayer.appendChild(t.draw());this.rangePieces=n}if(e.length!=this.cursors.length||e.some((t,i)=>!t.eq(this.cursors[i]))){let t=this.cursorLayer.children;if(t.length!==e.length){this.cursorLayer.textContent="";for(const i of e)this.cursorLayer.appendChild(i.draw())}else e.forEach((i,s)=>i.adjust(t[s]));this.cursors=e}}destroy(){this.selectionLayer.remove(),this.cursorLayer.remove()}}),Sh={".cm-line":{"& ::selection":{backgroundColor:"transparent !important"},"&::selection":{backgroundColor:"transparent !important"}}};vh&&(Sh[".cm-line"].caretColor="transparent !important");const Gu=Vi.highest(O.theme(Sh));function Ch(n){let e=n.scrollDOM.getBoundingClientRect();return{left:(n.textDirection==Z.LTR?e.left:e.right-n.scrollDOM.clientWidth)-n.scrollDOM.scrollLeft,top:e.top-n.scrollDOM.scrollTop}}function $o(n,e,t){let i=w.cursor(e);return{from:Math.max(t.from,n.moveToLineBoundary(i,!1,!0).from),to:Math.min(t.to,n.moveToLineBoundary(i,!0,!0).from),type:W.Text}}function Jo(n,e){let t=n.lineBlockAt(e);if(Array.isArray(t.type)){for(let i of t.type)if(i.to>e||i.to==e&&(i.to==t.to||i.type==W.Text))return i}return t}function $u(n,e){if(e.to<=n.viewport.from||e.from>=n.viewport.to)return[];let t=Math.max(e.from,n.viewport.from),i=Math.min(e.to,n.viewport.to),s=n.textDirection==Z.LTR,r=n.contentDOM,o=r.getBoundingClientRect(),l=Ch(n),a=window.getComputedStyle(r.firstChild),h=o.left+parseInt(a.paddingLeft)+Math.min(0,parseInt(a.textIndent)),c=o.right-parseInt(a.paddingRight),f=Jo(n,t),u=Jo(n,i),d=f.type==W.Text?f:null,p=u.type==W.Text?u:null;if(n.lineWrapping&&(d&&(d=$o(n,t,d)),p&&(p=$o(n,i,p))),d&&p&&d.from==p.from)return y(b(e.from,e.to,d));{let 
S=d?b(e.from,null,d):v(f,!1),k=p?b(null,e.to,p):v(u,!0),C=[];return(d||f).to<(p||u).from-1?C.push(g(h,S.bottom,c,k.top)):S.bottomP&&G.from=M)break;J>Q&&I(Math.max(le,Q),S==null&&le<=P,Math.min(J,M),k==null&&J>=V,Y.dir)}if(Q=$.to+1,Q>=M)break}return U.length==0&&I(P,S==null,V,k==null,n.textDirection),{top:T,bottom:B,horizontal:U}}function v(S,k){let C=o.top+(k?S.top:S.bottom);return{top:C,bottom:C,horizontal:[]}}}function Ju(n,e,t){let i=n.coordsAtPos(e.head,e.assoc||1);if(!i)return null;let s=Ch(n);return new xh(i.left-s.left,i.top-s.top,-1,i.bottom-i.top,t?"cm-cursor cm-cursor-primary":"cm-cursor cm-cursor-secondary")}function Yo(n,e,t,i,s){e.lastIndex=0;for(let r=n.iterRange(t,i),o=t,l;!r.next().done;o+=r.value.length)if(!r.lineBreak)for(;l=e.exec(r.value);)s(o+l.index,l)}function Yu(n,e){let t=n.visibleRanges;if(t.length==1&&t[0].from==n.viewport.from&&t[0].to==n.viewport.to)return t;let i=[];for(let{from:s,to:r}of t)s=Math.max(n.state.doc.lineAt(s).from,s-e),r=Math.min(n.state.doc.lineAt(r).to,r+e),i.length&&i[i.length-1].to>=s?i[i.length-1].to=r:i.push({from:s,to:r});return i}class Xu{constructor(e){const{regexp:t,decoration:i,decorate:s,boundary:r,maxLength:o=1e3}=e;if(!t.global)throw new RangeError("The regular expression given to MatchDecorator should have its 'g' flag set");if(this.regexp=t,s)this.addMatch=(l,a,h,c)=>s(c,h,h+l[0].length,l,a);else if(typeof i=="function")this.addMatch=(l,a,h,c)=>{let f=i(l,a,h);f&&c(h,h+l[0].length,f)};else if(i)this.addMatch=(l,a,h,c)=>c(h,h+l[0].length,i);else throw new RangeError("Either 'decorate' or 'decoration' should be provided to MatchDecorator");this.boundary=r,this.maxLength=o}createDeco(e){let t=new Pt,i=t.add.bind(t);for(let{from:s,to:r}of Yu(e,this.maxLength))Yo(e.state.doc,this.regexp,s,r,(o,l)=>this.addMatch(l,e,o,i));return t.finish()}updateDeco(e,t){let i=1e9,s=-1;return e.docChanged&&e.changes.iterChanges((r,o,l,a)=>{a>e.view.viewport.from&&l1e3?this.createDeco(e.view):s>-1?this.updateRange(e.view,t.map(e.changes),i,s):t}updateRange(e,t,i,s){for(let r of e.visibleRanges){let o=Math.max(r.from,i),l=Math.min(r.to,s);if(l>o){let a=e.state.doc.lineAt(o),h=a.toa.from;o--)if(this.boundary.test(a.text[o-1-a.from])){c=o;break}for(;lu.push(b.range(g,y));if(a==h)for(this.regexp.lastIndex=c-a.from;(d=this.regexp.exec(a.text))&&d.indexthis.addMatch(y,e,g,p));t=t.update({filterFrom:c,filterTo:f,filter:(g,y)=>gf,add:u})}}return t}}const or=/x/.unicode!=null?"gu":"g",Zu=new RegExp(`[\0-\b ---Ÿ­؜​‎‏\u2028\u2029‭‮⁦⁧⁩\uFEFF-]`,or),Qu={0:"null",7:"bell",8:"backspace",10:"newline",11:"vertical tab",13:"carriage return",27:"escape",8203:"zero width space",8204:"zero width non-joiner",8205:"zero width joiner",8206:"left-to-right mark",8207:"right-to-left mark",8232:"line separator",8237:"left-to-right override",8238:"right-to-left override",8294:"left-to-right isolate",8295:"right-to-left isolate",8297:"pop directional isolate",8233:"paragraph separator",65279:"zero width no-break space",65532:"object replacement"};let as=null;function ed(){var n;if(as==null&&typeof document<"u"&&document.body){let e=document.body.style;as=((n=e.tabSize)!==null&&n!==void 0?n:e.MozTabSize)!=null}return as||!1}const mn=D.define({combine(n){let e=_t(n,{render:null,specialChars:Zu,addSpecialChars:null});return(e.replaceTabs=!ed())&&(e.specialChars=new RegExp(" |"+e.specialChars.source,or)),e.addSpecialChars&&(e.specialChars=new RegExp(e.specialChars.source+"|"+e.addSpecialChars.source,or)),e}});function td(n={}){return[mn.of(n),id()]}let Xo=null;function id(){return 
Xo||(Xo=be.fromClass(class{constructor(n){this.view=n,this.decorations=E.none,this.decorationCache=Object.create(null),this.decorator=this.makeDecorator(n.state.facet(mn)),this.decorations=this.decorator.createDeco(n)}makeDecorator(n){return new Xu({regexp:n.specialChars,decoration:(e,t,i)=>{let{doc:s}=t.state,r=ge(e[0],0);if(r==9){let o=s.lineAt(i),l=t.state.tabSize,a=Fi(o.text,l,i-o.from);return E.replace({widget:new od((l-a%l)*this.view.defaultCharacterWidth)})}return this.decorationCache[r]||(this.decorationCache[r]=E.replace({widget:new rd(n,r)}))},boundary:n.replaceTabs?void 0:/[^]/})}update(n){let e=n.state.facet(mn);n.startState.facet(mn)!=e?(this.decorator=this.makeDecorator(e),this.decorations=this.decorator.createDeco(n.view)):this.decorations=this.decorator.updateDeco(n,this.decorations)}},{decorations:n=>n.decorations}))}const nd="•";function sd(n){return n>=32?nd:n==10?"␤":String.fromCharCode(9216+n)}class rd extends tt{constructor(e,t){super(),this.options=e,this.code=t}eq(e){return e.code==this.code}toDOM(e){let t=sd(this.code),i=e.state.phrase("Control character")+" "+(Qu[this.code]||"0x"+this.code.toString(16)),s=this.options.render&&this.options.render(this.code,i,t);if(s)return s;let r=document.createElement("span");return r.textContent=t,r.title=i,r.setAttribute("aria-label",i),r.className="cm-specialChar",r}ignoreEvent(){return!1}}class od extends tt{constructor(e){super(),this.width=e}eq(e){return e.width==this.width}toDOM(){let e=document.createElement("span");return e.textContent=" ",e.className="cm-tab",e.style.width=this.width+"px",e}ignoreEvent(){return!1}}class ld extends tt{constructor(e){super(),this.content=e}toDOM(){let e=document.createElement("span");return e.className="cm-placeholder",e.style.pointerEvents="none",e.appendChild(typeof this.content=="string"?document.createTextNode(this.content):this.content),typeof this.content=="string"?e.setAttribute("aria-label","placeholder "+this.content):e.setAttribute("aria-hidden","true"),e}ignoreEvent(){return!1}}function ad(n){return be.fromClass(class{constructor(e){this.view=e,this.placeholder=E.set([E.widget({widget:new ld(n),side:1}).range(0)])}get decorations(){return this.view.state.doc.length?E.none:this.placeholder}},{decorations:e=>e.decorations})}const lr=2e3;function hd(n,e,t){let i=Math.min(e.line,t.line),s=Math.max(e.line,t.line),r=[];if(e.off>lr||t.off>lr||e.col<0||t.col<0){let o=Math.min(e.off,t.off),l=Math.max(e.off,t.off);for(let a=i;a<=s;a++){let h=n.doc.line(a);h.length<=l&&r.push(w.range(h.from+o,h.to+l))}}else{let o=Math.min(e.col,t.col),l=Math.max(e.col,t.col);for(let a=i;a<=s;a++){let h=n.doc.line(a),c=Hs(h.text,o,n.tabSize,!0);if(c<0)r.push(w.cursor(h.to));else{let f=Hs(h.text,l,n.tabSize);r.push(w.range(h.from+c,h.from+f))}}}return r}function cd(n,e){let t=n.coordsAtPos(n.viewport.from);return t?Math.round(Math.abs((t.left-e)/n.defaultCharacterWidth)):-1}function Zo(n,e){let t=n.posAtCoords({x:e.clientX,y:e.clientY},!1),i=n.state.doc.lineAt(t),s=t-i.from,r=s>lr?-1:s==i.length?cd(n,e.clientX):Fi(i.text,n.state.tabSize,t-i.from);return{line:i.number,col:r,off:s}}function fd(n,e){let t=Zo(n,e),i=n.state.selection;return t?{update(s){if(s.docChanged){let r=s.changes.mapPos(s.startState.doc.line(t.line).from),o=s.state.doc.lineAt(r);t={line:o.number,col:t.col,off:Math.min(t.off,o.length)},i=i.map(s.changes)}},get(s,r,o){let l=Zo(n,s);if(!l)return i;let a=hd(n.state,t,l);return a.length?o?w.create(a.concat(i.ranges)):w.create(a):i}}:null}function ud(n){let 
e=n?.eventFilter||(t=>t.altKey&&t.button==0);return O.mouseSelectionStyle.of((t,i)=>e(i)?fd(t,i):null)}const dd={Alt:[18,n=>n.altKey],Control:[17,n=>n.ctrlKey],Shift:[16,n=>n.shiftKey],Meta:[91,n=>n.metaKey]},pd={style:"cursor: crosshair"};function md(n={}){let[e,t]=dd[n.key||"Alt"],i=be.fromClass(class{constructor(s){this.view=s,this.isDown=!1}set(s){this.isDown!=s&&(this.isDown=s,this.view.update([]))}},{eventHandlers:{keydown(s){this.set(s.keyCode==e||t(s))},keyup(s){(s.keyCode==e||!t(s))&&this.set(!1)},mousemove(s){this.set(t(s))}}});return[i,O.contentAttributes.of(s=>{var r;return!((r=s.plugin(i))===null||r===void 0)&&r.isDown?pd:null})]}const hs="-10000px";class Ah{constructor(e,t,i){this.facet=t,this.createTooltipView=i,this.input=e.state.facet(t),this.tooltips=this.input.filter(s=>s),this.tooltipViews=this.tooltips.map(i)}update(e){var t;let i=e.state.facet(this.facet),s=i.filter(o=>o);if(i===this.input){for(let o of this.tooltipViews)o.update&&o.update(e);return!1}let r=[];for(let o=0;o{var e,t,i;return{position:A.ios?"absolute":((e=n.find(s=>s.position))===null||e===void 0?void 0:e.position)||"fixed",parent:((t=n.find(s=>s.parent))===null||t===void 0?void 0:t.parent)||null,tooltipSpace:((i=n.find(s=>s.tooltipSpace))===null||i===void 0?void 0:i.tooltipSpace)||gd}}}),Mh=be.fromClass(class{constructor(n){this.view=n,this.inView=!0,this.lastTransaction=0,this.measureTimeout=-1;let e=n.state.facet(cs);this.position=e.position,this.parent=e.parent,this.classes=n.themeClasses,this.createContainer(),this.measureReq={read:this.readMeasure.bind(this),write:this.writeMeasure.bind(this),key:this},this.manager=new Ah(n,Er,t=>this.createTooltip(t)),this.intersectionObserver=typeof IntersectionObserver=="function"?new IntersectionObserver(t=>{Date.now()>this.lastTransaction-50&&t.length>0&&t[t.length-1].intersectionRatio<1&&this.measureSoon()},{threshold:[1]}):null,this.observeIntersection(),n.win.addEventListener("resize",this.measureSoon=this.measureSoon.bind(this)),this.maybeMeasure()}createContainer(){this.parent?(this.container=document.createElement("div"),this.container.style.position="relative",this.container.className=this.view.themeClasses,this.parent.appendChild(this.container)):this.container=this.view.dom}observeIntersection(){if(this.intersectionObserver){this.intersectionObserver.disconnect();for(let n of this.manager.tooltipViews)this.intersectionObserver.observe(n.dom)}}measureSoon(){this.measureTimeout<0&&(this.measureTimeout=setTimeout(()=>{this.measureTimeout=-1,this.maybeMeasure()},50))}update(n){n.transactions.length&&(this.lastTransaction=Date.now());let e=this.manager.update(n);e&&this.observeIntersection();let t=e||n.geometryChanged,i=n.state.facet(cs);if(i.position!=this.position){this.position=i.position;for(let s of this.manager.tooltipViews)s.dom.style.position=this.position;t=!0}if(i.parent!=this.parent){this.parent&&this.container.remove(),this.parent=i.parent,this.createContainer();for(let s of this.manager.tooltipViews)this.container.appendChild(s.dom);t=!0}else this.parent&&this.view.themeClasses!=this.classes&&(this.classes=this.container.className=this.view.themeClasses);t&&this.maybeMeasure()}createTooltip(n){let e=n.create(this.view);if(e.dom.classList.add("cm-tooltip"),n.arrow&&!e.dom.querySelector(".cm-tooltip > .cm-tooltip-arrow")){let t=document.createElement("div");t.className="cm-tooltip-arrow",e.dom.appendChild(t)}return e.dom.style.position=this.position,e.dom.style.top=hs,this.container.appendChild(e.dom),e.mount&&e.mount(this.view),e}destroy(){var 
n,e;this.view.win.removeEventListener("resize",this.measureSoon);for(let t of this.manager.tooltipViews)t.dom.remove(),(n=t.destroy)===null||n===void 0||n.call(t);(e=this.intersectionObserver)===null||e===void 0||e.disconnect(),clearTimeout(this.measureTimeout)}readMeasure(){let n=this.view.dom.getBoundingClientRect();return{editor:n,parent:this.parent?this.container.getBoundingClientRect():n,pos:this.manager.tooltips.map((e,t)=>{let i=this.manager.tooltipViews[t];return i.getCoords?i.getCoords(e.pos):this.view.coordsAtPos(e.pos)}),size:this.manager.tooltipViews.map(({dom:e})=>e.getBoundingClientRect()),space:this.view.state.facet(cs).tooltipSpace(this.view)}}writeMeasure(n){let{editor:e,space:t}=n,i=[];for(let s=0;s=Math.min(e.bottom,t.bottom)||a.rightMath.min(e.right,t.right)+.1){l.style.top=hs;continue}let c=r.arrow?o.dom.querySelector(".cm-tooltip-arrow"):null,f=c?7:0,u=h.right-h.left,d=h.bottom-h.top,p=o.offset||bd,g=this.view.textDirection==Z.LTR,y=h.width>t.right-t.left?g?t.left:t.right-h.width:g?Math.min(a.left-(c?14:0)+p.x,t.right-u):Math.max(t.left,a.left-u+(c?14:0)-p.x),b=!!r.above;!r.strictSide&&(b?a.top-(h.bottom-h.top)-p.yt.bottom)&&b==t.bottom-a.bottom>a.top-t.top&&(b=!b);let v=b?a.top-d-f-p.y:a.bottom+f+p.y,S=y+u;if(o.overlap!==!0)for(let k of i)k.lefty&&k.topv&&(v=b?k.top-d-2-f:k.bottom+f+2);this.position=="absolute"?(l.style.top=v-n.parent.top+"px",l.style.left=y-n.parent.left+"px"):(l.style.top=v+"px",l.style.left=y+"px"),c&&(c.style.left=`${a.left+(g?p.x:-p.x)-(y+14-7)}px`),o.overlap!==!0&&i.push({left:y,top:v,right:S,bottom:v+d}),l.classList.toggle("cm-tooltip-above",b),l.classList.toggle("cm-tooltip-below",!b),o.positioned&&o.positioned()}}maybeMeasure(){if(this.manager.tooltips.length&&(this.view.inView&&this.view.requestMeasure(this.measureReq),this.inView!=this.view.inView&&(this.inView=this.view.inView,!this.inView)))for(let n of this.manager.tooltipViews)n.dom.style.top=hs}},{eventHandlers:{scroll(){this.maybeMeasure()}}}),yd=O.baseTheme({".cm-tooltip":{zIndex:100},"&light .cm-tooltip":{border:"1px solid #bbb",backgroundColor:"#f5f5f5"},"&light .cm-tooltip-section:not(:first-child)":{borderTop:"1px solid #bbb"},"&dark .cm-tooltip":{backgroundColor:"#333338",color:"white"},".cm-tooltip-arrow":{height:"7px",width:`${7*2}px`,position:"absolute",zIndex:-1,overflow:"hidden","&:before, &:after":{content:"''",position:"absolute",width:0,height:0,borderLeft:"7px solid transparent",borderRight:"7px solid transparent"},".cm-tooltip-above &":{bottom:"-7px","&:before":{borderTop:"7px solid #bbb"},"&:after":{borderTop:"7px solid #f5f5f5",bottom:"1px"}},".cm-tooltip-below &":{top:"-7px","&:before":{borderBottom:"7px solid #bbb"},"&:after":{borderBottom:"7px solid #f5f5f5",top:"1px"}}},"&dark .cm-tooltip .cm-tooltip-arrow":{"&:before":{borderTopColor:"#333338",borderBottomColor:"#333338"},"&:after":{borderTopColor:"transparent",borderBottomColor:"transparent"}}}),bd={x:0,y:0},Er=D.define({enables:[Mh,yd]}),Tn=D.define();class Rr{constructor(e){this.view=e,this.mounted=!1,this.dom=document.createElement("div"),this.dom.classList.add("cm-tooltip-hover"),this.manager=new Ah(e,Tn,t=>this.createHostedView(t))}static create(e){return new Rr(e)}createHostedView(e){let t=e.create(this.view);return t.dom.classList.add("cm-tooltip-section"),this.dom.appendChild(t.dom),this.mounted&&t.mount&&t.mount(this.view),t}mount(e){for(let t of this.manager.tooltipViews)t.mount&&t.mount(e);this.mounted=!0}positioned(){for(let e of 
this.manager.tooltipViews)e.positioned&&e.positioned()}update(e){this.manager.update(e)}}const wd=Er.compute([Tn],n=>{let e=n.facet(Tn).filter(t=>t);return e.length===0?null:{pos:Math.min(...e.map(t=>t.pos)),end:Math.max(...e.filter(t=>t.end!=null).map(t=>t.end)),create:Rr.create,above:e[0].above,arrow:e.some(t=>t.arrow)}});class kd{constructor(e,t,i,s,r){this.view=e,this.source=t,this.field=i,this.setHover=s,this.hoverTime=r,this.hoverTimeout=-1,this.restartTimeout=-1,this.pending=null,this.lastMove={x:0,y:0,target:e.dom,time:0},this.checkHover=this.checkHover.bind(this),e.dom.addEventListener("mouseleave",this.mouseleave=this.mouseleave.bind(this)),e.dom.addEventListener("mousemove",this.mousemove=this.mousemove.bind(this))}update(){this.pending&&(this.pending=null,clearTimeout(this.restartTimeout),this.restartTimeout=setTimeout(()=>this.startHover(),20))}get active(){return this.view.state.field(this.field)}checkHover(){if(this.hoverTimeout=-1,this.active)return;let e=Date.now()-this.lastMove.time;ei.bottom||e.xi.right+this.view.defaultCharacterWidth)return;let s=this.view.bidiSpans(this.view.state.doc.lineAt(t)).find(l=>l.from<=t&&l.to>=t),r=s&&s.dir==Z.RTL?-1:1,o=this.source(this.view,t,e.x{this.pending==l&&(this.pending=null,a&&this.view.dispatch({effects:this.setHover.of(a)}))},a=>He(this.view.state,a,"hover tooltip"))}else o&&this.view.dispatch({effects:this.setHover.of(o)})}mousemove(e){var t;this.lastMove={x:e.clientX,y:e.clientY,target:e.target,time:Date.now()},this.hoverTimeout<0&&(this.hoverTimeout=setTimeout(this.checkHover,this.hoverTime));let i=this.active;if(i&&!vd(this.lastMove.target)||this.pending){let{pos:s}=i||this.pending,r=(t=i?.end)!==null&&t!==void 0?t:s;(s==r?this.view.posAtCoords(this.lastMove)!=s:!xd(this.view,s,r,e.clientX,e.clientY,6))&&(this.view.dispatch({effects:this.setHover.of(null)}),this.pending=null)}}mouseleave(){clearTimeout(this.hoverTimeout),this.hoverTimeout=-1,this.active&&this.view.dispatch({effects:this.setHover.of(null)})}destroy(){clearTimeout(this.hoverTimeout),this.view.dom.removeEventListener("mouseleave",this.mouseleave),this.view.dom.removeEventListener("mousemove",this.mousemove)}}function vd(n){for(let e=n;e;e=e.parentNode)if(e.nodeType==1&&e.classList.contains("cm-tooltip"))return!0;return!1}function xd(n,e,t,i,s,r){let o=document.createRange(),l=n.domAtPos(e),a=n.domAtPos(t);o.setEnd(a.node,a.offset),o.setStart(l.node,l.offset);let h=o.getClientRects();o.detach();for(let c=0;cTn.from(s)});return[i,be.define(s=>new kd(s,n,i,t,e.hoverTime||300)),wd]}function Cd(n,e){let t=n.plugin(Mh);if(!t)return null;let i=t.manager.tooltips.indexOf(e);return i<0?null:t.manager.tooltipViews[i]}const Ad=R.define(),Qo=D.define({combine(n){let e,t;for(let i of n)e=e||i.topContainer,t=t||i.bottomContainer;return{topContainer:e,bottomContainer:t}}});function Md(n,e){let t=n.plugin(Dh),i=t?t.specs.indexOf(e):-1;return i>-1?t.panels[i]:null}const Dh=be.fromClass(class{constructor(n){this.input=n.state.facet(ar),this.specs=this.input.filter(t=>t),this.panels=this.specs.map(t=>t(n));let e=n.state.facet(Qo);this.top=new en(n,!0,e.topContainer),this.bottom=new en(n,!1,e.bottomContainer),this.top.sync(this.panels.filter(t=>t.top)),this.bottom.sync(this.panels.filter(t=>!t.top));for(let t of this.panels)t.dom.classList.add("cm-panel"),t.mount&&t.mount()}update(n){let e=n.state.facet(Qo);this.top.container!=e.topContainer&&(this.top.sync([]),this.top=new en(n.view,!0,e.topContainer)),this.bottom.container!=e.bottomContainer&&(this.bottom.sync([]),this.bottom=new 
en(n.view,!1,e.bottomContainer)),this.top.syncClasses(),this.bottom.syncClasses();let t=n.state.facet(ar);if(t!=this.input){let i=t.filter(a=>a),s=[],r=[],o=[],l=[];for(let a of i){let h=this.specs.indexOf(a),c;h<0?(c=a(n.view),l.push(c)):(c=this.panels[h],c.update&&c.update(n)),s.push(c),(c.top?r:o).push(c)}this.specs=i,this.panels=s,this.top.sync(r),this.bottom.sync(o);for(let a of l)a.dom.classList.add("cm-panel"),a.mount&&a.mount()}else for(let i of this.panels)i.update&&i.update(n)}destroy(){this.top.sync([]),this.bottom.sync([])}},{provide:n=>O.scrollMargins.of(e=>{let t=e.plugin(n);return t&&{top:t.top.scrollMargin(),bottom:t.bottom.scrollMargin()}})});class en{constructor(e,t,i){this.view=e,this.top=t,this.container=i,this.dom=void 0,this.classes="",this.panels=[],this.syncClasses()}sync(e){for(let t of this.panels)t.destroy&&e.indexOf(t)<0&&t.destroy();this.panels=e,this.syncDOM()}syncDOM(){if(this.panels.length==0){this.dom&&(this.dom.remove(),this.dom=void 0);return}if(!this.dom){this.dom=document.createElement("div"),this.dom.className=this.top?"cm-panels cm-panels-top":"cm-panels cm-panels-bottom",this.dom.style[this.top?"top":"bottom"]="0";let t=this.container||this.view.dom;t.insertBefore(this.dom,this.top?t.firstChild:null)}let e=this.dom.firstChild;for(let t of this.panels)if(t.dom.parentNode==this.dom){for(;e!=t.dom;)e=el(e);e=e.nextSibling}else this.dom.insertBefore(t.dom,e);for(;e;)e=el(e)}scrollMargin(){return!this.dom||this.container?0:Math.max(0,this.top?this.dom.getBoundingClientRect().bottom-Math.max(0,this.view.scrollDOM.getBoundingClientRect().top):Math.min(innerHeight,this.view.scrollDOM.getBoundingClientRect().bottom)-this.dom.getBoundingClientRect().top)}syncClasses(){if(!(!this.container||this.classes==this.view.themeClasses)){for(let e of this.classes.split(" "))e&&this.container.classList.remove(e);for(let e of(this.classes=this.view.themeClasses).split(" "))e&&this.container.classList.add(e)}}}function el(n){let e=n.nextSibling;return n.remove(),e}const ar=D.define({enables:Dh});class bt extends Bt{compare(e){return this==e||this.constructor==e.constructor&&this.eq(e)}eq(e){return!1}destroy(e){}}bt.prototype.elementClass="";bt.prototype.toDOM=void 0;bt.prototype.mapMode=ce.TrackBefore;bt.prototype.startSide=bt.prototype.endSide=-1;bt.prototype.point=!0;const fs=D.define(),Dd={class:"",renderEmptyElements:!1,elementStyle:"",markers:()=>F.empty,lineMarker:()=>null,lineMarkerChange:null,initialSpacer:null,updateSpacer:null,domEventHandlers:{}},Ci=D.define();function Td(n){return[Th(),Ci.of(Object.assign(Object.assign({},Dd),n))]}const hr=D.define({combine:n=>n.some(e=>e)});function Th(n){let e=[Od];return n&&n.fixed===!1&&e.push(hr.of(!0)),e}const Od=be.fromClass(class{constructor(n){this.view=n,this.prevViewport=n.viewport,this.dom=document.createElement("div"),this.dom.className="cm-gutters",this.dom.setAttribute("aria-hidden","true"),this.dom.style.minHeight=this.view.contentHeight+"px",this.gutters=n.state.facet(Ci).map(e=>new il(n,e));for(let e of this.gutters)this.dom.appendChild(e.dom);this.fixed=!n.state.facet(hr),this.fixed&&(this.dom.style.position="sticky"),this.syncGutters(!1),n.scrollDOM.insertBefore(this.dom,n.contentDOM)}update(n){if(this.updateGutters(n)){let 
e=this.prevViewport,t=n.view.viewport,i=Math.min(e.to,t.to)-Math.max(e.from,t.from);this.syncGutters(i<(t.to-t.from)*.8)}n.geometryChanged&&(this.dom.style.minHeight=this.view.contentHeight+"px"),this.view.state.facet(hr)!=!this.fixed&&(this.fixed=!this.fixed,this.dom.style.position=this.fixed?"sticky":""),this.prevViewport=n.view.viewport}syncGutters(n){let e=this.dom.nextSibling;n&&this.dom.remove();let t=F.iter(this.view.state.facet(fs),this.view.viewport.from),i=[],s=this.gutters.map(r=>new Bd(r,this.view.viewport,-this.view.documentPadding.top));for(let r of this.view.viewportLineBlocks){let o;if(Array.isArray(r.type)){for(let l of r.type)if(l.type==W.Text){o=l;break}}else o=r.type==W.Text?r:void 0;if(o){i.length&&(i=[]),Oh(t,i,r.from);for(let l of s)l.line(this.view,o,i)}}for(let r of s)r.finish();n&&this.view.scrollDOM.insertBefore(this.dom,e)}updateGutters(n){let e=n.startState.facet(Ci),t=n.state.facet(Ci),i=n.docChanged||n.heightChanged||n.viewportChanged||!F.eq(n.startState.facet(fs),n.state.facet(fs),n.view.viewport.from,n.view.viewport.to);if(e==t)for(let s of this.gutters)s.update(n)&&(i=!0);else{i=!0;let s=[];for(let r of t){let o=e.indexOf(r);o<0?s.push(new il(this.view,r)):(this.gutters[o].update(n),s.push(this.gutters[o]))}for(let r of this.gutters)r.dom.remove(),s.indexOf(r)<0&&r.destroy();for(let r of s)this.dom.appendChild(r.dom);this.gutters=s}return i}destroy(){for(let n of this.gutters)n.destroy();this.dom.remove()}},{provide:n=>O.scrollMargins.of(e=>{let t=e.plugin(n);return!t||t.gutters.length==0||!t.fixed?null:e.textDirection==Z.LTR?{left:t.dom.offsetWidth}:{right:t.dom.offsetWidth}})});function tl(n){return Array.isArray(n)?n:[n]}function Oh(n,e,t){for(;n.value&&n.from<=t;)n.from==t&&e.push(n.value),n.next()}class Bd{constructor(e,t,i){this.gutter=e,this.height=i,this.localMarkers=[],this.i=0,this.cursor=F.iter(e.markers,t.from)}line(e,t,i){this.localMarkers.length&&(this.localMarkers=[]),Oh(this.cursor,this.localMarkers,t.from);let s=i.length?this.localMarkers.concat(i):this.localMarkers,r=this.gutter.config.lineMarker(e,t,s);r&&s.unshift(r);let o=this.gutter;if(s.length==0&&!o.config.renderEmptyElements)return;let l=t.top-this.height;if(this.i==o.elements.length){let a=new Bh(e,t.height,l,s);o.elements.push(a),o.dom.appendChild(a.dom)}else o.elements[this.i].update(e,t.height,l,s);this.height=t.bottom,this.i++}finish(){let e=this.gutter;for(;e.elements.length>this.i;){let t=e.elements.pop();e.dom.removeChild(t.dom),t.destroy()}}}class il{constructor(e,t){this.view=e,this.config=t,this.elements=[],this.spacer=null,this.dom=document.createElement("div"),this.dom.className="cm-gutter"+(this.config.class?" 
"+this.config.class:"");for(let i in t.domEventHandlers)this.dom.addEventListener(i,s=>{let r=e.lineBlockAtHeight(s.clientY-e.documentTop);t.domEventHandlers[i](e,r,s)&&s.preventDefault()});this.markers=tl(t.markers(e)),t.initialSpacer&&(this.spacer=new Bh(e,0,0,[t.initialSpacer(e)]),this.dom.appendChild(this.spacer.dom),this.spacer.dom.style.cssText+="visibility: hidden; pointer-events: none")}update(e){let t=this.markers;if(this.markers=tl(this.config.markers(e.view)),this.spacer&&this.config.updateSpacer){let s=this.config.updateSpacer(this.spacer.markers[0],e);s!=this.spacer.markers[0]&&this.spacer.update(e.view,0,0,[s])}let i=e.view.viewport;return!F.eq(this.markers,t,i.from,i.to)||(this.config.lineMarkerChange?this.config.lineMarkerChange(e):!1)}destroy(){for(let e of this.elements)e.destroy()}}class Bh{constructor(e,t,i,s){this.height=-1,this.above=0,this.markers=[],this.dom=document.createElement("div"),this.dom.className="cm-gutterElement",this.update(e,t,i,s)}update(e,t,i,s){this.height!=t&&(this.dom.style.height=(this.height=t)+"px"),this.above!=i&&(this.dom.style.marginTop=(this.above=i)?i+"px":""),Pd(this.markers,s)||this.setMarkers(e,s)}setMarkers(e,t){let i="cm-gutterElement",s=this.dom.firstChild;for(let r=0,o=0;;){let l=o,a=rr(l,a,h)||o(l,a,h):o}return i}})}});class us extends bt{constructor(e){super(),this.number=e}eq(e){return this.number==e.number}toDOM(){return document.createTextNode(this.number)}}function ds(n,e){return n.state.facet(zt).formatNumber(e,n.state)}const Rd=Ci.compute([zt],n=>({class:"cm-lineNumbers",renderEmptyElements:!1,markers(e){return e.state.facet(Ed)},lineMarker(e,t,i){return i.some(s=>s.toDOM)?null:new us(ds(e,e.state.doc.lineAt(t.from).number))},lineMarkerChange:e=>e.startState.facet(zt)!=e.state.facet(zt),initialSpacer(e){return new us(ds(e,nl(e.state.doc.lines)))},updateSpacer(e,t){let i=ds(t.view,nl(t.view.state.doc.lines));return i==e.number?e:new us(i)},domEventHandlers:n.facet(zt).domEventHandlers}));function Ld(n={}){return[zt.of(n),Th(),Rd]}function nl(n){let e=9;for(;e{throw new Error("This node type doesn't define a deserialize function")})}add(e){if(this.perNode)throw new RangeError("Can't add per-node props to node types");return typeof e!="function"&&(e=xe.match(e)),t=>{let i=e(t);return i===void 0?null:[this,i]}}}L.closedBy=new L({deserialize:n=>n.split(" ")});L.openedBy=new L({deserialize:n=>n.split(" ")});L.group=new L({deserialize:n=>n.split(" ")});L.contextHash=new L({perNode:!0});L.lookAhead=new L({perNode:!0});L.mounted=new L({perNode:!0});class _d{constructor(e,t,i){this.tree=e,this.overlay=t,this.parser=i}}const Vd=Object.create(null);class xe{constructor(e,t,i,s=0){this.name=e,this.props=t,this.id=i,this.flags=s}static define(e){let t=e.props&&e.props.length?Object.create(null):Vd,i=(e.top?1:0)|(e.skipped?2:0)|(e.error?4:0)|(e.name==null?8:0),s=new xe(e.name||"",t,e.id,i);if(e.props){for(let r of e.props)if(Array.isArray(r)||(r=r(s)),r){if(r[0].perNode)throw new RangeError("Can't store a per-node prop on a node type");t[r[0].id]=r[1]}}return s}prop(e){return this.props[e.id]}get isTop(){return(this.flags&1)>0}get isSkipped(){return(this.flags&2)>0}get isError(){return(this.flags&4)>0}get isAnonymous(){return(this.flags&8)>0}is(e){if(typeof e=="string"){if(this.name==e)return!0;let t=this.prop(L.group);return t?t.indexOf(e)>-1:!1}return this.id==e}static match(e){let t=Object.create(null);for(let i in e)for(let s of i.split(" "))t[s]=e[i];return i=>{for(let s=i.prop(L.group),r=-1;r<(s?s.length:0);r++){let 
o=t[r<0?i.name:s[r]];if(o)return o}}}}xe.none=new xe("",Object.create(null),0,8);class Lr{constructor(e){this.types=e;for(let t=0;t=s&&(o.type.isAnonymous||t(o)!==!1)){if(o.firstChild())continue;l=!0}for(;l&&i&&!o.type.isAnonymous&&i(o),!o.nextSibling();){if(!o.parent())return;l=!0}}}prop(e){return e.perNode?this.props?this.props[e.id]:void 0:this.type.prop(e)}get propValues(){let e=[];if(this.props)for(let t in this.props)e.push([+t,this.props[t]]);return e}balance(e={}){return this.children.length<=8?this:_r(xe.none,this.children,this.positions,0,this.children.length,0,this.length,(t,i,s)=>new z(this.type,t,i,s,this.propValues),e.makeTree||((t,i,s)=>new z(xe.none,t,i,s)))}static build(e){return Hd(e)}}z.empty=new z(xe.none,[],[],0);class Ir{constructor(e,t){this.buffer=e,this.index=t}get id(){return this.buffer[this.index-4]}get start(){return this.buffer[this.index-3]}get end(){return this.buffer[this.index-2]}get size(){return this.buffer[this.index-1]}get pos(){return this.index}next(){this.index-=4}fork(){return new Ir(this.buffer,this.index)}}class Vt{constructor(e,t,i){this.buffer=e,this.length=t,this.set=i}get type(){return xe.none}toString(){let e=[];for(let t=0;t0));a=o[a+3]);return l}slice(e,t,i){let s=this.buffer,r=new Uint16Array(t-e),o=0;for(let l=e,a=0;l=e&&te;case 1:return t<=e&&i>e;case 2:return i>e;case 4:return!0}}function Eh(n,e){let t=n.childBefore(e);for(;t;){let i=t.lastChild;if(!i||i.to!=t.to)break;i.type.isError&&i.from==i.to?(n=t,t=i.prevSibling):t=i}return n}function ei(n,e,t,i){for(var s;n.from==n.to||(t<1?n.from>=e:n.from>e)||(t>-1?n.to<=e:n.to0?l.length:-1;e!=h;e+=t){let c=l[e],f=a[e]+o.from;if(Ph(s,i,f,f+c.length)){if(c instanceof Vt){if(r&ee.ExcludeBuffers)continue;let u=c.findChild(0,c.buffer.length,t,i-f,s);if(u>-1)return new Ye(new Fd(o,c,e,f),null,u)}else if(r&ee.IncludeAnonymous||!c.type.isAnonymous||Nr(c)){let u;if(!(r&ee.IgnoreMounts)&&c.props&&(u=c.prop(L.mounted))&&!u.overlay)return new _e(u.tree,f,e,o);let d=new _e(c,f,e,o);return r&ee.IncludeAnonymous||!d.type.isAnonymous?d:d.nextChild(t<0?c.children.length-1:0,t,i,s)}}}if(r&ee.IncludeAnonymous||!o.type.isAnonymous||(o.index>=0?e=o.index+t:e=t<0?-1:o._parent._tree.children.length,o=o._parent,!o))return null}}get firstChild(){return this.nextChild(0,1,0,4)}get lastChild(){return this.nextChild(this._tree.children.length-1,-1,0,4)}childAfter(e){return this.nextChild(0,1,e,2)}childBefore(e){return this.nextChild(this._tree.children.length-1,-1,e,-2)}enter(e,t,i=0){let s;if(!(i&ee.IgnoreOverlays)&&(s=this._tree.prop(L.mounted))&&s.overlay){let r=e-this.from;for(let{from:o,to:l}of s.overlay)if((t>0?o<=r:o=r:l>r))return new _e(s.tree,s.overlay[0].from+this.from,-1,this)}return this.nextChild(0,1,e,t,i)}nextSignificantParent(){let e=this;for(;e.type.isAnonymous&&e._parent;)e=e._parent;return e}get parent(){return this._parent?this._parent.nextSignificantParent():null}get nextSibling(){return this._parent&&this.index>=0?this._parent.nextChild(this.index+1,1,0,4):null}get prevSibling(){return this._parent&&this.index>=0?this._parent.nextChild(this.index-1,-1,0,4):null}cursor(e=0){return new Ri(this,e)}get tree(){return this._tree}toTree(){return this._tree}resolve(e,t=0){return ei(this,e,t,!1)}resolveInner(e,t=0){return ei(this,e,t,!0)}enterUnfinishedNodesBefore(e){return Eh(this,e)}getChild(e,t=null,i=null){let s=On(this,e,t,i);return s.length?s[0]:null}getChildren(e,t=null,i=null){return On(this,e,t,i)}toString(){return this._tree.toString()}get node(){return this}matchContext(e){return 
Bn(this,e)}}function On(n,e,t,i){let s=n.cursor(),r=[];if(!s.firstChild())return r;if(t!=null){for(;!s.type.is(t);)if(!s.nextSibling())return r}for(;;){if(i!=null&&s.type.is(i))return r;if(s.type.is(e)&&r.push(s.node),!s.nextSibling())return i==null?r:[]}}function Bn(n,e,t=e.length-1){for(let i=n.parent;t>=0;i=i.parent){if(!i)return!1;if(!i.type.isAnonymous){if(e[t]&&e[t]!=i.name)return!1;t--}}return!0}class Fd{constructor(e,t,i,s){this.parent=e,this.buffer=t,this.index=i,this.start=s}}class Ye{get name(){return this.type.name}get from(){return this.context.start+this.context.buffer.buffer[this.index+1]}get to(){return this.context.start+this.context.buffer.buffer[this.index+2]}constructor(e,t,i){this.context=e,this._parent=t,this.index=i,this.type=e.buffer.set.types[e.buffer.buffer[i]]}child(e,t,i){let{buffer:s}=this.context,r=s.findChild(this.index+4,s.buffer[this.index+3],e,t-this.context.start,i);return r<0?null:new Ye(this.context,this,r)}get firstChild(){return this.child(1,0,4)}get lastChild(){return this.child(-1,0,4)}childAfter(e){return this.child(1,e,2)}childBefore(e){return this.child(-1,e,-2)}enter(e,t,i=0){if(i&ee.ExcludeBuffers)return null;let{buffer:s}=this.context,r=s.findChild(this.index+4,s.buffer[this.index+3],t>0?1:-1,e-this.context.start,t);return r<0?null:new Ye(this.context,this,r)}get parent(){return this._parent||this.context.parent.nextSignificantParent()}externalSibling(e){return this._parent?null:this.context.parent.nextChild(this.context.index+e,e,0,4)}get nextSibling(){let{buffer:e}=this.context,t=e.buffer[this.index+3];return t<(this._parent?e.buffer[this._parent.index+3]:e.buffer.length)?new Ye(this.context,this._parent,t):this.externalSibling(1)}get prevSibling(){let{buffer:e}=this.context,t=this._parent?this._parent.index+4:0;return this.index==t?this.externalSibling(-1):new Ye(this.context,this._parent,e.findChild(t,this.index,-1,0,4))}cursor(e=0){return new Ri(this,e)}get tree(){return null}toTree(){let e=[],t=[],{buffer:i}=this.context,s=this.index+4,r=i.buffer[this.index+3];if(r>s){let o=i.buffer[this.index+1];e.push(i.slice(s,r,o)),t.push(0)}return new z(this.type,e,t,this.to-this.from)}resolve(e,t=0){return ei(this,e,t,!1)}resolveInner(e,t=0){return ei(this,e,t,!0)}enterUnfinishedNodesBefore(e){return Eh(this,e)}toString(){return this.context.buffer.childString(this.index)}getChild(e,t=null,i=null){let s=On(this,e,t,i);return s.length?s[0]:null}getChildren(e,t=null,i=null){return On(this,e,t,i)}get node(){return this}matchContext(e){return Bn(this,e)}}class Ri{get name(){return this.type.name}constructor(e,t=0){if(this.mode=t,this.buffer=null,this.stack=[],this.index=0,this.bufferNode=null,e instanceof _e)this.yieldNode(e);else{this._tree=e.context.parent,this.buffer=e.context;for(let i=e._parent;i;i=i._parent)this.stack.unshift(i.index);this.bufferNode=e,this.yieldBuf(e.index)}}yieldNode(e){return e?(this._tree=e,this.type=e.type,this.from=e.from,this.to=e.to,!0):!1}yieldBuf(e,t){this.index=e;let{start:i,buffer:s}=this.buffer;return this.type=t||s.set.types[s.buffer[e]],this.from=i+s.buffer[e+1],this.to=i+s.buffer[e+2],!0}yield(e){return e?e instanceof _e?(this.buffer=null,this.yieldNode(e)):(this.buffer=e.context,this.yieldBuf(e.index,e.type)):!1}toString(){return this.buffer?this.buffer.buffer.childString(this.index):this._tree.toString()}enterChild(e,t,i){if(!this.buffer)return 
this.yield(this._tree.nextChild(e<0?this._tree._tree.children.length-1:0,e,t,i,this.mode));let{buffer:s}=this.buffer,r=s.findChild(this.index+4,s.buffer[this.index+3],e,t-this.buffer.start,i);return r<0?!1:(this.stack.push(this.index),this.yieldBuf(r))}firstChild(){return this.enterChild(1,0,4)}lastChild(){return this.enterChild(-1,0,4)}childAfter(e){return this.enterChild(1,e,2)}childBefore(e){return this.enterChild(-1,e,-2)}enter(e,t,i=this.mode){return this.buffer?i&ee.ExcludeBuffers?!1:this.enterChild(1,e,t):this.yield(this._tree.enter(e,t,i))}parent(){if(!this.buffer)return this.yieldNode(this.mode&ee.IncludeAnonymous?this._tree._parent:this._tree.parent);if(this.stack.length)return this.yieldBuf(this.stack.pop());let e=this.mode&ee.IncludeAnonymous?this.buffer.parent:this.buffer.parent.nextSignificantParent();return this.buffer=null,this.yieldNode(e)}sibling(e){if(!this.buffer)return this._tree._parent?this.yield(this._tree.index<0?null:this._tree._parent.nextChild(this._tree.index+e,e,0,4,this.mode)):!1;let{buffer:t}=this.buffer,i=this.stack.length-1;if(e<0){let s=i<0?0:this.stack[i]+4;if(this.index!=s)return this.yieldBuf(t.findChild(s,this.index,-1,0,4))}else{let s=t.buffer[this.index+3];if(s<(i<0?t.buffer.length:t.buffer[this.stack[i]+3]))return this.yieldBuf(s)}return i<0?this.yield(this.buffer.parent.nextChild(this.buffer.index+e,e,0,4,this.mode)):!1}nextSibling(){return this.sibling(1)}prevSibling(){return this.sibling(-1)}atLastNode(e){let t,i,{buffer:s}=this;if(s){if(e>0){if(this.index-1)for(let r=t+e,o=e<0?-1:i._tree.children.length;r!=o;r+=e){let l=i._tree.children[r];if(this.mode&ee.IncludeAnonymous||l instanceof Vt||!l.type.isAnonymous||Nr(l))return!1}return!0}move(e,t){if(t&&this.enterChild(e,0,4))return!0;for(;;){if(this.sibling(e))return!0;if(this.atLastNode(e)||!this.parent())return!1}}next(e=!0){return this.move(1,e)}prev(e=!0){return this.move(-1,e)}moveTo(e,t=0){for(;(this.from==this.to||(t<1?this.from>=e:this.from>e)||(t>-1?this.to<=e:this.to=0;){for(let o=e;o;o=o._parent)if(o.index==s){if(s==this.index)return o;t=o,i=r+1;break e}s=this.stack[--r]}for(let s=i;s=0;r--){if(r<0)return Bn(this.node,e,s);let o=i[t.buffer[this.stack[r]]];if(!o.isAnonymous){if(e[s]&&e[s]!=o.name)return!1;s--}}return!0}}function Nr(n){return n.children.some(e=>e instanceof Vt||!e.type.isAnonymous||Nr(e))}function Hd(n){var e;let{buffer:t,nodeSet:i,maxBufferLength:s=Id,reused:r=[],minRepeatType:o=i.types.length}=n,l=Array.isArray(t)?new Ir(t,t.length):t,a=i.types,h=0,c=0;function f(k,C,T,B,U){let{id:I,start:P,end:V,size:G}=l,Q=c;for(;G<0;)if(l.next(),G==-1){let J=r[I];T.push(J),B.push(P-k);return}else if(G==-3){h=I;return}else if(G==-4){c=I;return}else throw new RangeError(`Unrecognized record size: ${G}`);let M=a[I],$,Y,le=P-k;if(V-P<=s&&(Y=g(l.pos-C,U))){let J=new Uint16Array(Y.size-Y.skip),ie=l.pos-Y.size,nt=J.length;for(;l.pos>ie;)nt=y(Y.start,J,nt);$=new Vt(J,V-Y.start,i),le=Y.start-k}else{let J=l.pos-G;l.next();let ie=[],nt=[],vt=I>=o?I:-1,Ft=0,ji=V;for(;l.pos>J;)vt>=0&&l.id==vt&&l.size>=0?(l.end<=ji-s&&(d(ie,nt,P,Ft,l.end,ji,vt,Q),Ft=ie.length,ji=l.end),l.next()):f(P,J,ie,nt,vt);if(vt>=0&&Ft>0&&Ft-1&&Ft>0){let to=u(M);$=_r(M,ie,nt,0,ie.length,0,V-P,to,to)}else $=p(M,ie,nt,V-P,Q-V)}T.push($),B.push(le)}function u(k){return(C,T,B)=>{let U=0,I=C.length-1,P,V;if(I>=0&&(P=C[I])instanceof z){if(!I&&P.type==k&&P.length==B)return P;(V=P.prop(L.lookAhead))&&(U=T[I]+P.length+V)}return p(k,C,T,B,U)}}function d(k,C,T,B,U,I,P,V){let 
G=[],Q=[];for(;k.length>B;)G.push(k.pop()),Q.push(C.pop()+T-U);k.push(p(i.types[P],G,Q,I-U,V-I)),C.push(U-T)}function p(k,C,T,B,U=0,I){if(h){let P=[L.contextHash,h];I=I?[P].concat(I):[P]}if(U>25){let P=[L.lookAhead,U];I=I?[P].concat(I):[P]}return new z(k,C,T,B,I)}function g(k,C){let T=l.fork(),B=0,U=0,I=0,P=T.end-s,V={size:0,start:0,skip:0};e:for(let G=T.pos-k;T.pos>G;){let Q=T.size;if(T.id==C&&Q>=0){V.size=B,V.start=U,V.skip=I,I+=4,B+=4,T.next();continue}let M=T.pos-Q;if(Q<0||M=o?4:0,Y=T.start;for(T.next();T.pos>M;){if(T.size<0)if(T.size==-3)$+=4;else break e;else T.id>=o&&($+=4);T.next()}U=Y,B+=Q,I+=$}return(C<0||B==k)&&(V.size=B,V.start=U,V.skip=I),V.size>4?V:void 0}function y(k,C,T){let{id:B,start:U,end:I,size:P}=l;if(l.next(),P>=0&&B4){let G=l.pos-(P-4);for(;l.pos>G;)T=y(k,C,T)}C[--T]=V,C[--T]=I-k,C[--T]=U-k,C[--T]=B}else P==-3?h=B:P==-4&&(c=B);return T}let b=[],v=[];for(;l.pos>0;)f(n.start||0,n.bufferStart||0,b,v,-1);let S=(e=n.length)!==null&&e!==void 0?e:b.length?v[0]+b[0].length:0;return new z(a[n.topID],b.reverse(),v.reverse(),S)}const rl=new WeakMap;function gn(n,e){if(!n.isAnonymous||e instanceof Vt||e.type!=n)return 1;let t=rl.get(e);if(t==null){t=1;for(let i of e.children){if(i.type!=n||!(i instanceof z)){t=1;break}t+=gn(n,i)}rl.set(e,t)}return t}function _r(n,e,t,i,s,r,o,l,a){let h=0;for(let p=i;p=c)break;T+=B}if(S==k+1){if(T>c){let B=p[k];d(B.children,B.positions,0,B.children.length,g[k]+v);continue}f.push(p[k])}else{let B=g[S-1]+p[S-1].length-C;f.push(_r(n,p,g,k,S,C,B,null,a))}u.push(C+v-r)}}return d(e,t,i,s,0),(l||a)(f,u,o)}class ty{constructor(){this.map=new WeakMap}setBuffer(e,t,i){let s=this.map.get(e);s||this.map.set(e,s=new Map),s.set(t,i)}getBuffer(e,t){let i=this.map.get(e);return i&&i.get(t)}set(e,t){e instanceof Ye?this.setBuffer(e.context.buffer,e.index,t):e instanceof _e&&this.map.set(e.tree,t)}get(e){return e instanceof Ye?this.getBuffer(e.context.buffer,e.index):e instanceof _e?this.map.get(e.tree):void 0}cursorSet(e,t){e.buffer?this.setBuffer(e.buffer.buffer,e.index,t):this.map.set(e.tree,t)}cursorGet(e){return e.buffer?this.getBuffer(e.buffer.buffer,e.index):this.map.get(e.tree)}}class rt{constructor(e,t,i,s,r=!1,o=!1){this.from=e,this.to=t,this.tree=i,this.offset=s,this.open=(r?1:0)|(o?2:0)}get openStart(){return(this.open&1)>0}get openEnd(){return(this.open&2)>0}static addTree(e,t=[],i=!1){let s=[new rt(0,e.length,e,0,!1,i)];for(let r of t)r.to>e.length&&s.push(r);return s}static applyChanges(e,t,i=128){if(!t.length)return e;let s=[],r=1,o=e.length?e[0]:null;for(let l=0,a=0,h=0;;l++){let c=l=i)for(;o&&o.from=u.from||f<=u.to||h){let d=Math.max(u.from,a)-h,p=Math.min(u.to,f)-h;u=d>=p?null:new rt(d,p,u.tree,u.offset+h,l>0,!!c)}if(u&&s.push(u),o.to>f)break;o=rnew Le(s.from,s.to)):[new Le(0,0)]:[new Le(0,e.length)],this.createParse(e,t||[],i)}parse(e,t,i){let s=this.startParse(e,t,i);for(;;){let r=s.advance();if(r)return r}}}class Wd{constructor(e){this.string=e}get length(){return this.string.length}chunk(e){return this.string.slice(e)}get lineChunks(){return!1}read(e,t){return this.string.slice(e,t)}}function iy(n){return(e,t,i,s)=>new qd(e,n,t,i,s)}class ol{constructor(e,t,i,s,r){this.parser=e,this.parse=t,this.overlay=i,this.target=s,this.ranges=r}}class zd{constructor(e,t,i,s,r,o,l){this.parser=e,this.predicate=t,this.mounts=i,this.index=s,this.start=r,this.target=o,this.prev=l,this.depth=0,this.ranges=[]}}const cr=new L({perNode:!0});class 
qd{constructor(e,t,i,s,r){this.nest=t,this.input=i,this.fragments=s,this.ranges=r,this.inner=[],this.innerDone=0,this.baseTree=null,this.stoppedAt=null,this.baseParse=e}advance(){if(this.baseParse){let i=this.baseParse.advance();if(!i)return null;if(this.baseParse=null,this.baseTree=i,this.startInner(),this.stoppedAt!=null)for(let s of this.inner)s.parse.stopAt(this.stoppedAt)}if(this.innerDone==this.inner.length){let i=this.baseTree;return this.stoppedAt!=null&&(i=new z(i.type,i.children,i.positions,i.length,i.propValues.concat([[cr,this.stoppedAt]]))),i}let e=this.inner[this.innerDone],t=e.parse.advance();if(t){this.innerDone++;let i=Object.assign(Object.create(null),e.target.props);i[L.mounted.id]=new _d(t,e.overlay,e.parser),e.target.props=i}return null}get parsedPos(){if(this.baseParse)return 0;let e=this.input.length;for(let t=this.innerDone;tc.frag.from<=s.from&&c.frag.to>=s.to&&c.mount.overlay);if(h)for(let c of h.mount.overlay){let f=c.from+h.pos,u=c.to+h.pos;f>=s.from&&u<=s.to&&!t.ranges.some(d=>d.fromf)&&t.ranges.push({from:f,to:u})}}l=!1}else if(i&&(o=jd(i.ranges,s.from,s.to)))l=o!=2;else if(!s.type.isAnonymous&&s.fromnew Le(f.from-s.from,f.to-s.from)):null,s.tree,c)),r.overlay?c.length&&(i={ranges:c,depth:0,prev:i}):l=!1}}else t&&(a=t.predicate(s))&&(a===!0&&(a=new Le(s.from,s.to)),a.fromnew Le(c.from-t.start,c.to-t.start)),t.target,h)),t=t.prev}i&&!--i.depth&&(i=i.prev)}}}}function jd(n,e,t){for(let i of n){if(i.from>=t)break;if(i.to>e)return i.from<=e&&i.to>=t?2:1}return 0}function ll(n,e,t,i,s,r){if(e=e.to);i++);let o=s.children[i],l=o.buffer;function a(h,c,f,u,d){let p=h;for(;l[p+2]+r<=e.from;)p=l[p+3];let g=[],y=[];ll(o,h,p,g,y,u);let b=l[p+1],v=l[p+2],S=b+r==e.from&&v+r==e.to&&l[p]==e.type.id;return g.push(S?e.toTree():a(p+4,l[p+3],o.set.types[l[p]],b,v-b)),y.push(b-u),ll(o,l[p+3],c,g,y,u),new z(f,g,y,d)}s.children[i]=a(0,l.length,xe.none,0,o.length);for(let h=0;h<=t;h++)n.childAfter(e.from)}class al{constructor(e,t){this.offset=t,this.done=!1,this.cursor=e.cursor(ee.IncludeAnonymous|ee.IgnoreMounts)}moveTo(e){let{cursor:t}=this,i=e-this.offset;for(;!this.done&&t.from=e&&t.enter(i,1,ee.IgnoreOverlays|ee.ExcludeBuffers)||t.next(!1)||(this.done=!0)}hasNode(e){if(this.moveTo(e.from),!this.done&&this.cursor.from+this.offset==e.from&&this.cursor.tree)for(let t=this.cursor.tree;;){if(t==e.tree)return!0;if(t.children.length&&t.positions[0]==0&&t.children[0]instanceof z)t=t.children[0];else break}return!1}}class Ud{constructor(e){var t;if(this.fragments=e,this.curTo=0,this.fragI=0,e.length){let i=this.curFrag=e[0];this.curTo=(t=i.tree.prop(cr))!==null&&t!==void 0?t:i.to,this.inner=new al(i.tree,-i.offset)}else this.curFrag=this.inner=null}hasNode(e){for(;this.curFrag&&e.from>=this.curTo;)this.nextFrag();return this.curFrag&&this.curFrag.from<=e.from&&this.curTo>=e.to&&this.inner.hasNode(e)}nextFrag(){var e;if(this.fragI++,this.fragI==this.fragments.length)this.curFrag=this.inner=null;else{let t=this.curFrag=this.fragments[this.fragI];this.curTo=(e=t.tree.prop(cr))!==null&&e!==void 0?e:t.to,this.inner=new al(t.tree,-t.offset)}}findMounts(e,t){var i;let s=[];if(this.inner){this.inner.cursor.moveTo(e,1);for(let r=this.inner.cursor.node;r;r=r.parent){let o=(i=r.tree)===null||i===void 0?void 0:i.prop(L.mounted);if(o&&o.parser==t)for(let l=this.fragI;l=r.to)break;a.tree==this.curFrag.tree&&s.push({frag:a,pos:r.from-a.offset,mount:o})}}}return s}}function hl(n,e){let t=null,i=e;for(let s=1,r=0;s=l)break;a.to<=o||(t||(i=t=e.slice()),a.froml&&t.splice(r+1,0,new 
Le(l,a.to))):a.to>l?t[r--]=new Le(l,a.to):t.splice(r--,1))}}return i}function Gd(n,e,t,i){let s=0,r=0,o=!1,l=!1,a=-1e9,h=[];for(;;){let c=s==n.length?1e9:o?n[s].to:n[s].from,f=r==e.length?1e9:l?e[r].to:e[r].from;if(o!=l){let u=Math.max(a,t),d=Math.min(c,f,i);unew Le(u.from+i,u.to+i)),f=Gd(e,c,a,h);for(let u=0,d=a;;u++){let p=u==f.length,g=p?h:f[u].from;if(g>d&&t.push(new rt(d,g,s.tree,-o,r.from>=d||r.openStart,r.to<=g||r.openEnd)),p)break;d=f[u].to}}else t.push(new rt(a,h,s.tree,-o,r.from>=o||r.openStart,r.to<=l||r.openEnd))}return t}let $d=0;class Ge{constructor(e,t,i){this.set=e,this.base=t,this.modified=i,this.id=$d++}static define(e){if(e?.base)throw new Error("Can not derive from a modified tag");let t=new Ge([],null,[]);if(t.set.push(t),e)for(let i of e.set)t.set.push(i);return t}static defineModifier(){let e=new Pn;return t=>t.modified.indexOf(e)>-1?t:Pn.get(t.base||t,t.modified.concat(e).sort((i,s)=>i.id-s.id))}}let Jd=0;class Pn{constructor(){this.instances=[],this.id=Jd++}static get(e,t){if(!t.length)return e;let i=t[0].instances.find(l=>l.base==e&&Yd(t,l.modified));if(i)return i;let s=[],r=new Ge(s,e,t);for(let l of t)l.instances.push(r);let o=Xd(t);for(let l of e.set)if(!l.modified.length)for(let a of o)s.push(Pn.get(l,a));return r}}function Yd(n,e){return n.length==e.length&&n.every((t,i)=>t==e[i])}function Xd(n){let e=[[]];for(let t=0;ti.length-t.length)}function Zd(n){let e=Object.create(null);for(let t in n){let i=n[t];Array.isArray(i)||(i=[i]);for(let s of t.split(" "))if(s){let r=[],o=2,l=s;for(let f=0;;){if(l=="..."&&f>0&&f+3==s.length){o=1;break}let u=/^"(?:[^"\\]|\\.)*?"|[^\/!]+/.exec(l);if(!u)throw new RangeError("Invalid path: "+s);if(r.push(u[0]=="*"?"":u[0][0]=='"'?JSON.parse(u[0]):u[0]),f+=u[0].length,f==s.length)break;let d=s[f++];if(f==s.length&&d=="!"){o=0;break}if(d!="/")throw new RangeError("Invalid path: "+s);l=s.slice(f)}let a=r.length-1,h=r[a];if(!h)throw new RangeError("Invalid path: "+s);let c=new En(i,o,a>0?r.slice(0,a):null);e[h]=c.sort(e[h])}}return Lh.add(e)}const Lh=new L;class En{constructor(e,t,i,s){this.tags=e,this.mode=t,this.context=i,this.next=s}get opaque(){return this.mode==0}get inherit(){return this.mode==1}sort(e){return!e||e.depth{let o=s;for(let l of r)for(let a of l.set){let h=t[a.id];if(h){o=o?o+" "+h:h;break}}return o},scope:i}}function Qd(n,e){let t=null;for(let i of n){let s=i.style(e);s&&(t=t?t+" "+s:s)}return t}function ep(n,e,t,i=0,s=n.length){let r=new tp(i,Array.isArray(e)?e:[e],t);r.highlightRange(n.cursor(),i,s,"",r.highlighters),r.flush(s)}class tp{constructor(e,t,i){this.at=e,this.highlighters=t,this.span=i,this.class=""}startSpan(e,t){t!=this.class&&(this.flush(e),e>this.at&&(this.at=e),this.class=t)}flush(e){e>this.at&&this.class&&this.span(this.at,e,this.class)}highlightRange(e,t,i,s,r){let{type:o,from:l,to:a}=e;if(l>=i||a<=t)return;o.isTop&&(r=this.highlighters.filter(d=>!d.scope||d.scope(o)));let h=s,c=ip(e)||En.empty,f=Qd(r,c.tags);if(f&&(h&&(h+=" "),h+=f,c.mode==1&&(s+=(s?" 
":"")+f)),this.startSpan(e.from,h),c.opaque)return;let u=e.tree&&e.tree.prop(L.mounted);if(u&&u.overlay){let d=e.node.enter(u.overlay[0].from+l,1),p=this.highlighters.filter(y=>!y.scope||y.scope(u.tree.type)),g=e.firstChild();for(let y=0,b=l;;y++){let v=y=S||!e.nextSibling())););if(!v||S>i)break;b=v.to+l,b>t&&(this.highlightRange(d.cursor(),Math.max(t,v.from+l),Math.min(i,b),s,p),this.startSpan(b,h))}g&&e.parent()}else if(e.firstChild()){do if(!(e.to<=t)){if(e.from>=i)break;this.highlightRange(e,t,i,s,r),this.startSpan(Math.min(i,e.to),h)}while(e.nextSibling());e.parent()}}}function ip(n){let e=n.type.prop(Lh);for(;e&&e.context&&!n.matchContext(e.context);)e=e.next;return e||null}const x=Ge.define,nn=x(),ot=x(),fl=x(ot),ul=x(ot),lt=x(),sn=x(lt),ps=x(lt),Ke=x(),xt=x(Ke),qe=x(),je=x(),fr=x(),ui=x(fr),rn=x(),m={comment:nn,lineComment:x(nn),blockComment:x(nn),docComment:x(nn),name:ot,variableName:x(ot),typeName:fl,tagName:x(fl),propertyName:ul,attributeName:x(ul),className:x(ot),labelName:x(ot),namespace:x(ot),macroName:x(ot),literal:lt,string:sn,docString:x(sn),character:x(sn),attributeValue:x(sn),number:ps,integer:x(ps),float:x(ps),bool:x(lt),regexp:x(lt),escape:x(lt),color:x(lt),url:x(lt),keyword:qe,self:x(qe),null:x(qe),atom:x(qe),unit:x(qe),modifier:x(qe),operatorKeyword:x(qe),controlKeyword:x(qe),definitionKeyword:x(qe),moduleKeyword:x(qe),operator:je,derefOperator:x(je),arithmeticOperator:x(je),logicOperator:x(je),bitwiseOperator:x(je),compareOperator:x(je),updateOperator:x(je),definitionOperator:x(je),typeOperator:x(je),controlOperator:x(je),punctuation:fr,separator:x(fr),bracket:ui,angleBracket:x(ui),squareBracket:x(ui),paren:x(ui),brace:x(ui),content:Ke,heading:xt,heading1:x(xt),heading2:x(xt),heading3:x(xt),heading4:x(xt),heading5:x(xt),heading6:x(xt),contentSeparator:x(Ke),list:x(Ke),quote:x(Ke),emphasis:x(Ke),strong:x(Ke),link:x(Ke),monospace:x(Ke),strikethrough:x(Ke),inserted:x(),deleted:x(),changed:x(),invalid:x(),meta:rn,documentMeta:x(rn),annotation:x(rn),processingInstruction:x(rn),definition:Ge.defineModifier(),constant:Ge.defineModifier(),function:Ge.defineModifier(),standard:Ge.defineModifier(),local:Ge.defineModifier(),special:Ge.defineModifier()};Ih([{tag:m.link,class:"tok-link"},{tag:m.heading,class:"tok-heading"},{tag:m.emphasis,class:"tok-emphasis"},{tag:m.strong,class:"tok-strong"},{tag:m.keyword,class:"tok-keyword"},{tag:m.atom,class:"tok-atom"},{tag:m.bool,class:"tok-bool"},{tag:m.url,class:"tok-url"},{tag:m.labelName,class:"tok-labelName"},{tag:m.inserted,class:"tok-inserted"},{tag:m.deleted,class:"tok-deleted"},{tag:m.literal,class:"tok-literal"},{tag:m.string,class:"tok-string"},{tag:m.number,class:"tok-number"},{tag:[m.regexp,m.escape,m.special(m.string)],class:"tok-string2"},{tag:m.variableName,class:"tok-variableName"},{tag:m.local(m.variableName),class:"tok-variableName tok-local"},{tag:m.definition(m.variableName),class:"tok-variableName tok-definition"},{tag:m.special(m.variableName),class:"tok-variableName2"},{tag:m.definition(m.propertyName),class:"tok-propertyName tok-definition"},{tag:m.typeName,class:"tok-typeName"},{tag:m.namespace,class:"tok-namespace"},{tag:m.className,class:"tok-className"},{tag:m.macroName,class:"tok-macroName"},{tag:m.propertyName,class:"tok-propertyName"},{tag:m.operator,class:"tok-operator"},{tag:m.comment,class:"tok-comment"},{tag:m.meta,class:"tok-meta"},{tag:m.invalid,class:"tok-invalid"},{tag:m.punctuation,class:"tok-punctuation"}]);var ms;const Dt=new L;function Nh(n){return D.define({combine:n?e=>e.concat(n):void 
0})}const np=new L;class Ie{constructor(e,t,i=[],s=""){this.data=e,this.name=s,N.prototype.hasOwnProperty("tree")||Object.defineProperty(N.prototype,"tree",{get(){return pe(this)}}),this.parser=t,this.extension=[wt.of(this),N.languageData.of((r,o,l)=>{let a=dl(r,o,l),h=a.type.prop(Dt);if(!h)return[];let c=r.facet(h),f=a.type.prop(np);if(f){let u=a.resolve(o-a.from,l);for(let d of f)if(d.test(u,r)){let p=r.facet(d.facet);return d.type=="replace"?p:p.concat(c)}}return c})].concat(i)}isActiveAt(e,t,i=-1){return dl(e,t,i).type.prop(Dt)==this.data}findRegions(e){let t=e.facet(wt);if(t?.data==this.data)return[{from:0,to:e.doc.length}];if(!t||!t.allowsNesting)return[];let i=[],s=(r,o)=>{if(r.prop(Dt)==this.data){i.push({from:o,to:o+r.length});return}let l=r.prop(L.mounted);if(l){if(l.tree.prop(Dt)==this.data){if(l.overlay)for(let a of l.overlay)i.push({from:a.from+o,to:a.to+o});else i.push({from:o,to:o+r.length});return}else if(l.overlay){let a=i.length;if(s(l.tree,l.overlay[0].from+o),i.length>a)return}}for(let a=0;ai.isTop?t:void 0)]}),e.name)}configure(e,t){return new ur(this.data,this.parser.configure(e),t||this.name)}get allowsNesting(){return this.parser.hasWrappers()}}function pe(n){let e=n.field(Ie.state,!1);return e?e.tree:z.empty}class sp{constructor(e){this.doc=e,this.cursorPos=0,this.string="",this.cursor=e.iter()}get length(){return this.doc.length}syncTo(e){return this.string=this.cursor.next(e-this.cursorPos).value,this.cursorPos=e+this.string.length,this.cursorPos-this.string.length}chunk(e){return this.syncTo(e),this.string}get lineChunks(){return!0}read(e,t){let i=this.cursorPos-this.string.length;return e=this.cursorPos?this.doc.sliceString(e,t):this.string.slice(e-i,t-i)}}let di=null;class ti{constructor(e,t,i=[],s,r,o,l,a){this.parser=e,this.state=t,this.fragments=i,this.tree=s,this.treeLen=r,this.viewport=o,this.skipped=l,this.scheduleOn=a,this.parse=null,this.tempSkipped=[]}static create(e,t,i){return new ti(e,t,[],z.empty,0,i,[],null)}startParse(){return this.parser.startParse(new sp(this.state.doc),this.fragments)}work(e,t){return t!=null&&t>=this.state.doc.length&&(t=void 0),this.tree!=z.empty&&this.isDone(t??this.state.doc.length)?(this.takeTree(),!0):this.withContext(()=>{var i;if(typeof e=="number"){let s=Date.now()+e;e=()=>Date.now()>s}for(this.parse||(this.parse=this.startParse()),t!=null&&(this.parse.stoppedAt==null||this.parse.stoppedAt>t)&&t=this.treeLen&&((this.parse.stoppedAt==null||this.parse.stoppedAt>e)&&this.parse.stopAt(e),this.withContext(()=>{for(;!(t=this.parse.advance()););}),this.treeLen=e,this.tree=t,this.fragments=this.withoutTempSkipped(rt.addTree(this.tree,this.fragments,!0)),this.parse=null)}withContext(e){let t=di;di=this;try{return e()}finally{di=t}}withoutTempSkipped(e){for(let t;t=this.tempSkipped.pop();)e=pl(e,t.from,t.to);return e}changes(e,t){let{fragments:i,tree:s,treeLen:r,viewport:o,skipped:l}=this;if(this.takeTree(),!e.empty){let a=[];if(e.iterChangedRanges((h,c,f,u)=>a.push({fromA:h,toA:c,fromB:f,toB:u})),i=rt.applyChanges(i,a),s=z.empty,r=0,o={from:e.mapPos(o.from,-1),to:e.mapPos(o.to,1)},this.skipped.length){l=[];for(let h of this.skipped){let c=e.mapPos(h.from,1),f=e.mapPos(h.to,-1);ce.from&&(this.fragments=pl(this.fragments,s,r),this.skipped.splice(i--,1))}return this.skipped.length>=t?!1:(this.reset(),!0)}reset(){this.parse&&(this.takeTree(),this.parse=null)}skipUntilInView(e,t){this.skipped.push({from:e,to:t})}static getSkippingParser(e){return new class extends Rh{createParse(t,i,s){let 
r=s[0].from,o=s[s.length-1].to;return{parsedPos:r,advance(){let a=di;if(a){for(let h of s)a.tempSkipped.push(h);e&&(a.scheduleOn=a.scheduleOn?Promise.all([a.scheduleOn,e]):e)}return this.parsedPos=o,new z(xe.none,[],[],o-r)},stoppedAt:null,stopAt(){}}}}}isDone(e){e=Math.min(e,this.state.doc.length);let t=this.fragments;return this.treeLen>=e&&t.length&&t[0].from==0&&t[0].to>=e}static get(){return di}}function pl(n,e,t){return rt.applyChanges(n,[{fromA:e,toA:t,fromB:e,toB:t}])}class ii{constructor(e){this.context=e,this.tree=e.tree}apply(e){if(!e.docChanged&&this.tree==this.context.tree)return this;let t=this.context.changes(e.changes,e.state),i=this.context.treeLen==e.startState.doc.length?void 0:Math.max(e.changes.mapPos(this.context.treeLen),t.viewport.to);return t.work(20,i)||t.takeTree(),new ii(t)}static init(e){let t=Math.min(3e3,e.doc.length),i=ti.create(e.facet(wt).parser,e,{from:0,to:t});return i.work(20,t)||i.takeTree(),new ii(i)}}Ie.state=Me.define({create:ii.init,update(n,e){for(let t of e.effects)if(t.is(Ie.setState))return t.value;return e.startState.facet(wt)!=e.state.facet(wt)?ii.init(e.state):n.apply(e)}});let _h=n=>{let e=setTimeout(()=>n(),500);return()=>clearTimeout(e)};typeof requestIdleCallback<"u"&&(_h=n=>{let e=-1,t=setTimeout(()=>{e=requestIdleCallback(n,{timeout:500-100})},100);return()=>e<0?clearTimeout(t):cancelIdleCallback(e)});const gs=typeof navigator<"u"&&(!((ms=navigator.scheduling)===null||ms===void 0)&&ms.isInputPending)?()=>navigator.scheduling.isInputPending():null,rp=be.fromClass(class{constructor(e){this.view=e,this.working=null,this.workScheduled=0,this.chunkEnd=-1,this.chunkBudget=-1,this.work=this.work.bind(this),this.scheduleWork()}update(e){let t=this.view.state.field(Ie.state).context;(t.updateViewport(e.view.viewport)||this.view.viewport.to>t.treeLen)&&this.scheduleWork(),e.docChanged&&(this.view.hasFocus&&(this.chunkBudget+=50),this.scheduleWork()),this.checkAsyncSchedule(t)}scheduleWork(){if(this.working)return;let{state:e}=this.view,t=e.field(Ie.state);(t.tree!=t.context.tree||!t.context.isDone(e.doc.length))&&(this.working=_h(this.work))}work(e){this.working=null;let t=Date.now();if(this.chunkEnds+1e3,a=r.context.work(()=>gs&&gs()||Date.now()>o,s+(l?0:1e5));this.chunkBudget-=Date.now()-t,(a||this.chunkBudget<=0)&&(r.context.takeTree(),this.view.dispatch({effects:Ie.setState.of(new ii(r.context))})),this.chunkBudget>0&&!(a&&!l)&&this.scheduleWork(),this.checkAsyncSchedule(r.context)}checkAsyncSchedule(e){e.scheduleOn&&(this.workScheduled++,e.scheduleOn.then(()=>this.scheduleWork()).catch(t=>He(this.view.state,t)).then(()=>this.workScheduled--),e.scheduleOn=null)}destroy(){this.working&&this.working()}isWorking(){return!!(this.working||this.workScheduled>0)}},{eventHandlers:{focus(){this.scheduleWork()}}}),wt=D.define({combine(n){return n.length?n[0]:null},enables:n=>[Ie.state,rp,O.contentAttributes.compute([n],e=>{let t=e.facet(n);return t&&t.name?{"data-language":t.name}:{}})]});class sy{constructor(e,t=[]){this.language=e,this.support=t,this.extension=[e,t]}}class Vh{constructor(e,t,i,s,r,o=void 0){this.name=e,this.alias=t,this.extensions=i,this.filename=s,this.loadFunc=r,this.support=o,this.loading=null}load(){return this.loading||(this.loading=this.loadFunc().then(e=>this.support=e,e=>{throw this.loading=null,e}))}static of(e){let{load:t,support:i}=e;if(!t){if(!i)throw new RangeError("Must pass either 'load' or 'support' to LanguageDescription.of");t=()=>Promise.resolve(i)}return new 
Vh(e.name,(e.alias||[]).concat(e.name).map(s=>s.toLowerCase()),e.extensions||[],e.filename,t,i)}static matchFilename(e,t){for(let s of e)if(s.filename&&s.filename.test(t))return s;let i=/\.([^.]+)$/.exec(t);if(i){for(let s of e)if(s.extensions.indexOf(i[1])>-1)return s}return null}static matchLanguageName(e,t,i=!0){t=t.toLowerCase();for(let s of e)if(s.alias.some(r=>r==t))return s;if(i)for(let s of e)for(let r of s.alias){let o=t.indexOf(r);if(o>-1&&(r.length>2||!/\w/.test(t[o-1])&&!/\w/.test(t[o+r.length])))return s}return null}}const Fh=D.define(),jn=D.define({combine:n=>{if(!n.length)return" ";let e=n[0];if(!e||/\S/.test(e)||Array.from(e).some(t=>t!=e[0]))throw new Error("Invalid indent unit: "+JSON.stringify(n[0]));return e}});function Rt(n){let e=n.facet(jn);return e.charCodeAt(0)==9?n.tabSize*e.length:e.length}function Li(n,e){let t="",i=n.tabSize,s=n.facet(jn)[0];if(s==" "){for(;e>=i;)t+=" ",e-=i;s=" "}for(let r=0;r=i.from&&s<=i.to?r&&s==e?{text:"",from:e}:(t<0?s-1&&(r+=o-this.countColumn(i,i.search(/\S|$/))),r}countColumn(e,t=e.length){return Fi(e,this.state.tabSize,t)}lineIndent(e,t=1){let{text:i,from:s}=this.lineAt(e,t),r=this.options.overrideIndentation;if(r){let o=r(s);if(o>-1)return o}return this.countColumn(i,i.search(/\S|$/))}get simulatedBreak(){return this.options.simulateBreak||null}}const op=new L;function lp(n,e,t){return Hh(e.resolveInner(t).enterUnfinishedNodesBefore(t),t,n)}function ap(n){return n.pos==n.options.simulateBreak&&n.options.simulateDoubleBreak}function hp(n){let e=n.type.prop(op);if(e)return e;let t=n.firstChild,i;if(t&&(i=t.type.prop(L.closedBy))){let s=n.lastChild,r=s&&i.indexOf(s.name)>-1;return o=>Wh(o,!0,1,void 0,r&&!ap(o)?s.from:void 0)}return n.parent==null?cp:null}function Hh(n,e,t){for(;n;n=n.parent){let i=hp(n);if(i)return i(Fr.create(t,e,n))}return null}function cp(){return 0}class Fr extends Kn{constructor(e,t,i){super(e.state,e.options),this.base=e,this.pos=t,this.node=i}static create(e,t,i){return new Fr(e,t,i)}get textAfter(){return this.textAfterPos(this.pos)}get baseIndent(){let e=this.state.doc.lineAt(this.node.from);for(;;){let t=this.node.resolve(e.from);for(;t.parent&&t.parent.from==t.from;)t=t.parent;if(fp(t,this.node))break;e=this.state.doc.lineAt(t.from)}return this.lineIndent(e.from)}continue(){let e=this.node.parent;return e?Hh(e,this.pos,this.base):0}}function fp(n,e){for(let t=e;t;t=t.parent)if(n==t)return!0;return!1}function up(n){let e=n.node,t=e.childAfter(e.from),i=e.lastChild;if(!t)return null;let s=n.options.simulateBreak,r=n.state.doc.lineAt(t.from),o=s==null||s<=r.from?r.to:Math.min(r.to,s);for(let l=t.to;;){let a=e.childAfter(l);if(!a||a==i)return null;if(!a.type.isSkipped)return a.fromWh(i,e,t,n)}function Wh(n,e,t,i,s){let r=n.textAfter,o=r.match(/^\s*/)[0].length,l=i&&r.slice(o,o+i.length)==i||s==n.pos+o,a=e?up(n):null;return a?l?n.column(a.from):n.column(a.to):n.baseIndent+(l?0:n.unit*t)}const oy=n=>n.baseIndent;function ly({except:n,units:e=1}={}){return t=>{let i=n&&n.test(t.textAfter);return t.baseIndent+(i?0:e*t.unit)}}const dp=200;function pp(){return N.transactionFilter.of(n=>{if(!n.docChanged||!n.isUserEvent("input.type")&&!n.isUserEvent("input.complete"))return n;let e=n.startState.languageDataAt("indentOnInput",n.startState.selection.main.head);if(!e.length)return n;let t=n.newDoc,{head:i}=n.newSelection.main,s=t.lineAt(i);if(i>s.from+dp)return n;let r=t.sliceString(s.from,i);if(!e.some(h=>h.test(r)))return n;let{state:o}=n,l=-1,a=[];for(let{head:h}of o.selection.ranges){let 
c=o.doc.lineAt(h);if(c.from==l)continue;l=c.from;let f=Vr(o,c.from);if(f==null)continue;let u=/^\s*/.exec(c.text)[0],d=Li(o,f);u!=d&&a.push({from:c.from,to:c.from+u.length,insert:d})}return a.length?[n,{changes:a,sequential:!0}]:n})}const mp=D.define(),gp=new L;function ay(n){let e=n.firstChild,t=n.lastChild;return e&&e.tot)continue;if(r&&o.from=e&&a.to>t&&(r=a)}}return r}function bp(n){let e=n.lastChild;return e&&e.to==n.to&&e.type.isError}function Rn(n,e,t){for(let i of n.facet(mp)){let s=i(n,e,t);if(s)return s}return yp(n,e,t)}function zh(n,e){let t=e.mapPos(n.from,1),i=e.mapPos(n.to,-1);return t>=i?void 0:{from:t,to:i}}const Un=R.define({map:zh}),Wi=R.define({map:zh});function qh(n){let e=[];for(let{head:t}of n.state.selection.ranges)e.some(i=>i.from<=t&&i.to>=t)||e.push(n.lineBlockAt(t));return e}const Lt=Me.define({create(){return E.none},update(n,e){n=n.map(e.changes);for(let t of e.effects)t.is(Un)&&!wp(n,t.value.from,t.value.to)?n=n.update({add:[ml.range(t.value.from,t.value.to)]}):t.is(Wi)&&(n=n.update({filter:(i,s)=>t.value.from!=i||t.value.to!=s,filterFrom:t.value.from,filterTo:t.value.to}));if(e.selection){let t=!1,{head:i}=e.selection.main;n.between(i,i,(s,r)=>{si&&(t=!0)}),t&&(n=n.update({filterFrom:i,filterTo:i,filter:(s,r)=>r<=i||s>=i}))}return n},provide:n=>O.decorations.from(n),toJSON(n,e){let t=[];return n.between(0,e.doc.length,(i,s)=>{t.push(i,s)}),t},fromJSON(n){if(!Array.isArray(n)||n.length%2)throw new RangeError("Invalid JSON for fold state");let e=[];for(let t=0;t{(!s||s.from>r)&&(s={from:r,to:o})}),s}function wp(n,e,t){let i=!1;return n.between(e,e,(s,r)=>{s==e&&r==t&&(i=!0)}),i}function jh(n,e){return n.field(Lt,!1)?e:e.concat(R.appendConfig.of(Gh()))}const kp=n=>{for(let e of qh(n)){let t=Rn(n.state,e.from,e.to);if(t)return n.dispatch({effects:jh(n.state,[Un.of(t),Kh(n,t)])}),!0}return!1},vp=n=>{if(!n.state.field(Lt,!1))return!1;let e=[];for(let t of qh(n)){let i=Ln(n.state,t.from,t.to);i&&e.push(Wi.of(i),Kh(n,i,!1))}return e.length&&n.dispatch({effects:e}),e.length>0};function Kh(n,e,t=!0){let i=n.state.doc.lineAt(e.from).number,s=n.state.doc.lineAt(e.to).number;return O.announce.of(`${n.state.phrase(t?"Folded lines":"Unfolded lines")} ${i} ${n.state.phrase("to")} ${s}.`)}const xp=n=>{let{state:e}=n,t=[];for(let i=0;i{let e=n.state.field(Lt,!1);if(!e||!e.size)return!1;let t=[];return e.between(0,n.state.doc.length,(i,s)=>{t.push(Wi.of({from:i,to:s}))}),n.dispatch({effects:t}),!0},Cp=[{key:"Ctrl-Shift-[",mac:"Cmd-Alt-[",run:kp},{key:"Ctrl-Shift-]",mac:"Cmd-Alt-]",run:vp},{key:"Ctrl-Alt-[",run:xp},{key:"Ctrl-Alt-]",run:Sp}],Ap={placeholderDOM:null,placeholderText:"…"},Uh=D.define({combine(n){return _t(n,Ap)}});function Gh(n){let e=[Lt,Tp];return n&&e.push(Uh.of(n)),e}const ml=E.replace({widget:new class extends tt{toDOM(n){let{state:e}=n,t=e.facet(Uh),i=r=>{let o=n.lineBlockAt(n.posAtDOM(r.target)),l=Ln(n.state,o.from,o.to);l&&n.dispatch({effects:Wi.of(l)}),r.preventDefault()};if(t.placeholderDOM)return t.placeholderDOM(n,i);let s=document.createElement("span");return s.textContent=t.placeholderText,s.setAttribute("aria-label",e.phrase("folded code")),s.title=e.phrase("unfold"),s.className="cm-foldPlaceholder",s.onclick=i,s}}}),Mp={openText:"⌄",closedText:"›",markerDOM:null,domEventHandlers:{},foldingChanged:()=>!1};class ys extends bt{constructor(e,t){super(),this.config=e,this.open=t}eq(e){return this.config==e.config&&this.open==e.open}toDOM(e){if(this.config.markerDOM)return this.config.markerDOM(this.open);let t=document.createElement("span");return 
t.textContent=this.open?this.config.openText:this.config.closedText,t.title=e.state.phrase(this.open?"Fold line":"Unfold line"),t}}function Dp(n={}){let e=Object.assign(Object.assign({},Mp),n),t=new ys(e,!0),i=new ys(e,!1),s=be.fromClass(class{constructor(o){this.from=o.viewport.from,this.markers=this.buildMarkers(o)}update(o){(o.docChanged||o.viewportChanged||o.startState.facet(wt)!=o.state.facet(wt)||o.startState.field(Lt,!1)!=o.state.field(Lt,!1)||pe(o.startState)!=pe(o.state)||e.foldingChanged(o))&&(this.markers=this.buildMarkers(o.view))}buildMarkers(o){let l=new Pt;for(let a of o.viewportLineBlocks){let h=Ln(o.state,a.from,a.to)?i:Rn(o.state,a.from,a.to)?t:null;h&&l.add(a.from,a.from,h)}return l.finish()}}),{domEventHandlers:r}=e;return[s,Td({class:"cm-foldGutter",markers(o){var l;return((l=o.plugin(s))===null||l===void 0?void 0:l.markers)||F.empty},initialSpacer(){return new ys(e,!1)},domEventHandlers:Object.assign(Object.assign({},r),{click:(o,l,a)=>{if(r.click&&r.click(o,l,a))return!0;let h=Ln(o.state,l.from,l.to);if(h)return o.dispatch({effects:Wi.of(h)}),!0;let c=Rn(o.state,l.from,l.to);return c?(o.dispatch({effects:Un.of(c)}),!0):!1}})}),Gh()]}const Tp=O.baseTheme({".cm-foldPlaceholder":{backgroundColor:"#eee",border:"1px solid #ddd",color:"#888",borderRadius:".2em",margin:"0 1px",padding:"0 1px",cursor:"pointer"},".cm-foldGutter span":{padding:"0 1px",cursor:"pointer"}});class li{constructor(e,t){this.specs=e;let i;function s(l){let a=mt.newName();return(i||(i=Object.create(null)))["."+a]=l,a}const r=typeof t.all=="string"?t.all:t.all?s(t.all):void 0,o=t.scope;this.scope=o instanceof Ie?l=>l.prop(Dt)==o.data:o?l=>l==o:void 0,this.style=Ih(e.map(l=>({tag:l.tag,class:l.class||s(Object.assign({},l,{tag:null}))})),{all:r}).style,this.module=i?new mt(i):null,this.themeType=t.themeType}static define(e,t){return new li(e,t||{})}}const dr=D.define(),$h=D.define({combine(n){return n.length?[n[0]]:null}});function bs(n){let e=n.facet(dr);return e.length?e:n.facet($h)}function Hr(n,e){let t=[Bp],i;return n instanceof li&&(n.module&&t.push(O.styleModule.of(n.module)),i=n.themeType),e?.fallback?t.push($h.of(n)):i?t.push(dr.computeN([O.darkTheme],s=>s.facet(O.darkTheme)==(i=="dark")?[n]:[])):t.push(dr.of(n)),t}class Op{constructor(e){this.markCache=Object.create(null),this.tree=pe(e.state),this.decorations=this.buildDeco(e,bs(e.state))}update(e){let t=pe(e.state),i=bs(e.state),s=i!=bs(e.startState);t.length{i.add(o,l,this.markCache[a]||(this.markCache[a]=E.mark({class:a})))},s,r);return i.finish()}}const Bp=Vi.high(be.fromClass(Op,{decorations:n=>n.decorations})),Pp=li.define([{tag:m.meta,color:"#404740"},{tag:m.link,textDecoration:"underline"},{tag:m.heading,textDecoration:"underline",fontWeight:"bold"},{tag:m.emphasis,fontStyle:"italic"},{tag:m.strong,fontWeight:"bold"},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.keyword,color:"#708"},{tag:[m.atom,m.bool,m.url,m.contentSeparator,m.labelName],color:"#219"},{tag:[m.literal,m.inserted],color:"#164"},{tag:[m.string,m.deleted],color:"#a11"},{tag:[m.regexp,m.escape,m.special(m.string)],color:"#e40"},{tag:m.definition(m.variableName),color:"#00f"},{tag:m.local(m.variableName),color:"#30a"},{tag:[m.typeName,m.namespace],color:"#085"},{tag:m.className,color:"#167"},{tag:[m.special(m.variableName),m.macroName],color:"#256"},{tag:m.definition(m.propertyName),color:"#00c"},{tag:m.comment,color:"#940"},{tag:m.invalid,color:"#f00"}]),Ep=1e4,Rp="()[]{}",Lp=new L;function pr(n,e,t){let i=n.prop(e<0?L.openedBy:L.closedBy);if(i)return 
i;if(n.name.length==1){let s=t.indexOf(n.name);if(s>-1&&s%2==(e<0?1:0))return[t[s+e]]}return null}function mr(n){let e=n.type.prop(Lp);return e?e(n.node):n}function qt(n,e,t,i={}){let s=i.maxScanDistance||Ep,r=i.brackets||Rp,o=pe(n),l=o.resolveInner(e,t);for(let a=l;a;a=a.parent){let h=pr(a.type,t,r);if(h&&a.from0?e>=c.from&&ec.from&&e<=c.to))return Ip(n,e,t,a,c,h,r)}}return Np(n,e,t,o,l.type,s,r)}function Ip(n,e,t,i,s,r,o){let l=i.parent,a={from:s.from,to:s.to},h=0,c=l?.cursor();if(c&&(t<0?c.childBefore(i.from):c.childAfter(i.to)))do if(t<0?c.to<=i.from:c.from>=i.to){if(h==0&&r.indexOf(c.type.name)>-1&&c.from0)return null;let h={from:t<0?e-1:e,to:t>0?e+1:e},c=n.doc.iterRange(e,t>0?n.doc.length:0),f=0;for(let u=0;!c.next().done&&u<=r;){let d=c.value;t<0&&(u+=d.length);let p=e+u*t;for(let g=t>0?0:d.length-1,y=t>0?d.length:-1;g!=y;g+=t){let b=o.indexOf(d[g]);if(!(b<0||i.resolveInner(p+g,1).type!=s))if(b%2==0==t>0)f++;else{if(f==1)return{start:h,end:{from:p+g,to:p+g+1},matched:b>>1==a>>1};f--}}t>0&&(u+=d.length)}return c.done?{start:h,matched:!1}:null}function gl(n,e,t,i=0,s=0){e==null&&(e=n.search(/[^\s\u00a0]/),e==-1&&(e=n.length));let r=s;for(let o=i;o=this.string.length}sol(){return this.pos==0}peek(){return this.string.charAt(this.pos)||void 0}next(){if(this.post}eatSpace(){let e=this.pos;for(;/[\s\u00a0]/.test(this.string.charAt(this.pos));)++this.pos;return this.pos>e}skipToEnd(){this.pos=this.string.length}skipTo(e){let t=this.string.indexOf(e,this.pos);if(t>-1)return this.pos=t,!0}backUp(e){this.pos-=e}column(){return this.lastColumnPosi?o.toLowerCase():o,r=this.string.substr(this.pos,e.length);return s(r)==s(e)?(t!==!1&&(this.pos+=e.length),!0):null}else{let s=this.string.slice(this.pos).match(e);return s&&s.index>0?null:(s&&t!==!1&&(this.pos+=s[0].length),s)}}current(){return this.string.slice(this.start,this.pos)}}function _p(n){return{name:n.name||"",token:n.token,blankLine:n.blankLine||(()=>{}),startState:n.startState||(()=>!0),copyState:n.copyState||Vp,indent:n.indent||(()=>null),languageData:n.languageData||{},tokenTable:n.tokenTable||zr}}function Vp(n){if(typeof n!="object")return n;let e={};for(let t in n){let i=n[t];e[t]=i instanceof Array?i.slice():i}return e}const yl=new WeakMap;class jt extends Ie{constructor(e){let t=Nh(e.languageData),i=_p(e),s,r=new class extends Rh{createParse(o,l,a){return new Hp(s,o,l,a)}};super(t,r,[Fh.of((o,l)=>this.getIndent(o,l))],e.name),this.topNode=qp(t),s=this,this.streamParser=i,this.stateAfter=new L({perNode:!0}),this.tokenTable=e.tokenTable?new Qh(i.tokenTable):zp}static define(e){return new jt(e)}getIndent(e,t){let i=pe(e.state),s=i.resolve(t);for(;s&&s.type!=this.topNode;)s=s.parent;if(!s)return null;let r,{overrideIndentation:o}=e.options;o&&(r=yl.get(e.state),r!=null&&r1e4)return null;for(;a=i&&t+e.length<=s&&e.prop(n.stateAfter);if(r)return{state:n.streamParser.copyState(r),pos:t+e.length};for(let o=e.children.length-1;o>=0;o--){let l=e.children[o],a=t+e.positions[o],h=l instanceof z&&a=e.length)return e;!s&&e.type==n.topNode&&(s=!0);for(let r=e.children.length-1;r>=0;r--){let o=e.positions[r],l=e.children[r],a;if(ot&&Wr(n,s.tree,0-s.offset,t,o),a;if(l&&(a=Yh(n,s.tree,t+s.offset,l.pos+s.offset,!1)))return{state:l.state,tree:a}}return{state:n.streamParser.startState(i?Rt(i):4),tree:z.empty}}class Hp{constructor(e,t,i,s){this.lang=e,this.input=t,this.fragments=i,this.ranges=s,this.stoppedAt=null,this.chunks=[],this.chunkPos=[],this.chunk=[],this.chunkReused=void 0,this.rangeIndex=0,this.to=s[s.length-1].to;let 
r=ti.get(),o=s[0].from,{state:l,tree:a}=Fp(e,i,o,r?.state);this.state=l,this.parsedPos=this.chunkStart=o+a.length;for(let h=0;h=t?this.finish():e&&this.parsedPos>=e.viewport.to?(e.skipUntilInView(this.parsedPos,t),this.finish()):null}stopAt(e){this.stoppedAt=e}lineAfter(e){let t=this.input.chunk(e);if(this.input.lineChunks)t==` -`&&(t="");else{let i=t.indexOf(` -`);i>-1&&(t=t.slice(0,i))}return e+t.length<=this.to?t:t.slice(0,this.to-e)}nextLine(){let e=this.parsedPos,t=this.lineAfter(e),i=e+t.length;for(let s=this.rangeIndex;;){let r=this.ranges[s].to;if(r>=i||(t=t.slice(0,r-(i-t.length)),s++,s==this.ranges.length))break;let o=this.ranges[s].from,l=this.lineAfter(o);t+=l,i=o+l.length}return{line:t,end:i}}skipGapsTo(e,t,i){for(;;){let s=this.ranges[this.rangeIndex].to,r=e+t;if(i>0?s>r:s>=r)break;let o=this.ranges[++this.rangeIndex].from;t+=o-s}return t}moveRangeIndex(){for(;this.ranges[this.rangeIndex].to1){r=this.skipGapsTo(t,r,1),t+=r;let o=this.chunk.length;r=this.skipGapsTo(i,r,-1),i+=r,s+=this.chunk.length-o}return this.chunk.push(e,t,i,s),r}parseLine(e){let{line:t,end:i}=this.nextLine(),s=0,{streamParser:r}=this.lang,o=new Jh(t,e?e.state.tabSize:4,e?Rt(e.state):2);if(o.eol())r.blankLine(this.state,o.indentUnit);else for(;!o.eol();){let l=Xh(r.token,o,this.state);if(l&&(s=this.emitToken(this.lang.tokenTable.resolve(l),this.parsedPos+o.start,this.parsedPos+o.pos,4,s)),o.start>1e4)break}this.parsedPos=i,this.moveRangeIndex(),this.parsedPose.start)return s}throw new Error("Stream parser failed to advance stream.")}const zr=Object.create(null),Ii=[xe.none],Wp=new Lr(Ii),bl=[],Zh=Object.create(null);for(let[n,e]of[["variable","variableName"],["variable-2","variableName.special"],["string-2","string.special"],["def","variableName.definition"],["tag","tagName"],["attribute","attributeName"],["type","typeName"],["builtin","variableName.standard"],["qualifier","modifier"],["error","invalid"],["header","heading"],["property","propertyName"]])Zh[n]=ec(zr,e);class Qh{constructor(e){this.extra=e,this.table=Object.assign(Object.create(null),Zh)}resolve(e){return e?this.table[e]||(this.table[e]=ec(this.extra,e)):0}}const zp=new Qh(zr);function ws(n,e){bl.indexOf(n)>-1||(bl.push(n),console.warn(e))}function ec(n,e){let t=null;for(let r of e.split(".")){let o=n[r]||m[r];o?typeof o=="function"?t?t=o(t):ws(r,`Modifier ${r} used at start of tag`):t?ws(r,`Tag ${r} used as modifier`):t=o:ws(r,`Unknown highlighting tag ${r}`)}if(!t)return 0;let i=e.replace(/ /g,"_"),s=xe.define({id:Ii.length,name:i,props:[Zd({[i]:t})]});return Ii.push(s),s.id}function qp(n){let e=xe.define({id:Ii.length,name:"Document",props:[Dt.add(()=>n)]});return Ii.push(e),e}const jp=n=>{let e=jr(n.state);return e.line?Kp(n):e.block?Gp(n):!1};function qr(n,e){return({state:t,dispatch:i})=>{if(t.readOnly)return!1;let s=n(e,t);return s?(i(t.update(s)),!0):!1}}const Kp=qr(Yp,0),Up=qr(tc,0),Gp=qr((n,e)=>tc(n,e,Jp(e)),0);function jr(n,e=n.selection.main.head){let t=n.languageDataAt("commentTokens",e);return t.length?t[0]:{}}const pi=50;function $p(n,{open:e,close:t},i,s){let r=n.sliceDoc(i-pi,i),o=n.sliceDoc(s,s+pi),l=/\s*$/.exec(r)[0].length,a=/^\s*/.exec(o)[0].length,h=r.length-l;if(r.slice(h-e.length,h)==e&&o.slice(a,a+t.length)==t)return{open:{pos:i-l,margin:l&&1},close:{pos:s+a,margin:a&&1}};let c,f;s-i<=2*pi?c=f=n.sliceDoc(i,s):(c=n.sliceDoc(i,i+pi),f=n.sliceDoc(s-pi,s));let u=/^\s*/.exec(c)[0].length,d=/\s*$/.exec(f)[0].length,p=f.length-d-t.length;return 
c.slice(u,u+e.length)==e&&f.slice(p,p+t.length)==t?{open:{pos:i+u+e.length,margin:/\s/.test(c.charAt(u+e.length))?1:0},close:{pos:s-d-t.length,margin:/\s/.test(f.charAt(p-1))?1:0}}:null}function Jp(n){let e=[];for(let t of n.selection.ranges){let i=n.doc.lineAt(t.from),s=t.to<=i.to?i:n.doc.lineAt(t.to),r=e.length-1;r>=0&&e[r].to>i.from?e[r].to=s.to:e.push({from:i.from,to:s.to})}return e}function tc(n,e,t=e.selection.ranges){let i=t.map(r=>jr(e,r.from).block);if(!i.every(r=>r))return null;let s=t.map((r,o)=>$p(e,i[o],r.from,r.to));if(n!=2&&!s.every(r=>r))return{changes:e.changes(t.map((r,o)=>s[o]?[]:[{from:r.from,insert:i[o].open+" "},{from:r.to,insert:" "+i[o].close}]))};if(n!=1&&s.some(r=>r)){let r=[];for(let o=0,l;os&&(r==o||o>c.from)){s=c.from;let f=jr(e,h).line;if(!f)continue;let u=/^\s*/.exec(c.text)[0].length,d=u==c.length,p=c.text.slice(u,u+f.length)==f?u:-1;ur.comment<0&&(!r.empty||r.single))){let r=[];for(let{line:l,token:a,indent:h,empty:c,single:f}of i)(f||!c)&&r.push({from:l.from+h,insert:a+" "});let o=e.changes(r);return{changes:o,selection:e.selection.map(o,1)}}else if(n!=1&&i.some(r=>r.comment>=0)){let r=[];for(let{line:o,comment:l,token:a}of i)if(l>=0){let h=o.from+l,c=h+a.length;o.text[c-o.from]==" "&&c++,r.push({from:h,to:c})}return{changes:r}}return null}const gr=Nt.define(),Xp=Nt.define(),Zp=D.define(),ic=D.define({combine(n){return _t(n,{minDepth:100,newGroupDelay:500},{minDepth:Math.max,newGroupDelay:Math.min})}});function Qp(n){let e=0;return n.iterChangedRanges((t,i)=>e=i),e}const nc=Me.define({create(){return Xe.empty},update(n,e){let t=e.state.facet(ic),i=e.annotation(gr);if(i){let a=e.docChanged?w.single(Qp(e.changes)):void 0,h=Se.fromTransaction(e,a),c=i.side,f=c==0?n.undone:n.done;return h?f=In(f,f.length,t.minDepth,h):f=oc(f,e.startState.selection),new Xe(c==0?i.rest:f,c==0?f:i.rest)}let s=e.annotation(Xp);if((s=="full"||s=="before")&&(n=n.isolate()),e.annotation(re.addToHistory)===!1)return e.changes.empty?n:n.addMapping(e.changes.desc);let r=Se.fromTransaction(e),o=e.annotation(re.time),l=e.annotation(re.userEvent);return r?n=n.addChanges(r,o,l,t.newGroupDelay,t.minDepth):e.selection&&(n=n.addSelection(e.startState.selection,o,l,t.newGroupDelay)),(s=="full"||s=="after")&&(n=n.isolate()),n},toJSON(n){return{done:n.done.map(e=>e.toJSON()),undone:n.undone.map(e=>e.toJSON())}},fromJSON(n){return new Xe(n.done.map(Se.fromJSON),n.undone.map(Se.fromJSON))}});function em(n={}){return[nc,ic.of(n),O.domEventHandlers({beforeinput(e,t){let i=e.inputType=="historyUndo"?sc:e.inputType=="historyRedo"?yr:null;return i?(e.preventDefault(),i(t)):!1}})]}function Gn(n,e){return function({state:t,dispatch:i}){if(!e&&t.readOnly)return!1;let s=t.field(nc,!1);if(!s)return!1;let r=s.pop(n,t,e);return r?(i(r),!0):!1}}const sc=Gn(0,!1),yr=Gn(1,!1),tm=Gn(0,!0),im=Gn(1,!0);class Se{constructor(e,t,i,s,r){this.changes=e,this.effects=t,this.mapped=i,this.startSelection=s,this.selectionsAfter=r}setSelAfter(e){return new Se(this.changes,this.effects,this.mapped,this.startSelection,e)}toJSON(){var e,t,i;return{changes:(e=this.changes)===null||e===void 0?void 0:e.toJSON(),mapped:(t=this.mapped)===null||t===void 0?void 0:t.toJSON(),startSelection:(i=this.startSelection)===null||i===void 0?void 0:i.toJSON(),selectionsAfter:this.selectionsAfter.map(s=>s.toJSON())}}static fromJSON(e){return new Se(e.changes&&ne.fromJSON(e.changes),[],e.mapped&&Ze.fromJSON(e.mapped),e.startSelection&&w.fromJSON(e.startSelection),e.selectionsAfter.map(w.fromJSON))}static fromTransaction(e,t){let i=Ne;for(let s of 
e.startState.facet(Zp)){let r=s(e);r.length&&(i=i.concat(r))}return!i.length&&e.changes.empty?null:new Se(e.changes.invert(e.startState.doc),i,void 0,t||e.startState.selection,Ne)}static selection(e){return new Se(void 0,Ne,void 0,void 0,e)}}function In(n,e,t,i){let s=e+1>t+20?e-t-1:0,r=n.slice(s,e);return r.push(i),r}function nm(n,e){let t=[],i=!1;return n.iterChangedRanges((s,r)=>t.push(s,r)),e.iterChangedRanges((s,r,o,l)=>{for(let a=0;a=h&&o<=c&&(i=!0)}}),i}function sm(n,e){return n.ranges.length==e.ranges.length&&n.ranges.filter((t,i)=>t.empty!=e.ranges[i].empty).length===0}function rc(n,e){return n.length?e.length?n.concat(e):n:e}const Ne=[],rm=200;function oc(n,e){if(n.length){let t=n[n.length-1],i=t.selectionsAfter.slice(Math.max(0,t.selectionsAfter.length-rm));return i.length&&i[i.length-1].eq(e)?n:(i.push(e),In(n,n.length-1,1e9,t.setSelAfter(i)))}else return[Se.selection([e])]}function om(n){let e=n[n.length-1],t=n.slice();return t[n.length-1]=e.setSelAfter(e.selectionsAfter.slice(0,e.selectionsAfter.length-1)),t}function ks(n,e){if(!n.length)return n;let t=n.length,i=Ne;for(;t;){let s=lm(n[t-1],e,i);if(s.changes&&!s.changes.empty||s.effects.length){let r=n.slice(0,t);return r[t-1]=s,r}else e=s.mapped,t--,i=s.selectionsAfter}return i.length?[Se.selection(i)]:Ne}function lm(n,e,t){let i=rc(n.selectionsAfter.length?n.selectionsAfter.map(l=>l.map(e)):Ne,t);if(!n.changes)return Se.selection(i);let s=n.changes.map(e),r=e.mapDesc(n.changes,!0),o=n.mapped?n.mapped.composeDesc(r):r;return new Se(s,R.mapEffects(n.effects,e),o,n.startSelection.map(r),i)}const am=/^(input\.type|delete)($|\.)/;class Xe{constructor(e,t,i=0,s=void 0){this.done=e,this.undone=t,this.prevTime=i,this.prevUserEvent=s}isolate(){return this.prevTime?new Xe(this.done,this.undone):this}addChanges(e,t,i,s,r){let o=this.done,l=o[o.length-1];return l&&l.changes&&!l.changes.empty&&e.changes&&(!i||am.test(i))&&(!l.selectionsAfter.length&&t-this.prevTime0&&t-this.prevTimet.empty?n.moveByChar(t,e):$n(t,e))}function we(n){return n.textDirectionAt(n.state.selection.main.head)==Z.LTR}const ac=n=>lc(n,!we(n)),hc=n=>lc(n,we(n));function cc(n,e){return We(n,t=>t.empty?n.moveByGroup(t,e):$n(t,e))}const cm=n=>cc(n,!we(n)),fm=n=>cc(n,we(n));function um(n,e,t){if(e.type.prop(t))return!0;let i=e.to-e.from;return i&&(i>2||/[^\s,.;:]/.test(n.sliceDoc(e.from,e.to)))||e.firstChild}function Jn(n,e,t){let i=pe(n).resolveInner(e.head),s=t?L.closedBy:L.openedBy;for(let a=e.head;;){let h=t?i.childAfter(a):i.childBefore(a);if(!h)break;um(n,h,s)?i=h:a=t?h.to:h.from}let r=i.type.prop(s),o,l;return r&&(o=t?qt(n,i.from,1):qt(n,i.to,-1))&&o.matched?l=t?o.end.to:o.end.from:l=t?i.to:i.from,w.cursor(l,t?-1:1)}const dm=n=>We(n,e=>Jn(n.state,e,!we(n))),pm=n=>We(n,e=>Jn(n.state,e,we(n)));function fc(n,e){return We(n,t=>{if(!t.empty)return $n(t,e);let i=n.moveVertically(t,e);return i.head!=t.head?i:n.moveToLineBoundary(t,e)})}const uc=n=>fc(n,!1),dc=n=>fc(n,!0);function pc(n){return Math.max(n.defaultLineHeight,Math.min(n.dom.clientHeight,innerHeight)-5)}function mc(n,e){let{state:t}=n,i=ai(t.selection,l=>l.empty?n.moveVertically(l,e,pc(n)):$n(l,e));if(i.eq(t.selection))return!1;let s=n.coordsAtPos(t.selection.main.head),r=n.scrollDOM.getBoundingClientRect(),o;return s&&s.top>r.top&&s.bottommc(n,!1),br=n=>mc(n,!0);function kt(n,e,t){let i=n.lineBlockAt(e.head),s=n.moveToLineBoundary(e,t);if(s.head==e.head&&s.head!=(t?i.to:i.from)&&(s=n.moveToLineBoundary(e,t,!1)),!t&&s.head==i.from&&i.length){let 
r=/^\s*/.exec(n.state.sliceDoc(i.from,Math.min(i.from+100,i.to)))[0].length;r&&e.head!=i.from+r&&(s=w.cursor(i.from+r))}return s}const mm=n=>We(n,e=>kt(n,e,!0)),gm=n=>We(n,e=>kt(n,e,!1)),ym=n=>We(n,e=>kt(n,e,!we(n))),bm=n=>We(n,e=>kt(n,e,we(n))),wm=n=>We(n,e=>w.cursor(n.lineBlockAt(e.head).from,1)),km=n=>We(n,e=>w.cursor(n.lineBlockAt(e.head).to,-1));function vm(n,e,t){let i=!1,s=ai(n.selection,r=>{let o=qt(n,r.head,-1)||qt(n,r.head,1)||r.head>0&&qt(n,r.head-1,1)||r.headvm(n,e,!1);function Ve(n,e){let t=ai(n.state.selection,i=>{let s=e(i);return w.range(i.anchor,s.head,s.goalColumn)});return t.eq(n.state.selection)?!1:(n.dispatch(it(n.state,t)),!0)}function gc(n,e){return Ve(n,t=>n.moveByChar(t,e))}const yc=n=>gc(n,!we(n)),bc=n=>gc(n,we(n));function wc(n,e){return Ve(n,t=>n.moveByGroup(t,e))}const Sm=n=>wc(n,!we(n)),Cm=n=>wc(n,we(n)),Am=n=>Ve(n,e=>Jn(n.state,e,!we(n))),Mm=n=>Ve(n,e=>Jn(n.state,e,we(n)));function kc(n,e){return Ve(n,t=>n.moveVertically(t,e))}const vc=n=>kc(n,!1),xc=n=>kc(n,!0);function Sc(n,e){return Ve(n,t=>n.moveVertically(t,e,pc(n)))}const kl=n=>Sc(n,!1),vl=n=>Sc(n,!0),Dm=n=>Ve(n,e=>kt(n,e,!0)),Tm=n=>Ve(n,e=>kt(n,e,!1)),Om=n=>Ve(n,e=>kt(n,e,!we(n))),Bm=n=>Ve(n,e=>kt(n,e,we(n))),Pm=n=>Ve(n,e=>w.cursor(n.lineBlockAt(e.head).from)),Em=n=>Ve(n,e=>w.cursor(n.lineBlockAt(e.head).to)),xl=({state:n,dispatch:e})=>(e(it(n,{anchor:0})),!0),Sl=({state:n,dispatch:e})=>(e(it(n,{anchor:n.doc.length})),!0),Cl=({state:n,dispatch:e})=>(e(it(n,{anchor:n.selection.main.anchor,head:0})),!0),Al=({state:n,dispatch:e})=>(e(it(n,{anchor:n.selection.main.anchor,head:n.doc.length})),!0),Rm=({state:n,dispatch:e})=>(e(n.update({selection:{anchor:0,head:n.doc.length},userEvent:"select"})),!0),Lm=({state:n,dispatch:e})=>{let t=Xn(n).map(({from:i,to:s})=>w.range(i,Math.min(s+1,n.doc.length)));return e(n.update({selection:w.create(t),userEvent:"select"})),!0},Im=({state:n,dispatch:e})=>{let t=ai(n.selection,i=>{var s;let r=pe(n).resolveInner(i.head,1);for(;!(r.from=i.to||r.to>i.to&&r.from<=i.from||!(!((s=r.parent)===null||s===void 0)&&s.parent));)r=r.parent;return w.range(r.to,r.from)});return e(it(n,t)),!0},Nm=({state:n,dispatch:e})=>{let t=n.selection,i=null;return t.ranges.length>1?i=w.create([t.main]):t.main.empty||(i=w.create([w.cursor(t.main.head)])),i?(e(it(n,i)),!0):!1};function Yn(n,e){if(n.state.readOnly)return!1;let t="delete.selection",{state:i}=n,s=i.changeByRange(r=>{let{from:o,to:l}=r;if(o==l){let a=e(o);ao&&(t="delete.forward",a=on(n,a,!0)),o=Math.min(o,a),l=Math.max(l,a)}else o=on(n,o,!1),l=on(n,l,!0);return o==l?{range:r}:{changes:{from:o,to:l},range:w.cursor(o)}});return s.changes.empty?!1:(n.dispatch(i.update(s,{scrollIntoView:!0,userEvent:t,effects:t=="delete.selection"?O.announce.of(i.phrase("Selection deleted")):void 0})),!0)}function on(n,e,t){if(n instanceof O)for(let i of n.state.facet(O.atomicRanges).map(s=>s(n)))i.between(e,e,(s,r)=>{se&&(e=t?r:s)});return e}const Cc=(n,e)=>Yn(n,t=>{let{state:i}=n,s=i.doc.lineAt(t),r,o;if(!e&&t>s.from&&tCc(n,!1),Ac=n=>Cc(n,!0),Mc=(n,e)=>Yn(n,t=>{let i=t,{state:s}=n,r=s.doc.lineAt(i),o=s.charCategorizer(i);for(let l=null;;){if(i==(e?r.to:r.from)){i==t&&r.number!=(e?s.doc.lines:1)&&(i+=e?1:-1);break}let a=Oe(r.text,i-r.from,e)+r.from,h=r.text.slice(Math.min(i,a)-r.from,Math.max(i,a)-r.from),c=o(h);if(l!=null&&c!=l)break;(h!=" "||i!=t)&&(l=c),i=a}return i}),Dc=n=>Mc(n,!1),_m=n=>Mc(n,!0),Tc=n=>Yn(n,e=>{let t=n.lineBlockAt(e).to;return eYn(n,e=>{let t=n.lineBlockAt(e).from;return 
e>t?t:Math.max(0,e-1)}),Fm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=n.changeByRange(i=>({changes:{from:i.from,to:i.to,insert:_.of(["",""])},range:w.cursor(i.from)}));return e(n.update(t,{scrollIntoView:!0,userEvent:"input"})),!0},Hm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=n.changeByRange(i=>{if(!i.empty||i.from==0||i.from==n.doc.length)return{range:i};let s=i.from,r=n.doc.lineAt(s),o=s==r.from?s-1:Oe(r.text,s-r.from,!1)+r.from,l=s==r.to?s+1:Oe(r.text,s-r.from,!0)+r.from;return{changes:{from:o,to:l,insert:n.doc.slice(s,l).append(n.doc.slice(o,s))},range:w.cursor(l)}});return t.changes.empty?!1:(e(n.update(t,{scrollIntoView:!0,userEvent:"move.character"})),!0)};function Xn(n){let e=[],t=-1;for(let i of n.selection.ranges){let s=n.doc.lineAt(i.from),r=n.doc.lineAt(i.to);if(!i.empty&&i.to==r.from&&(r=n.doc.lineAt(i.to-1)),t>=s.number){let o=e[e.length-1];o.to=r.to,o.ranges.push(i)}else e.push({from:s.from,to:r.to,ranges:[i]});t=r.number+1}return e}function Oc(n,e,t){if(n.readOnly)return!1;let i=[],s=[];for(let r of Xn(n)){if(t?r.to==n.doc.length:r.from==0)continue;let o=n.doc.lineAt(t?r.to+1:r.from-1),l=o.length+1;if(t){i.push({from:r.to,to:o.to},{from:r.from,insert:o.text+n.lineBreak});for(let a of r.ranges)s.push(w.range(Math.min(n.doc.length,a.anchor+l),Math.min(n.doc.length,a.head+l)))}else{i.push({from:o.from,to:r.from},{from:r.to,insert:n.lineBreak+o.text});for(let a of r.ranges)s.push(w.range(a.anchor-l,a.head-l))}}return i.length?(e(n.update({changes:i,scrollIntoView:!0,selection:w.create(s,n.selection.mainIndex),userEvent:"move.line"})),!0):!1}const Wm=({state:n,dispatch:e})=>Oc(n,e,!1),zm=({state:n,dispatch:e})=>Oc(n,e,!0);function Bc(n,e,t){if(n.readOnly)return!1;let i=[];for(let s of Xn(n))t?i.push({from:s.from,insert:n.doc.slice(s.from,s.to)+n.lineBreak}):i.push({from:s.to,insert:n.lineBreak+n.doc.slice(s.from,s.to)});return e(n.update({changes:i,scrollIntoView:!0,userEvent:"input.copyline"})),!0}const qm=({state:n,dispatch:e})=>Bc(n,e,!1),jm=({state:n,dispatch:e})=>Bc(n,e,!0),Km=n=>{if(n.state.readOnly)return!1;let{state:e}=n,t=e.changes(Xn(e).map(({from:s,to:r})=>(s>0?s--:rn.moveVertically(s,!0)).map(t);return n.dispatch({changes:t,selection:i,scrollIntoView:!0,userEvent:"delete.line"}),!0};function Um(n,e){if(/\(\)|\[\]|\{\}/.test(n.sliceDoc(e-1,e+1)))return{from:e,to:e};let t=pe(n).resolveInner(e),i=t.childBefore(e),s=t.childAfter(e),r;return i&&s&&i.to<=e&&s.from>=e&&(r=i.type.prop(L.closedBy))&&r.indexOf(s.name)>-1&&n.doc.lineAt(i.to).from==n.doc.lineAt(s.from).from?{from:i.to,to:s.from}:null}const Gm=Pc(!1),$m=Pc(!0);function Pc(n){return({state:e,dispatch:t})=>{if(e.readOnly)return!1;let i=e.changeByRange(s=>{let{from:r,to:o}=s,l=e.doc.lineAt(r),a=!n&&r==o&&Um(e,r);n&&(r=o=(o<=l.to?l:e.doc.lineAt(o)).to);let h=new Kn(e,{simulateBreak:r,simulateDoubleBreak:!!a}),c=Vr(h,r);for(c==null&&(c=/^\s*/.exec(e.doc.lineAt(r).text)[0].length);ol.from&&r{let s=[];for(let o=i.from;o<=i.to;){let l=n.doc.lineAt(o);l.number>t&&(i.empty||i.to>l.from)&&(e(l,s,i),t=l.number),o=l.to+1}let r=n.changes(s);return{changes:s,range:w.range(r.mapPos(i.anchor,1),r.mapPos(i.head,1))}})}const Jm=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let t=Object.create(null),i=new Kn(n,{overrideIndentation:r=>{let o=t[r];return o??-1}}),s=Kr(n,(r,o,l)=>{let a=Vr(i,r.from);if(a==null)return;/\S/.test(r.text)||(a=0);let 
h=/^\s*/.exec(r.text)[0],c=Li(n,a);(h!=c||l.fromn.readOnly?!1:(e(n.update(Kr(n,(t,i)=>{i.push({from:t.from,insert:n.facet(jn)})}),{userEvent:"input.indent"})),!0),Rc=({state:n,dispatch:e})=>n.readOnly?!1:(e(n.update(Kr(n,(t,i)=>{let s=/^\s*/.exec(t.text)[0];if(!s)return;let r=Fi(s,n.tabSize),o=0,l=Li(n,Math.max(0,r-Rt(n)));for(;o({mac:n.key,run:n.run,shift:n.shift}))),Zm=[{key:"Alt-ArrowLeft",mac:"Ctrl-ArrowLeft",run:dm,shift:Am},{key:"Alt-ArrowRight",mac:"Ctrl-ArrowRight",run:pm,shift:Mm},{key:"Alt-ArrowUp",run:Wm},{key:"Shift-Alt-ArrowUp",run:qm},{key:"Alt-ArrowDown",run:zm},{key:"Shift-Alt-ArrowDown",run:jm},{key:"Escape",run:Nm},{key:"Mod-Enter",run:$m},{key:"Alt-l",mac:"Ctrl-l",run:Lm},{key:"Mod-i",run:Im,preventDefault:!0},{key:"Mod-[",run:Rc},{key:"Mod-]",run:Ec},{key:"Mod-Alt-\\",run:Jm},{key:"Shift-Mod-k",run:Km},{key:"Shift-Mod-\\",run:xm},{key:"Mod-/",run:jp},{key:"Alt-A",run:Up}].concat(Xm),Qm={key:"Tab",run:Ec,shift:Rc},eg="#2E3235",Ue="#DDDDDD",Ai="#B9D2FF",ln="#b0b0b0",tg="#e0e0e0",Lc="#808080",vs="#000000",ig="#A54543",Ic="#fc6d24",St="#fda331",xs="#8abeb7",Ml="#b5bd68",mi="#6fb3d2",gi="#cc99cc",ng="#6987AF",Dl=Ic,Tl="#292d30",an=Ai+"30",sg=eg,Ss=Ue,rg="#202325",Ol=Ue,og=O.theme({"&":{color:Ue,backgroundColor:sg},".cm-content":{caretColor:Ol},".cm-cursor, .cm-dropCursor":{borderLeftColor:Ol},"&.cm-focused .cm-selectionBackground, .cm-selectionBackground, .cm-content ::selection":{backgroundColor:rg},".cm-panels":{backgroundColor:Tl,color:ln},".cm-panels.cm-panels-top":{borderBottom:"2px solid black"},".cm-panels.cm-panels-bottom":{borderTop:"2px solid black"},".cm-searchMatch":{backgroundColor:Ai,outline:`1px solid ${ln}`,color:vs},".cm-searchMatch.cm-searchMatch-selected":{backgroundColor:tg,color:vs},".cm-activeLine":{backgroundColor:an},".cm-selectionMatch":{backgroundColor:an},"&.cm-focused .cm-matchingBracket, &.cm-focused .cm-nonmatchingBracket":{outline:`1px solid ${ln}`},"&.cm-focused .cm-matchingBracket":{backgroundColor:Ai,color:vs},".cm-gutters":{borderRight:"1px solid #ffffff10",color:Lc,backgroundColor:Tl},".cm-activeLineGutter":{backgroundColor:an},".cm-foldPlaceholder":{backgroundColor:"transparent",border:"none",color:Ai},".cm-tooltip":{border:"none",backgroundColor:Ss},".cm-tooltip .cm-tooltip-arrow:before":{borderTopColor:"transparent",borderBottomColor:"transparent"},".cm-tooltip .cm-tooltip-arrow:after":{borderTopColor:Ss,borderBottomColor:Ss},".cm-tooltip-autocomplete":{"& > ul > 
li[aria-selected]":{backgroundColor:an,color:ln}}},{dark:!0}),lg=li.define([{tag:m.keyword,color:St},{tag:[m.name,m.deleted,m.character,m.propertyName,m.macroName],color:Ml},{tag:[m.variableName],color:mi},{tag:[m.function(m.variableName)],color:St},{tag:[m.labelName],color:Ic},{tag:[m.color,m.constant(m.name),m.standard(m.name)],color:St},{tag:[m.definition(m.name),m.separator],color:gi},{tag:[m.brace],color:gi},{tag:[m.annotation],color:Dl},{tag:[m.number,m.changed,m.annotation,m.modifier,m.self,m.namespace],color:St},{tag:[m.typeName,m.className],color:mi},{tag:[m.operator,m.operatorKeyword],color:gi},{tag:[m.tagName],color:St},{tag:[m.squareBracket],color:gi},{tag:[m.angleBracket],color:gi},{tag:[m.attributeName],color:mi},{tag:[m.regexp],color:St},{tag:[m.quote],color:Ue},{tag:[m.string],color:Ml},{tag:m.link,color:ng,textDecoration:"underline",textUnderlinePosition:"under"},{tag:[m.url,m.escape,m.special(m.string)],color:xs},{tag:[m.meta],color:ig},{tag:[m.comment],color:Lc,fontStyle:"italic"},{tag:m.monospace,color:Ue},{tag:m.strong,fontWeight:"bold",color:St},{tag:m.emphasis,fontStyle:"italic",color:mi},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.heading,fontWeight:"bold",color:Ue},{tag:m.special(m.heading1),fontWeight:"bold",color:Ue},{tag:m.heading1,fontWeight:"bold",color:Ue},{tag:[m.heading2,m.heading3,m.heading4],fontWeight:"bold",color:Ue},{tag:[m.heading5,m.heading6],color:Ue},{tag:[m.atom,m.bool,m.special(m.variableName)],color:xs},{tag:[m.processingInstruction,m.inserted],color:xs},{tag:[m.contentSeparator],color:mi},{tag:m.invalid,color:Ai,borderBottom:`1px dotted ${Dl}`}]),ag=[og,Hr(lg)],Bl="#2e3440",Ur="#3b4252",Pl="#434c5e",hn="#4c566a",El="#e5e9f0",kr="#eceff4",Cs="#8fbcbb",Rl="#88c0d0",hg="#81a1c1",Fe="#5e81ac",cg="#bf616a",Wt="#d08770",As="#ebcb8b",Ll="#a3be8c",fg="#b48ead",Il="#d30102",Gr=kr,Ms=Gr,ug="#ffffff",Ds=Ur,dg=Gr,Nl=Ur,pg=O.theme({"&":{color:Bl,backgroundColor:ug},".cm-content":{caretColor:Nl},".cm-cursor, .cm-dropCursor":{borderLeftColor:Nl},"&.cm-focused .cm-selectionBackground, .cm-selectionBackground, .cm-content ::selection":{backgroundColor:dg},".cm-panels":{backgroundColor:Gr,color:hn},".cm-panels.cm-panels-top":{borderBottom:"2px solid black"},".cm-panels.cm-panels-bottom":{borderTop:"2px solid black"},".cm-searchMatch":{backgroundColor:"#72a1ff59",outline:`1px solid ${hn}`},".cm-searchMatch.cm-searchMatch-selected":{backgroundColor:El},".cm-activeLine":{backgroundColor:Ms},".cm-selectionMatch":{backgroundColor:El},"&.cm-focused .cm-matchingBracket, &.cm-focused .cm-nonmatchingBracket":{outline:`1px solid ${hn}`},"&.cm-focused .cm-matchingBracket":{backgroundColor:kr},".cm-gutters":{backgroundColor:kr,color:Bl,border:"none"},".cm-activeLineGutter":{backgroundColor:Ms},".cm-foldPlaceholder":{backgroundColor:"transparent",border:"none",color:"#ddd"},".cm-tooltip":{border:"none",backgroundColor:Ds},".cm-tooltip .cm-tooltip-arrow:before":{borderTopColor:"transparent",borderBottomColor:"transparent"},".cm-tooltip .cm-tooltip-arrow:after":{borderTopColor:Ds,borderBottomColor:Ds},".cm-tooltip-autocomplete":{"& > ul > 
li[aria-selected]":{backgroundColor:Ms,color:hn}}},{dark:!1}),mg=li.define([{tag:m.keyword,color:Fe},{tag:[m.name,m.deleted,m.character,m.propertyName,m.macroName],color:Wt},{tag:[m.variableName],color:Wt},{tag:[m.function(m.variableName)],color:Fe},{tag:[m.labelName],color:hg},{tag:[m.color,m.constant(m.name),m.standard(m.name)],color:Fe},{tag:[m.definition(m.name),m.separator],color:Ll},{tag:[m.brace],color:Cs},{tag:[m.annotation],color:Il},{tag:[m.number,m.changed,m.annotation,m.modifier,m.self,m.namespace],color:Rl},{tag:[m.typeName,m.className],color:As},{tag:[m.operator,m.operatorKeyword],color:Ll},{tag:[m.tagName],color:fg},{tag:[m.squareBracket],color:cg},{tag:[m.angleBracket],color:Wt},{tag:[m.attributeName],color:As},{tag:[m.regexp],color:Fe},{tag:[m.quote],color:Ur},{tag:[m.string],color:Wt},{tag:m.link,color:Cs,textDecoration:"underline",textUnderlinePosition:"under"},{tag:[m.url,m.escape,m.special(m.string)],color:Wt},{tag:[m.meta],color:Rl},{tag:[m.comment],color:Pl,fontStyle:"italic"},{tag:m.strong,fontWeight:"bold",color:Fe},{tag:m.emphasis,fontStyle:"italic",color:Fe},{tag:m.strikethrough,textDecoration:"line-through"},{tag:m.heading,fontWeight:"bold",color:Fe},{tag:m.special(m.heading1),fontWeight:"bold",color:Fe},{tag:m.heading1,fontWeight:"bold",color:Fe},{tag:[m.heading2,m.heading3,m.heading4],fontWeight:"bold",color:Fe},{tag:[m.heading5,m.heading6],color:Fe},{tag:[m.atom,m.bool,m.special(m.variableName)],color:Wt},{tag:[m.processingInstruction,m.inserted],color:Cs},{tag:[m.contentSeparator],color:As},{tag:m.invalid,color:Pl,borderBottom:`1px dotted ${Il}`}]),gg=[pg,Hr(mg)];function _l(n){let e=Object.keys(n).join(""),t=/\w/.test(e);return t&&(e=e.replace(/\w/g,"")),`[${t?"\\w":""}${e.replace(/[^\w\s]/g,"\\$&")}]`}function yg(n){let e=Object.create(null),t=Object.create(null);for(let{label:s}of n){e[s[0]]=!0;for(let r=1;rtypeof s=="string"?{label:s}:s),[t,i]=e.every(s=>/^\w+$/.test(s.label))?[/\w*$/,/\w+$/]:yg(e);return s=>{let r=s.matchBefore(i);return r||s.explicit?{from:r?r.from:s.pos,options:e,validFor:t}:null}}function hy(n,e){return t=>{for(let i=pe(t.state).resolveInner(t.pos,-1);i;i=i.parent)if(n.indexOf(i.name)>-1)return null;return e(t)}}class Vl{constructor(e,t,i){this.completion=e,this.source=t,this.match=i}}function vr(n){return n.selection.main.head}function wg(n,e,t,i){return Object.assign(Object.assign({},n.changeByRange(s=>{if(s==n.selection.main)return{changes:{from:t,to:i,insert:e},range:w.cursor(t+e.length)};let r=i-t;return!s.empty||r&&n.sliceDoc(s.from-r,s.from)!=n.sliceDoc(t,i)?{range:s}:{changes:{from:s.from-r,to:s.from,insert:e},range:w.cursor(s.from-r+e.length)}})),{userEvent:"input.complete"})}function Nc(n,e){const t=e.completion.apply||e.completion.label;let i=e.source;typeof t=="string"?n.dispatch(wg(n.state,t,i.from,i.to)):t(n,e.completion,i.from,i.to)}const Fl=new WeakMap;function kg(n){if(!Array.isArray(n))return n;let e=Fl.get(n);return e||Fl.set(n,e=bg(n)),e}class vg{constructor(e){this.pattern=e,this.chars=[],this.folded=[],this.any=[],this.precise=[],this.byWord=[];for(let t=0;t=48&&C<=57||C>=97&&C<=122?2:C>=65&&C<=90?1:0:(T=ga(C))!=T.toLowerCase()?1:T!=T.toUpperCase()?2:0;(!v||B==1&&y||k==0&&B!=0)&&(t[f]==C||i[f]==C&&(u=!0)?o[f++]=v:o.length&&(b=!1)),k=B,v+=Ee(C)}return 
f==a&&o[0]==0&&b?this.result(-100+(u?-200:0),o,e):d==a&&p==0?[-200-e.length,0,g]:l>-1?[-700-e.length,l,l+this.pattern.length]:d==a?[-200+-700-e.length,p,g]:f==a?this.result(-100+(u?-200:0)+-700+(b?0:-1100),o,e):t.length==2?null:this.result((s[0]?-700:0)+-200+-1100,s,e)}result(e,t,i){let s=[e-i.length],r=1;for(let o of t){let l=o+(this.astral?Ee(ge(i,o)):1);r>1&&s[r-1]==o?s[r-1]=l:(s[r++]=o,s[r++]=l)}return s}}const It=D.define({combine(n){return _t(n,{activateOnTyping:!0,selectOnOpen:!0,override:null,closeOnBlur:!0,maxRenderedOptions:100,defaultKeymap:!0,optionClass:()=>"",aboveCursor:!1,icons:!0,addToOptions:[],compareCompletions:(e,t)=>e.label.localeCompare(t.label),interactionDelay:75},{defaultKeymap:(e,t)=>e&&t,closeOnBlur:(e,t)=>e&&t,icons:(e,t)=>e&&t,optionClass:(e,t)=>i=>xg(e(i),t(i)),addToOptions:(e,t)=>e.concat(t)})}});function xg(n,e){return n?e?n+" "+e:n:e}function Sg(n){let e=n.addToOptions.slice();return n.icons&&e.push({render(t){let i=document.createElement("div");return i.classList.add("cm-completionIcon"),t.type&&i.classList.add(...t.type.split(/\s+/g).map(s=>"cm-completionIcon-"+s)),i.setAttribute("aria-hidden","true"),i},position:20}),e.push({render(t,i,s){let r=document.createElement("span");r.className="cm-completionLabel";let{label:o}=t,l=0;for(let a=1;al&&r.appendChild(document.createTextNode(o.slice(l,h)));let f=r.appendChild(document.createElement("span"));f.appendChild(document.createTextNode(o.slice(h,c))),f.className="cm-completionMatchedText",l=c}return lt.position-i.position).map(t=>t.render)}function Hl(n,e,t){if(n<=t)return{from:0,to:n};if(e<0&&(e=0),e<=n>>1){let s=Math.floor(e/t);return{from:s*t,to:(s+1)*t}}let i=Math.floor((n-e)/t);return{from:n-(i+1)*t,to:n-i*t}}class Cg{constructor(e,t){this.view=e,this.stateField=t,this.info=null,this.placeInfo={read:()=>this.measureInfo(),write:l=>this.positionInfo(l),key:this};let i=e.state.field(t),{options:s,selected:r}=i.open,o=e.state.facet(It);this.optionContent=Sg(o),this.optionClass=o.optionClass,this.range=Hl(s.length,r,o.maxRenderedOptions),this.dom=document.createElement("div"),this.dom.className="cm-tooltip-autocomplete",this.dom.addEventListener("mousedown",l=>{for(let a=l.target,h;a&&a!=this.dom;a=a.parentNode)if(a.nodeName=="LI"&&(h=/-(\d+)$/.exec(a.id))&&+h[1]{this.info&&this.view.requestMeasure(this.placeInfo)})}mount(){this.updateSel()}update(e){e.state.field(this.stateField)!=e.startState.field(this.stateField)&&this.updateSel()}positioned(){this.info&&this.view.requestMeasure(this.placeInfo)}updateSel(){let e=this.view.state.field(this.stateField),t=e.open;if((t.selected>-1&&t.selected=this.range.to)&&(this.range=Hl(t.options.length,t.selected,this.view.state.facet(It).maxRenderedOptions),this.list.remove(),this.list=this.dom.appendChild(this.createListBox(t.options,e.id,this.range)),this.list.addEventListener("scroll",()=>{this.info&&this.view.requestMeasure(this.placeInfo)})),this.updateSelectedOption(t.selected)){this.info&&(this.info.remove(),this.info=null);let{completion:i}=t.options[t.selected],{info:s}=i;if(!s)return;let r=typeof s=="string"?document.createTextNode(s):s(i);if(!r)return;"then"in r?r.then(o=>{o&&this.view.state.field(this.stateField,!1)==e&&this.addInfoPane(o)}).catch(o=>He(this.view.state,o,"completion info")):this.addInfoPane(r)}}addInfoPane(e){let t=this.info=document.createElement("div");t.className="cm-tooltip cm-completionInfo",t.appendChild(e),this.dom.appendChild(t),this.view.requestMeasure(this.placeInfo)}updateSelectedOption(e){let t=null;for(let 
i=this.list.firstChild,s=this.range.from;i;i=i.nextSibling,s++)s==e?i.hasAttribute("aria-selected")||(i.setAttribute("aria-selected","true"),t=i):i.hasAttribute("aria-selected")&&i.removeAttribute("aria-selected");return t&&Mg(this.list,t),t}measureInfo(){let e=this.dom.querySelector("[aria-selected]");if(!e||!this.info)return null;let t=this.dom.ownerDocument.defaultView||window,i=this.dom.getBoundingClientRect(),s=this.info.getBoundingClientRect(),r=e.getBoundingClientRect();if(r.top>Math.min(t.innerHeight,i.bottom)-10||r.bottom=s.height||p>i.top?c=r.bottom-i.top+"px":f=i.bottom-r.top+"px"}return{top:c,bottom:f,maxWidth:h,class:a?o?"left-narrow":"right-narrow":l?"left":"right"}}positionInfo(e){this.info&&(e?(this.info.style.top=e.top,this.info.style.bottom=e.bottom,this.info.style.maxWidth=e.maxWidth,this.info.className="cm-tooltip cm-completionInfo cm-completionInfo-"+e.class):this.info.style.top="-1e6px")}createListBox(e,t,i){const s=document.createElement("ul");s.id=t,s.setAttribute("role","listbox"),s.setAttribute("aria-expanded","true"),s.setAttribute("aria-label",this.view.state.phrase("Completions"));for(let r=i.from;rnew Cg(e,n)}function Mg(n,e){let t=n.getBoundingClientRect(),i=e.getBoundingClientRect();i.topt.bottom&&(n.scrollTop+=i.bottom-t.bottom)}function Wl(n){return(n.boost||0)*100+(n.apply?10:0)+(n.info?5:0)+(n.type?1:0)}function Dg(n,e){let t=[],i=0;for(let l of n)if(l.hasResult())if(l.result.filter===!1){let a=l.result.getMatch;for(let h of l.result.options){let c=[1e9-i++];if(a)for(let f of a(h))c.push(f);t.push(new Vl(h,l,c))}}else{let a=new vg(e.sliceDoc(l.from,l.to)),h;for(let c of l.result.options)(h=a.match(c.label))&&(c.boost!=null&&(h[0]+=c.boost),t.push(new Vl(c,l,h)))}let s=[],r=null,o=e.facet(It).compareCompletions;for(let l of t.sort((a,h)=>h.match[0]-a.match[0]||o(a.completion,h.completion)))!r||r.label!=l.completion.label||r.detail!=l.completion.detail||r.type!=null&&l.completion.type!=null&&r.type!=l.completion.type||r.apply!=l.completion.apply?s.push(l):Wl(l.completion)>Wl(r)&&(s[s.length-1]=l),r=l.completion;return s}class Mi{constructor(e,t,i,s,r){this.options=e,this.attrs=t,this.tooltip=i,this.timestamp=s,this.selected=r}setSelected(e,t){return e==this.selected||e>=this.options.length?this:new Mi(this.options,zl(t,e),this.tooltip,this.timestamp,e)}static build(e,t,i,s,r){let o=Dg(e,t);if(!o.length)return null;let l=t.facet(It).selectOnOpen?0:-1;if(s&&s.selected!=l&&s.selected!=-1){let a=s.options[s.selected].completion;for(let h=0;hh.hasResult()?Math.min(a,h.from):a,1e8),create:Ag(zi),above:r.aboveCursor},s?s.timestamp:Date.now(),l)}map(e){return new Mi(this.options,this.attrs,Object.assign(Object.assign({},this.tooltip),{pos:e.mapPos(this.tooltip.pos)}),this.timestamp,this.selected)}}class Nn{constructor(e,t,i){this.active=e,this.id=t,this.open=i}static start(){return new Nn(Bg,"cm-ac-"+Math.floor(Math.random()*2e6).toString(36),null)}update(e){let{state:t}=e,i=t.facet(It),r=(i.override||t.languageDataAt("autocomplete",vr(t)).map(kg)).map(l=>(this.active.find(h=>h.source==l)||new st(l,this.active.some(h=>h.state!=0)?1:0)).update(e,i));r.length==this.active.length&&r.every((l,a)=>l==this.active[a])&&(r=this.active);let o=e.selection||r.some(l=>l.hasResult()&&e.changes.touchesRange(l.from,l.to))||!Tg(r,this.active)?Mi.build(r,t,this.id,this.open,i):this.open&&e.docChanged?this.open.map(e.changes):this.open;!o&&r.every(l=>l.state!=1)&&r.some(l=>l.hasResult())&&(r=r.map(l=>l.hasResult()?new st(l.source,0):l));for(let l of 
e.effects)l.is(Fc)&&(o=o&&o.setSelected(l.value,this.id));return r==this.active&&o==this.open?this:new Nn(r,this.id,o)}get tooltip(){return this.open?this.open.tooltip:null}get attrs(){return this.open?this.open.attrs:Og}}function Tg(n,e){if(n==e)return!0;for(let t=0,i=0;;){for(;t-1&&(t["aria-activedescendant"]=n+"-"+e),t}const Bg=[];function Pg(n){return n.isUserEvent("input.type")?"input":n.isUserEvent("delete.backward")?"delete":null}class st{constructor(e,t,i=-1){this.source=e,this.state=t,this.explicitPos=i}hasResult(){return!1}update(e,t){let i=Pg(e),s=this;i?s=s.handleUserEvent(e,i,t):e.docChanged?s=s.handleChange(e):e.selection&&s.state!=0&&(s=new st(s.source,0));for(let r of e.effects)if(r.is(_c))s=new st(s.source,1,r.value?vr(e.state):-1);else if(r.is(Vc))s=new st(s.source,0);else if(r.is(Eg))for(let o of r.value)o.source==s.source&&(s=o);return s}handleUserEvent(e,t,i){return t=="delete"||!i.activateOnTyping?this.map(e.changes):new st(this.source,1)}handleChange(e){return e.changes.touchesRange(vr(e.startState))?new st(this.source,0):this.map(e.changes)}map(e){return e.empty||this.explicitPos<0?this:new st(this.source,this.state,e.mapPos(this.explicitPos))}}const _c=R.define(),Vc=R.define(),Eg=R.define({map(n,e){return n.map(t=>t.map(e))}}),Fc=R.define(),zi=Me.define({create(){return Nn.start()},update(n,e){return n.update(e)},provide:n=>[Er.from(n,e=>e.tooltip),O.contentAttributes.from(n,e=>e.attrs)]});function cn(n,e="option"){return t=>{let i=t.state.field(zi,!1);if(!i||!i.open||Date.now()-i.open.timestamp-1?i.open.selected+s*(n?1:-1):n?0:o-1;return l<0?l=e=="page"?0:o-1:l>=o&&(l=e=="page"?o-1:0),t.dispatch({effects:Fc.of(l)}),!0}}const Rg=n=>{let e=n.state.field(zi,!1);return n.state.readOnly||!e||!e.open||e.open.selected<0||Date.now()-e.open.timestampn.state.field(zi,!1)?(n.dispatch({effects:_c.of(!0)}),!0):!1,Ig=n=>{let e=n.state.field(zi,!1);return!e||!e.active.some(t=>t.state!=0)?!1:(n.dispatch({effects:Vc.of(null)}),!0)},Ng=O.baseTheme({".cm-tooltip.cm-tooltip-autocomplete":{"& > ul":{fontFamily:"monospace",whiteSpace:"nowrap",overflow:"hidden auto",maxWidth_fallback:"700px",maxWidth:"min(700px, 95vw)",minWidth:"250px",maxHeight:"10em",listStyle:"none",margin:0,padding:0,"& > li":{overflowX:"hidden",textOverflow:"ellipsis",cursor:"pointer",padding:"1px 3px",lineHeight:1.2}}},"&light .cm-tooltip-autocomplete ul li[aria-selected]":{background:"#17c",color:"white"},"&dark .cm-tooltip-autocomplete ul li[aria-selected]":{background:"#347",color:"white"},".cm-completionListIncompleteTop:before, .cm-completionListIncompleteBottom:after":{content:'"···"',opacity:.5,display:"block",textAlign:"center"},".cm-tooltip.cm-completionInfo":{position:"absolute",padding:"3px 9px",width:"max-content",maxWidth:"400px",boxSizing:"border-box"},".cm-completionInfo.cm-completionInfo-left":{right:"100%"},".cm-completionInfo.cm-completionInfo-right":{left:"100%"},".cm-completionInfo.cm-completionInfo-left-narrow":{right:"30px"},".cm-completionInfo.cm-completionInfo-right-narrow":{left:"30px"},"&light .cm-snippetField":{backgroundColor:"#00000022"},"&dark .cm-snippetField":{backgroundColor:"#ffffff22"},".cm-snippetFieldPosition":{verticalAlign:"text-top",width:0,height:"1.15em",display:"inline-block",margin:"0 -0.7px -.7em",borderLeft:"1.4px dotted 
#888"},".cm-completionMatchedText":{textDecoration:"underline"},".cm-completionDetail":{marginLeft:"0.5em",fontStyle:"italic"},".cm-completionIcon":{fontSize:"90%",width:".8em",display:"inline-block",textAlign:"center",paddingRight:".6em",opacity:"0.6"},".cm-completionIcon-function, .cm-completionIcon-method":{"&:after":{content:"'ƒ'"}},".cm-completionIcon-class":{"&:after":{content:"'○'"}},".cm-completionIcon-interface":{"&:after":{content:"'◌'"}},".cm-completionIcon-variable":{"&:after":{content:"'𝑥'"}},".cm-completionIcon-constant":{"&:after":{content:"'𝐶'"}},".cm-completionIcon-type":{"&:after":{content:"'𝑡'"}},".cm-completionIcon-enum":{"&:after":{content:"'∪'"}},".cm-completionIcon-property":{"&:after":{content:"'□'"}},".cm-completionIcon-keyword":{"&:after":{content:"'🔑︎'"}},".cm-completionIcon-namespace":{"&:after":{content:"'▢'"}},".cm-completionIcon-text":{"&:after":{content:"'abc'",fontSize:"50%",verticalAlign:"middle"}}});class _g{constructor(e,t,i,s){this.field=e,this.line=t,this.from=i,this.to=s}}class $r{constructor(e,t,i){this.field=e,this.from=t,this.to=i}map(e){let t=e.mapPos(this.from,-1,ce.TrackDel),i=e.mapPos(this.to,1,ce.TrackDel);return t==null||i==null?null:new $r(this.field,t,i)}}class Jr{constructor(e,t){this.lines=e,this.fieldPositions=t}instantiate(e,t){let i=[],s=[t],r=e.doc.lineAt(t),o=/^\s*/.exec(r.text)[0];for(let a of this.lines){if(i.length){let h=o,c=/^\t*/.exec(a)[0].length;for(let f=0;fnew $r(a.field,s[a.line]+a.from,s[a.line]+a.to));return{text:i,ranges:l}}static parse(e){let t=[],i=[],s=[],r;for(let o of e.split(/\r\n?|\n/)){for(;r=/[#$]\{(?:(\d+)(?::([^}]*))?|([^}]*))\}/.exec(o);){let l=r[1]?+r[1]:null,a=r[2]||r[3]||"",h=-1;for(let c=0;c=h&&f.field++}s.push(new _g(h,i.length,r.index,r.index+a.length)),o=o.slice(0,r.index)+a+o.slice(r.index+r[0].length)}for(let l;l=/([$#])\\{/.exec(o);){o=o.slice(0,l.index)+l[1]+"{"+o.slice(l.index+l[0].length);for(let a of s)a.line==i.length&&a.from>l.index&&(a.from--,a.to--)}i.push(o)}return new Jr(i,s)}}let Vg=E.widget({widget:new class extends tt{toDOM(){let n=document.createElement("span");return n.className="cm-snippetFieldPosition",n}ignoreEvent(){return!1}}}),Fg=E.mark({class:"cm-snippetField"});class hi{constructor(e,t){this.ranges=e,this.active=t,this.deco=E.set(e.map(i=>(i.from==i.to?Vg:Fg).range(i.from,i.to)))}map(e){let t=[];for(let i of this.ranges){let s=i.map(e);if(!s)return null;t.push(s)}return new hi(t,this.active)}selectionInsideField(e){return e.ranges.every(t=>this.ranges.some(i=>i.field==this.active&&i.from<=t.from&&i.to>=t.to))}}const qi=R.define({map(n,e){return n&&n.map(e)}}),Hg=R.define(),Ni=Me.define({create(){return null},update(n,e){for(let t of e.effects){if(t.is(qi))return t.value;if(t.is(Hg)&&n)return new hi(n.ranges,t.value)}return n&&e.docChanged&&(n=n.map(e.changes)),n&&e.selection&&!n.selectionInsideField(e.selection)&&(n=null),n},provide:n=>O.decorations.from(n,e=>e?e.deco:E.none)});function Yr(n,e){return w.create(n.filter(t=>t.field==e).map(t=>w.range(t.from,t.to)))}function Wg(n){let e=Jr.parse(n);return(t,i,s,r)=>{let{text:o,ranges:l}=e.instantiate(t.state,s),a={changes:{from:s,to:r,insert:_.of(o)},scrollIntoView:!0};if(l.length&&(a.selection=Yr(l,0)),l.length>1){let h=new hi(l,0),c=a.effects=[qi.of(h)];t.state.field(Ni,!1)===void 0&&c.push(R.appendConfig.of([Ni,Ug,Gg,Ng]))}t.dispatch(t.state.update(a))}}function Hc(n){return({state:e,dispatch:t})=>{let i=e.field(Ni,!1);if(!i||n<0&&i.active==0)return!1;let s=i.active+n,r=n>0&&!i.ranges.some(o=>o.field==s+n);return 
t(e.update({selection:Yr(i.ranges,s),effects:qi.of(r?null:new hi(i.ranges,s))})),!0}}const zg=({state:n,dispatch:e})=>n.field(Ni,!1)?(e(n.update({effects:qi.of(null)})),!0):!1,qg=Hc(1),jg=Hc(-1),Kg=[{key:"Tab",run:qg,shift:jg},{key:"Escape",run:zg}],ql=D.define({combine(n){return n.length?n[0]:Kg}}),Ug=Vi.highest(qn.compute([ql],n=>n.facet(ql)));function cy(n,e){return Object.assign(Object.assign({},e),{apply:Wg(n)})}const Gg=O.domEventHandlers({mousedown(n,e){let t=e.state.field(Ni,!1),i;if(!t||(i=e.posAtCoords({x:n.clientX,y:n.clientY}))==null)return!1;let s=t.ranges.find(r=>r.from<=i&&r.to>=i);return!s||s.field==t.active?!1:(e.dispatch({selection:Yr(t.ranges,s.field),effects:qi.of(t.ranges.some(r=>r.field>s.field)?new hi(t.ranges,s.field):null)}),!0)}}),_i={brackets:["(","[","{","'",'"'],before:")]}:;>",stringPrefixes:[]},Tt=R.define({map(n,e){let t=e.mapPos(n,-1,ce.TrackAfter);return t??void 0}}),Xr=R.define({map(n,e){return e.mapPos(n)}}),Zr=new class extends Bt{};Zr.startSide=1;Zr.endSide=-1;const Wc=Me.define({create(){return F.empty},update(n,e){if(e.selection){let t=e.state.doc.lineAt(e.selection.main.head).from,i=e.startState.doc.lineAt(e.startState.selection.main.head).from;t!=e.changes.mapPos(i,-1)&&(n=F.empty)}n=n.map(e.changes);for(let t of e.effects)t.is(Tt)?n=n.update({add:[Zr.range(t.value,t.value+1)]}):t.is(Xr)&&(n=n.update({filter:i=>i!=t.value}));return n}});function $g(){return[Yg,Wc]}const Ts="()[]{}<>";function zc(n){for(let e=0;e{if((Jg?n.composing:n.compositionStarted)||n.state.readOnly)return!1;let s=n.state.selection.main;if(i.length>2||i.length==2&&Ee(ge(i,0))==1||e!=s.from||t!=s.to)return!1;let r=Qg(n.state,i);return r?(n.dispatch(r),!0):!1}),Xg=({state:n,dispatch:e})=>{if(n.readOnly)return!1;let i=qc(n,n.selection.main.head).brackets||_i.brackets,s=null,r=n.changeByRange(o=>{if(o.empty){let l=e0(n.doc,o.head);for(let a of i)if(a==l&&Zn(n.doc,o.head)==zc(ge(a,0)))return{changes:{from:o.head-a.length,to:o.head+a.length},range:w.cursor(o.head-a.length)}}return{range:s=o}});return s||e(n.update(r,{scrollIntoView:!0,userEvent:"delete.backward"})),!s},Zg=[{key:"Backspace",run:Xg}];function Qg(n,e){let t=qc(n,n.selection.main.head),i=t.brackets||_i.brackets;for(let s of i){let r=zc(ge(s,0));if(e==s)return r==s?n0(n,s,i.indexOf(s+s+s)>-1,t):t0(n,s,r,t.before||_i.before);if(e==r&&jc(n,n.selection.main.from))return i0(n,s,r)}return null}function jc(n,e){let t=!1;return n.field(Wc).between(0,n.doc.length,i=>{i==e&&(t=!0)}),t}function Zn(n,e){let t=n.sliceString(e,e+2);return t.slice(0,Ee(ge(t,0)))}function e0(n,e){let t=n.sliceString(e-2,e);return Ee(ge(t,0))==t.length?t:t.slice(1)}function t0(n,e,t,i){let s=null,r=n.changeByRange(o=>{if(!o.empty)return{changes:[{insert:e,from:o.from},{insert:t,from:o.to}],effects:Tt.of(o.to+e.length),range:w.range(o.anchor+e.length,o.head+e.length)};let l=Zn(n.doc,o.head);return!l||/\s/.test(l)||i.indexOf(l)>-1?{changes:{insert:e+t,from:o.head},effects:Tt.of(o.head+e.length),range:w.cursor(o.head+e.length)}:{range:s=o}});return s?null:n.update(r,{scrollIntoView:!0,userEvent:"input.type"})}function i0(n,e,t){let i=null,s=n.selection.ranges.map(r=>r.empty&&Zn(n.doc,r.head)==t?w.cursor(r.head+t.length):i=r);return i?null:n.update({selection:w.create(s,n.selection.mainIndex),scrollIntoView:!0,effects:n.selection.ranges.map(({from:r})=>Xr.of(r))})}function n0(n,e,t,i){let 
s=i.stringPrefixes||_i.stringPrefixes,r=null,o=n.changeByRange(l=>{if(!l.empty)return{changes:[{insert:e,from:l.from},{insert:e,from:l.to}],effects:Tt.of(l.to+e.length),range:w.range(l.anchor+e.length,l.head+e.length)};let a=l.head,h=Zn(n.doc,a),c;if(h==e){if(jl(n,a))return{changes:{insert:e+e,from:a},effects:Tt.of(a+e.length),range:w.cursor(a+e.length)};if(jc(n,a)){let f=t&&n.sliceDoc(a,a+e.length*3)==e+e+e;return{range:w.cursor(a+e.length*(f?3:1)),effects:Xr.of(a)}}}else{if(t&&n.sliceDoc(a-2*e.length,a)==e+e&&(c=Kl(n,a-2*e.length,s))>-1&&jl(n,c))return{changes:{insert:e+e+e+e,from:a},effects:Tt.of(a+e.length),range:w.cursor(a+e.length)};if(n.charCategorizer(a)(h)!=Re.Word&&Kl(n,a,s)>-1&&!s0(n,a,e,s))return{changes:{insert:e+e,from:a},effects:Tt.of(a+e.length),range:w.cursor(a+e.length)}}return{range:r=l}});return r?null:n.update(o,{scrollIntoView:!0,userEvent:"input.type"})}function jl(n,e){let t=pe(n).resolveInner(e+1);return t.parent&&t.from==e}function s0(n,e,t,i){let s=pe(n).resolveInner(e,-1),r=i.reduce((o,l)=>Math.max(o,l.length),0);for(let o=0;o<5;o++){let l=n.sliceDoc(s.from,Math.min(s.to,s.from+t.length+r)),a=l.indexOf(t);if(!a||a>-1&&i.indexOf(l.slice(0,a))>-1){let c=s.firstChild;for(;c&&c.from==s.from&&c.to-c.from>t.length+a;){if(n.sliceDoc(c.to-t.length,c.to)==t)return!1;c=c.firstChild}return!0}let h=s.to==e&&s.parent;if(!h)break;s=h}return!1}function Kl(n,e,t){let i=n.charCategorizer(e);if(i(n.sliceDoc(e-1,e))!=Re.Word)return e;for(let s of t){let r=e-s.length;if(n.sliceDoc(r,e)==s&&i(n.sliceDoc(r-1,r))!=Re.Word)return r}return-1}const r0=[{key:"Ctrl-Space",run:Lg},{key:"Escape",run:Ig},{key:"ArrowDown",run:cn(!0)},{key:"ArrowUp",run:cn(!1)},{key:"PageDown",run:cn(!0,"page")},{key:"PageUp",run:cn(!1,"page")},{key:"Enter",run:Rg}];function Je(){var n=arguments[0];typeof n=="string"&&(n=document.createElement(n));var e=1,t=arguments[1];if(t&&typeof t=="object"&&t.nodeType==null&&!Array.isArray(t)){for(var i in t)if(Object.prototype.hasOwnProperty.call(t,i)){var s=t[i];typeof s=="string"?n.setAttribute(i,s):s!=null&&(n[i]=s)}e++}for(;el.from==l.to||l.from==l.to-1&&i.doc.lineAt(l.from).to==l.from?E.widget({widget:new g0(l),diagnostic:l}).range(l.from):E.mark({attributes:{class:"cm-lintRange cm-lintRange-"+l.severity},diagnostic:l}).range(l.from,l.to)),!0);return new At(o,t,ni(o))}}function ni(n,e=null,t=0){let i=null;return n.between(t,1e9,(s,r,{spec:o})=>{if(!(e&&o.diagnostic!=e))return i=new o0(s,r,o.diagnostic),!1}),i}function l0(n,e){return!!(n.effects.some(t=>t.is(Qr))||n.changes.touchesRange(e.pos))}function Uc(n,e){return n.field(Be,!1)?e:e.concat(R.appendConfig.of([Be,O.decorations.compute([Be],t=>{let{selected:i,panel:s}=t.field(Be);return!i||!s||i.from==i.to?E.none:E.set([h0.range(i.from,i.to)])}),Sd(c0,{hideOn:l0}),b0]))}function a0(n,e){return{effects:Uc(n,[Qr.of(e)])}}const Qr=R.define(),eo=R.define(),Gc=R.define(),Be=Me.define({create(){return new At(E.none,null,null)},update(n,e){if(e.docChanged){let t=n.diagnostics.map(e.changes),i=null;if(n.selected){let s=e.changes.mapPos(n.selected.from,1);i=ni(t,n.selected.diagnostic,s)||ni(t,null,s)}n=new At(t,n.panel,i)}for(let t of e.effects)t.is(Qr)?n=At.init(t.value,n.panel,e.state):t.is(eo)?n=new At(n.diagnostics,t.value?Qn.open:null,n.selected):t.is(Gc)&&(n=new At(n.diagnostics,n.panel,t.value));return n},provide:n=>[ar.from(n,e=>e.panel),O.decorations.from(n,e=>e.diagnostics)]}),h0=E.mark({class:"cm-lintRange cm-lintRange-active"});function 
c0(n,e,t){let{diagnostics:i}=n.state.field(Be),s=[],r=2e8,o=0;i.between(e-(t<0?1:0),e+(t>0?1:0),(a,h,{spec:c})=>{e>=a&&e<=h&&(a==h||(e>a||t>0)&&(eJc(n,t,!1)))}const u0=n=>{let e=n.state.field(Be,!1);(!e||!e.panel)&&n.dispatch({effects:Uc(n.state,[eo.of(!0)])});let t=Md(n,Qn.open);return t&&t.dom.querySelector(".cm-panel-lint ul").focus(),!0},Ul=n=>{let e=n.state.field(Be,!1);return!e||!e.panel?!1:(n.dispatch({effects:eo.of(!1)}),!0)},d0=n=>{let e=n.state.field(Be,!1);if(!e)return!1;let t=n.state.selection.main,i=e.diagnostics.iter(t.to+1);return!i.value&&(i=e.diagnostics.iter(0),!i.value||i.from==t.from&&i.to==t.to)?!1:(n.dispatch({selection:{anchor:i.from,head:i.to},scrollIntoView:!0}),!0)},p0=[{key:"Mod-Shift-m",run:u0},{key:"F8",run:d0}],m0=be.fromClass(class{constructor(n){this.view=n,this.timeout=-1,this.set=!0;let{delay:e}=n.state.facet(Kt);this.lintTime=Date.now()+e,this.run=this.run.bind(this),this.timeout=setTimeout(this.run,e)}run(){let n=Date.now();if(nPromise.resolve(i(this.view)))).then(i=>{let s=i.reduce((r,o)=>r.concat(o));this.view.state.doc==e.doc&&this.view.dispatch(a0(this.view.state,s))},i=>{He(this.view.state,i)})}}update(n){let e=n.state.facet(Kt);(n.docChanged||e!=n.startState.facet(Kt))&&(this.lintTime=Date.now()+e.delay,this.set||(this.set=!0,this.timeout=setTimeout(this.run,e.delay)))}force(){this.set&&(this.lintTime=Date.now(),this.run())}destroy(){clearTimeout(this.timeout)}}),Kt=D.define({combine(n){return Object.assign({sources:n.map(e=>e.source)},_t(n.map(e=>e.config),{delay:750,markerFilter:null,tooltipFilter:null}))},enables:m0});function $c(n){let e=[];if(n)e:for(let{name:t}of n){for(let i=0;ir.toLowerCase()==s.toLowerCase())){e.push(s);continue e}}e.push("")}return e}function Jc(n,e,t){var i;let s=t?$c(e.actions):[];return Je("li",{class:"cm-diagnostic cm-diagnostic-"+e.severity},Je("span",{class:"cm-diagnosticText"},e.renderMessage?e.renderMessage():e.message),(i=e.actions)===null||i===void 0?void 0:i.map((r,o)=>{let l=f=>{f.preventDefault();let u=ni(n.state.field(Be).diagnostics,e);u&&r.apply(n,u.from,u.to)},{name:a}=r,h=s[o]?a.indexOf(s[o]):-1,c=h<0?a:[a.slice(0,h),Je("u",a.slice(h,h+1)),a.slice(h+1)];return Je("button",{type:"button",class:"cm-diagnosticAction",onclick:l,onmousedown:l,"aria-label":` Action: ${a}${h<0?"":` (access key "${s[o]})"`}.`},c)}),e.source&&Je("div",{class:"cm-diagnosticSource"},e.source))}class g0 extends tt{constructor(e){super(),this.diagnostic=e}eq(e){return e.diagnostic==this.diagnostic}toDOM(){return Je("span",{class:"cm-lintPoint cm-lintPoint-"+this.diagnostic.severity})}}class Gl{constructor(e,t){this.diagnostic=t,this.id="item_"+Math.floor(Math.random()*4294967295).toString(16),this.dom=Jc(e,t,!0),this.dom.id=this.id,this.dom.setAttribute("role","option")}}class Qn{constructor(e){this.view=e,this.items=[];let t=s=>{if(s.keyCode==27)Ul(this.view),this.view.focus();else if(s.keyCode==38||s.keyCode==33)this.moveSelection((this.selectedIndex-1+this.items.length)%this.items.length);else if(s.keyCode==40||s.keyCode==34)this.moveSelection((this.selectedIndex+1)%this.items.length);else if(s.keyCode==36)this.moveSelection(0);else if(s.keyCode==35)this.moveSelection(this.items.length-1);else if(s.keyCode==13)this.view.focus();else if(s.keyCode>=65&&s.keyCode<=90&&this.selectedIndex>=0){let{diagnostic:r}=this.items[this.selectedIndex],o=$c(r.actions);for(let l=0;l{for(let r=0;rUl(this.view)},"×")),this.update()}get selectedIndex(){let e=this.view.state.field(Be).selected;if(!e)return-1;for(let t=0;t{let h=-1,c;for(let 
f=i;fi&&(this.items.splice(i,h-i),s=!0)),t&&c.diagnostic==t.diagnostic?c.dom.hasAttribute("aria-selected")||(c.dom.setAttribute("aria-selected","true"),r=c):c.dom.hasAttribute("aria-selected")&&c.dom.removeAttribute("aria-selected"),i++});i({sel:r.dom.getBoundingClientRect(),panel:this.list.getBoundingClientRect()}),write:({sel:o,panel:l})=>{o.topl.bottom&&(this.list.scrollTop+=o.bottom-l.bottom)}})):this.selectedIndex<0&&this.list.removeAttribute("aria-activedescendant"),s&&this.sync()}sync(){let e=this.list.firstChild;function t(){let i=e;e=i.nextSibling,i.remove()}for(let i of this.items)if(i.dom.parentNode==this.list){for(;e!=i.dom;)t();e=i.dom.nextSibling}else this.list.insertBefore(i.dom,e);for(;e;)t()}moveSelection(e){if(this.selectedIndex<0)return;let t=this.view.state.field(Be),i=ni(t.diagnostics,this.items[e].diagnostic);i&&this.view.dispatch({selection:{anchor:i.from,head:i.to},scrollIntoView:!0,effects:Gc.of(i)})}static open(e){return new Qn(e)}}function y0(n,e='viewBox="0 0 40 40"'){return`url('data:image/svg+xml,${encodeURIComponent(n)}')`}function Os(n){return y0(``,'width="6" height="3"')}const b0=O.baseTheme({".cm-diagnostic":{padding:"3px 6px 3px 8px",marginLeft:"-1px",display:"block",whiteSpace:"pre-wrap"},".cm-diagnostic-error":{borderLeft:"5px solid #d11"},".cm-diagnostic-warning":{borderLeft:"5px solid orange"},".cm-diagnostic-info":{borderLeft:"5px solid #999"},".cm-diagnosticAction":{font:"inherit",border:"none",padding:"2px 4px",backgroundColor:"#444",color:"white",borderRadius:"3px",marginLeft:"8px"},".cm-diagnosticSource":{fontSize:"70%",opacity:.7},".cm-lintRange":{backgroundPosition:"left bottom",backgroundRepeat:"repeat-x",paddingBottom:"0.7px"},".cm-lintRange-error":{backgroundImage:Os("#d11")},".cm-lintRange-warning":{backgroundImage:Os("orange")},".cm-lintRange-info":{backgroundImage:Os("#999")},".cm-lintRange-active":{backgroundColor:"#ffdd9980"},".cm-tooltip-lint":{padding:0,margin:0},".cm-lintPoint":{position:"relative","&:after":{content:'""',position:"absolute",bottom:0,left:"-2px",borderLeft:"3px solid transparent",borderRight:"3px solid transparent",borderBottom:"4px solid #d11"}},".cm-lintPoint-warning":{"&:after":{borderBottomColor:"orange"}},".cm-lintPoint-info":{"&:after":{borderBottomColor:"#999"}},".cm-panel.cm-panel-lint":{position:"relative","& ul":{maxHeight:"100px",overflowY:"auto","& [aria-selected]":{backgroundColor:"#ddd","& u":{textDecoration:"underline"}},"&:focus [aria-selected]":{background_fallback:"#bdf",backgroundColor:"Highlight",color_fallback:"white",color:"HighlightText"},"& u":{textDecoration:"none"},padding:0,margin:0},"& [name=close]":{position:"absolute",top:"0",right:"2px",background:"inherit",border:"none",font:"inherit",padding:0,margin:0}}}),w0=(()=>[Ld(),td(),em(),Dp(),Ku(),N.allowMultipleSelections.of(!0),pp(),Hr(Pp,{fallback:!0}),$g(),ud(),md(),qn.of([...Zg,...Zm,...hm,...Cp,...r0,...p0])])(),$l={python:()=>Pe(()=>import("./index-0d85b7dd.js"),["assets/index-0d85b7dd.js","assets/index-f8ff95a1.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"]).then(n=>n.python()),markdown:async()=>{const[n,e]=await 
Promise.all([Pe(()=>import("./index-7648fc8d.js"),["assets/index-7648fc8d.js","assets/index-c48bd2e8.js","assets/index-f8ff95a1.js","assets/index-7f39cecc.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js","assets/index-b6ab4199.js"]),Pe(()=>import("./frontmatter-5878523d.js"),["assets/frontmatter-5878523d.js","assets/yaml-95012b83.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"])]);return n.markdown({extensions:[e.frontmatter]})},json:()=>Pe(()=>import("./index-82eb6288.js"),["assets/index-82eb6288.js","assets/index-f8ff95a1.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"]).then(n=>n.json()),html:()=>Pe(()=>import("./index-c48bd2e8.js"),["assets/index-c48bd2e8.js","assets/index-f8ff95a1.js","assets/index-7f39cecc.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js","assets/index-b6ab4199.js"]).then(n=>n.html()),css:()=>Pe(()=>import("./index-7f39cecc.js"),["assets/index-7f39cecc.js","assets/index-f8ff95a1.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"]).then(n=>n.css()),javascript:()=>Pe(()=>import("./index-b6ab4199.js"),["assets/index-b6ab4199.js","assets/index-f8ff95a1.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"]).then(n=>n.javascript()),typescript:()=>Pe(()=>import("./index-b6ab4199.js"),["assets/index-b6ab4199.js","assets/index-f8ff95a1.js","assets/index-1d65707a.js","assets/index-f2292b12.css","assets/Blocks-c9e1499d.js","assets/Button-f155035a.js","assets/Button-9b719f62.css","assets/Blocks-f08d137e.css","assets/BlockLabel-66866176.js","assets/Empty-eec13822.js","assets/Copy-9f1657c4.js","assets/Download-daff1959.js"]).then(n=>n.javascript({typescript:!0})),yaml:()=>Pe(()=>import("./yaml-95012b83.js"),[]).then(n=>jt.define(n.yaml)),dockerfile:()=>Pe(()=>import("./dockerfile-d67bbd50.js"),[]).then(n=>jt.define(n.dockerFile)),shell:()=>Pe(()=>import("./shell-86dd1d99.js"),[]).then(n=>jt.define(n.shell)),r:()=>Pe(()=>import("./r-3ca97919.js"),[]).then(n=>jt.define(n.r))},k0={py:"python",md:"markdown",js:"javascript",ts:"typescript",sh:"shell"};async function v0(n){const e=$l[n]||$l[k0[n]]||void 0;if(e)return e()}function x0(n){let 
e,t,i;return{c(){e=dt("div"),t=dt("div"),se(t,"class",i="codemirror-wrapper "+n[0]+" svelte-1sc8eck"),se(e,"class","wrap svelte-1sc8eck")},m(s,r){Ce(s,e,r),Yt(e,t),n[12](t)},p(s,[r]){r&1&&i!==(i="codemirror-wrapper "+s[0]+" svelte-1sc8eck")&&se(t,"class",i)},i:vi,o:vi,d(s){s&&Ae(e),n[12](null)}}}function S0(n){let e=n.dom.querySelectorAll(".cm-gutterElement");if(e.length===0)return null;for(var t=0;t(y=k(),()=>y?.destroy()));function Q(M){xr[M?"unshift":"push"](()=>{g=M,t(1,g)})}return n.$$set=M=>{"classNames"in M&&t(0,i=M.classNames),"value"in M&&t(2,s=M.value),"dark_mode"in M&&t(3,r=M.dark_mode),"basic"in M&&t(4,o=M.basic),"language"in M&&t(5,l=M.language),"lines"in M&&t(6,a=M.lines),"extensions"in M&&t(7,h=M.extensions),"useTab"in M&&t(8,c=M.useTab),"readonly"in M&&t(9,f=M.readonly),"placeholder"in M&&t(10,u=M.placeholder)},n.$$.update=()=>{n.$$.dirty&32&&b(l),n.$$.dirty&2048&&G(),n.$$.dirty&4&&v(s),n.$$.dirty&64&&S()},[i,g,s,r,o,l,a,h,c,f,u,p,Q]}class Yc extends si{constructor(e){super(),ri(this,e,C0,x0,oi,{classNames:0,value:2,dark_mode:3,basic:4,language:5,lines:6,extensions:7,useTab:8,readonly:9,placeholder:10})}}function Jl(n){let e,t,i,s;return t=new ca({}),{c(){e=dt("span"),fe(t.$$.fragment),se(e,"class","check svelte-qi7jcw")},m(r,o){Ce(r,e,o),ue(t,e,null),s=!0},i(r){s||(H(t.$$.fragment,r),r&&ea(()=>{s&&(i||(i=bn(e,wn,{},!0)),i.run(1))}),s=!0)},o(r){j(t.$$.fragment,r),r&&(i||(i=bn(e,wn,{},!1)),i.run(0)),s=!1},d(r){r&&Ae(e),de(t),r&&i&&i.end()}}}function A0(n){let e,t,i,s,r,o,l;i=new ef({});let a=n[0]&&Jl();return{c(){e=dt("button"),t=dt("span"),fe(i.$$.fragment),s=pt(),a&&a.c(),se(t,"class","copy-text"),yn(t,"copied",n[0]),se(e,"title","copy"),se(e,"class","svelte-qi7jcw")},m(h,c){Ce(h,e,c),Yt(e,t),ue(i,t,null),Yt(e,s),a&&a.m(e,null),r=!0,o||(l=Zl(e,"click",n[1]),o=!0)},p(h,[c]){(!r||c&1)&&yn(t,"copied",h[0]),h[0]?a?c&1&&H(a,1):(a=Jl(),a.c(),H(a,1),a.m(e,null)):a&&(_n(),j(a,1,1,()=>{a=null}),Vn())},i(h){r||(H(i.$$.fragment,h),H(a),r=!0)},o(h){j(i.$$.fragment,h),j(a),r=!1},d(h){h&&Ae(e),de(i),a&&a.d(),o=!1,l()}}}function M0(n,e,t){let i=!1,{value:s}=e,r;function o(){t(0,i=!0),r&&clearTimeout(r),r=setTimeout(()=>{t(0,i=!1)},2e3)}async function l(){"clipboard"in navigator&&(await navigator.clipboard.writeText(s),o())}return Ql(()=>{r&&clearTimeout(r)}),n.$$set=a=>{"value"in a&&t(2,s=a.value)},[i,l,s]}class D0 extends si{constructor(e){super(),ri(this,e,M0,A0,oi,{value:2})}}function Yl(n){let e,t,i,s;return t=new ca({}),{c(){e=dt("span"),fe(t.$$.fragment),se(e,"class","check svelte-14d303a")},m(r,o){Ce(r,e,o),ue(t,e,null),s=!0},i(r){s||(H(t.$$.fragment,r),r&&ea(()=>{s&&(i||(i=bn(e,wn,{},!0)),i.run(1))}),s=!0)},o(r){j(t.$$.fragment,r),r&&(i||(i=bn(e,wn,{},!1)),i.run(0)),s=!1},d(r){r&&Ae(e),de(t),r&&i&&i.end()}}}function T0(n){let e,t,i,s,r,o,l;t=new tf({});let a=n[0]&&Yl();return{c(){e=dt("a"),fe(t.$$.fragment),i=pt(),a&&a.c(),se(e,"download",s="file."+n[2]),se(e,"href",n[1]),se(e,"class","svelte-14d303a"),yn(e,"copied",n[0])},m(h,c){Ce(h,e,c),ue(t,e,null),Yt(e,i),a&&a.m(e,null),r=!0,o||(l=Zl(e,"click",n[3]),o=!0)},p(h,[c]){h[0]?a?c&1&&H(a,1):(a=Yl(),a.c(),H(a,1),a.m(e,null)):a&&(_n(),j(a,1,1,()=>{a=null}),Vn()),(!r||c&4&&s!==(s="file."+h[2]))&&se(e,"download",s),(!r||c&2)&&se(e,"href",h[1]),(!r||c&1)&&yn(e,"copied",h[0])},i(h){r||(H(t.$$.fragment,h),H(a),r=!0)},o(h){j(t.$$.fragment,h),j(a),r=!1},d(h){h&&Ae(e),de(t),a&&a.d(),o=!1,l()}}}function 
O0(n){return{py:"py",python:"py",md:"md",markdown:"md",json:"json",html:"html",css:"css",js:"js",javascript:"js",ts:"ts",typescript:"ts",yaml:"yaml",yml:"yml",dockerfile:"dockerfile",sh:"sh",shell:"sh",r:"r"}[n]||"txt"}function B0(n,e,t){let i,s,{value:r}=e,{language:o}=e,l=!1,a;function h(){t(0,l=!0),a&&clearTimeout(a),a=setTimeout(()=>{t(0,l=!1)},2e3)}return Ql(()=>{a&&clearTimeout(a)}),n.$$set=c=>{"value"in c&&t(4,r=c.value),"language"in c&&t(5,o=c.language)},n.$$.update=()=>{n.$$.dirty&32&&t(2,i=O0(o)),n.$$.dirty&16&&t(1,s=URL.createObjectURL(new Blob([r])))},[l,s,i,h,r,o]}class P0 extends si{constructor(e){super(),ri(this,e,B0,T0,oi,{value:4,language:5})}}function E0(n){let e,t,i,s,r;return t=new P0({props:{value:n[0],language:n[1]}}),s=new D0({props:{value:n[0]}}),{c(){e=dt("div"),fe(t.$$.fragment),i=pt(),fe(s.$$.fragment),se(e,"class","svelte-1yin446")},m(o,l){Ce(o,e,l),ue(t,e,null),Yt(e,i),ue(s,e,null),r=!0},p(o,[l]){const a={};l&1&&(a.value=o[0]),l&2&&(a.language=o[1]),t.$set(a);const h={};l&1&&(h.value=o[0]),s.$set(h)},i(o){r||(H(t.$$.fragment,o),H(s.$$.fragment,o),r=!0)},o(o){j(t.$$.fragment,o),j(s.$$.fragment,o),r=!1},d(o){o&&Ae(e),de(t),de(s)}}}function R0(n,e,t){let{value:i}=e,{language:s}=e;return n.$$set=r=>{"value"in r&&t(0,i=r.value),"language"in r&&t(1,s=r.language)},[i,s]}class L0 extends si{constructor(e){super(),ri(this,e,R0,E0,oi,{value:0,language:1})}}function I0(n){let e,t;return e=new aa({props:{variant:"solid",padding:!1,elem_id:n[3],elem_classes:n[4],visible:n[5],$$slots:{default:[_0]},$$scope:{ctx:n}}}),{c(){fe(e.$$.fragment)},m(i,s){ue(e,i,s),t=!0},p(i,s){const r={};s&8&&(r.elem_id=i[3]),s&16&&(r.elem_classes=i[4]),s&32&&(r.visible=i[5]),s&131975&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(H(e.$$.fragment,i),t=!0)},o(i){j(e.$$.fragment,i),t=!1},d(i){de(e,i)}}}function N0(n){let e,t;return e=new aa({props:{variant:"solid",padding:!1,elem_id:n[3],elem_classes:n[4],visible:n[5],$$slots:{default:[W0]},$$scope:{ctx:n}}}),{c(){fe(e.$$.fragment)},m(i,s){ue(e,i,s),t=!0},p(i,s){const r={};s&8&&(r.elem_id=i[3]),s&16&&(r.elem_classes=i[4]),s&32&&(r.visible=i[5]),s&131975&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(H(e.$$.fragment,i),t=!0)},o(i){j(e.$$.fragment,i),t=!1},d(i){de(e,i)}}}function _0(n){let e,t,i,s,r,o,l;const a=[n[9]];let h={};for(let u=0;usa(r,"value",c)),{c(){fe(e.$$.fragment),t=pt(),fe(i.$$.fragment),s=pt(),fe(r.$$.fragment)},m(u,d){ue(e,u,d),Ce(u,t,d),ue(i,u,d),Ce(u,s,d),ue(r,u,d),l=!0},p(u,d){const p=d&512?ra(a,[oa(u[9])]):{};e.$set(p);const g={};d&256&&(g.show_label=u[8]),d&128&&(g.label=u[7]),i.$set(g);const y={};d&2&&(y.language=u[1]),d&4&&(y.lines=u[2]),!o&&d&1&&(o=!0,y.value=u[0],la(()=>o=!1)),r.$set(y)},i(u){l||(H(e.$$.fragment,u),H(i.$$.fragment,u),H(r.$$.fragment,u),l=!0)},o(u){j(e.$$.fragment,u),j(i.$$.fragment,u),j(r.$$.fragment,u),l=!1},d(u){u&&(Ae(t),Ae(s)),de(e,u),de(i,u),de(r,u)}}}function V0(n){let e,t,i,s,r;e=new L0({props:{language:n[1],value:n[0]}});function o(a){n[13](a)}let l={language:n[1],lines:n[2],dark_mode:n[10],readonly:!0};return n[0]!==void 0&&(l.value=n[0]),i=new Yc({props:l}),xr.push(()=>sa(i,"value",o)),{c(){fe(e.$$.fragment),t=pt(),fe(i.$$.fragment)},m(a,h){ue(e,a,h),Ce(a,t,h),ue(i,a,h),r=!0},p(a,h){const c={};h&2&&(c.language=a[1]),h&1&&(c.value=a[0]),e.$set(c);const f={};h&2&&(f.language=a[1]),h&4&&(f.lines=a[2]),!s&&h&1&&(s=!0,f.value=a[0],la(()=>s=!1)),i.$set(f)},i(a){r||(H(e.$$.fragment,a),H(i.$$.fragment,a),r=!0)},o(a){j(e.$$.fragment,a),j(i.$$.fragment,a),r=!1},d(a){a&&Ae(t),de(e,a),de(i,a)}}}function 
F0(n){let e,t;return e=new Qc({props:{unpadded_box:!0,size:"large",$$slots:{default:[H0]},$$scope:{ctx:n}}}),{c(){fe(e.$$.fragment)},m(i,s){ue(e,i,s),t=!0},p(i,s){const r={};s&131072&&(r.$$scope={dirty:s,ctx:i}),e.$set(r)},i(i){t||(H(e.$$.fragment,i),t=!0)},o(i){j(e.$$.fragment,i),t=!1},d(i){de(e,i)}}}function H0(n){let e,t;return e=new Sr({}),{c(){fe(e.$$.fragment)},m(i,s){ue(e,i,s),t=!0},i(i){t||(H(e.$$.fragment,i),t=!0)},o(i){j(e.$$.fragment,i),t=!1},d(i){de(e,i)}}}function W0(n){let e,t,i,s,r,o,l,a;const h=[n[9]];let c={};for(let p=0;p{u[v]=null}),Vn(),o=u[r],o?o.p(p,g):(o=u[r]=f[r](p),o.c()),H(o,1),o.m(l.parentNode,l))},i(p){a||(H(e.$$.fragment,p),H(i.$$.fragment,p),H(o),a=!0)},o(p){j(e.$$.fragment,p),j(i.$$.fragment,p),j(o),a=!1},d(p){p&&(Ae(t),Ae(s),Ae(l)),de(e,p),de(i,p),u[r].d(p)}}}function z0(n){let e,t,i,s;const r=[N0,I0],o=[];function l(a,h){return a[6]==="static"?0:1}return e=l(n),t=o[e]=r[e](n),{c(){t.c(),i=ta()},m(a,h){o[e].m(a,h),Ce(a,i,h),s=!0},p(a,[h]){let c=e;e=l(a),e===c?o[e].p(a,h):(_n(),j(o[c],1,1,()=>{o[c]=null}),Vn(),t=o[e],t?t.p(a,h):(t=o[e]=r[e](a),t.c()),H(t,1),t.m(i.parentNode,i))},i(a){s||(H(t),s=!0)},o(a){j(t),s=!1},d(a){a&&Ae(i),o[e].d(a)}}}function q0(n,e,t){const i=Xl();let{value:s=""}=e,{value_is_output:r=!1}=e,{language:o=""}=e,{lines:l=5}=e,{target:a}=e,{elem_id:h=""}=e,{elem_classes:c=[]}=e,{visible:f=!0}=e,{mode:u}=e,{label:d="Code"}=e,{show_label:p=!0}=e,{loading_status:g}=e,y=a.classList.contains("dark");function b(){i("change",s),r||i("input")}Zc(()=>{t(11,r=!1)});function v(k){s=k,t(0,s)}function S(k){s=k,t(0,s)}return n.$$set=k=>{"value"in k&&t(0,s=k.value),"value_is_output"in k&&t(11,r=k.value_is_output),"language"in k&&t(1,o=k.language),"lines"in k&&t(2,l=k.lines),"target"in k&&t(12,a=k.target),"elem_id"in k&&t(3,h=k.elem_id),"elem_classes"in k&&t(4,c=k.elem_classes),"visible"in k&&t(5,f=k.visible),"mode"in k&&t(6,u=k.mode),"label"in k&&t(7,d=k.label),"show_label"in k&&t(8,p=k.show_label),"loading_status"in k&&t(9,g=k.loading_status)},n.$$.update=()=>{n.$$.dirty&1&&b()},[s,o,l,h,c,f,u,d,p,g,y,r,a,v,S]}class j0 extends si{constructor(e){super(),ri(this,e,q0,z0,oi,{value:0,value_is_output:11,language:1,lines:2,target:12,elem_id:3,elem_classes:4,visible:5,mode:6,label:7,show_label:8,loading_status:9})}}const K0=j0,U0=["static","dynamic"],fy=Object.freeze(Object.defineProperty({__proto__:null,Component:K0,modes:U0},Symbol.toStringTag,{value:"Module"}));export{np as A,hy as B,bg as C,Id as D,w as E,fy as F,ee as I,ur as L,Lr as N,Rh as P,jt as S,z as T,ay as a,sy as b,xe as c,ry as d,L as e,gp as f,Ge as g,pe as h,op as i,Vi as j,qn as k,Ie as l,Nh as m,Dt as n,mp as o,iy as p,Vh as q,ti as r,Zd as s,m as t,Lp as u,O as v,ly as w,ty as x,cy as y,oy as z}; -//# sourceMappingURL=index-3ba00a4a.js.map diff --git a/spaces/Dagfinn1962/stablediffusion-articlera/appworks.py b/spaces/Dagfinn1962/stablediffusion-articlera/appworks.py deleted file mode 100644 index 878c757de65298f3affa61b5456b53e02dadb9fd..0000000000000000000000000000000000000000 --- a/spaces/Dagfinn1962/stablediffusion-articlera/appworks.py +++ /dev/null @@ -1,80 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"}, - {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"}, - ] - -current_model = models[0] - -text_gen = gr.Interface.load("spaces/daspartho/prompt-extend") - -models2 = [] -for model in models: - model_url = f"models/{model['url']}" - loaded_model = 
gr.Interface.load(model_url, live=True, preprocess=True) - models2.append(loaded_model) - - -def text_it(inputs, text_gen=text_gen): - return text_gen(inputs) - - -def set_model(current_model_index): - global current_model - current_model = models[current_model_index] - return gr.update(value=f"{current_model['name']}") - - -def send_it(inputs, model_choice): - proc = models2[model_choice] - return proc(inputs) - - -with gr.Blocks() as myface: - gr.HTML(""" - """ - - ) - with gr.Row(): - input_text = gr.Textbox(label=" ",placeholder="PROMPT HERE ",lines=4) - # Model selection dropdown - model_name1 = gr.Dropdown( - label=" ", - choices=[m["name"] for m in models], - type="index", - value=current_model["name"], - interactive=True, - - - ) - with gr.Row(): - see_prompts = gr.Button("Generate Prompts") - run = gr.Button("Generate Images", varant="primery") - - with gr.Row(): - output1 = gr.Image(label="") - output2 = gr.Image(label="") - output3 = gr.Image(label="") - with gr.Row(): - magic1 = gr.Textbox(label="Generated Prompt", lines=2) - magic2 = gr.Textbox(label="Generated Prompt", lines=2) - magic3 = gr.Textbox(label="Generated Prompt", lines=2) - - model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3,]) - - run.click(send_it, inputs=[magic1, model_name1], outputs=[output1]) - run.click(send_it, inputs=[magic2, model_name1], outputs=[output2]) - run.click(send_it, inputs=[magic3, model_name1], outputs=[output3]) - - - see_prompts.click(text_it, inputs=[input_text], outputs=[magic1]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic2]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic3]) - - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/Deepaksiwania12/Face-Landmark-Detection/README.md b/spaces/Deepaksiwania12/Face-Landmark-Detection/README.md deleted file mode 100644 index 241d9130271904cb02c1902c736243b565dd7b2b..0000000000000000000000000000000000000000 --- a/spaces/Deepaksiwania12/Face-Landmark-Detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Face Landmark Detection -emoji: 👁 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DhilshaM/MyGenAI/app.py b/spaces/DhilshaM/MyGenAI/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/DhilshaM/MyGenAI/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/STrack.cpp b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/STrack.cpp deleted file mode 100644 index 8306165304355fe6d3d6e244207211757f21a646..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/STrack.cpp +++ /dev/null @@ -1,192 +0,0 @@ -#include "STrack.h" - -STrack::STrack(vector tlwh_, float score) -{ - _tlwh.resize(4); - _tlwh.assign(tlwh_.begin(), tlwh_.end()); - - is_activated = false; - track_id = 0; - state = TrackState::New; - - tlwh.resize(4); - tlbr.resize(4); - - static_tlwh(); - static_tlbr(); - frame_id = 0; - tracklet_len = 0; - this->score = score; - start_frame = 0; -} - -STrack::~STrack() -{ -} - -void STrack::activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id) -{ - this->kalman_filter = kalman_filter; - this->track_id = this->next_id(); - - vector _tlwh_tmp(4); - _tlwh_tmp[0] = this->_tlwh[0]; - _tlwh_tmp[1] = this->_tlwh[1]; - _tlwh_tmp[2] = this->_tlwh[2]; - _tlwh_tmp[3] = this->_tlwh[3]; - vector xyah = tlwh_to_xyah(_tlwh_tmp); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - auto mc = this->kalman_filter.initiate(xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->tracklet_len = 0; - this->state = TrackState::Tracked; - if (frame_id == 1) - { - this->is_activated = true; - } - //this->is_activated = true; - this->frame_id = frame_id; - this->start_frame = frame_id; -} - -void STrack::re_activate(STrack &new_track, int frame_id, bool new_id) -{ - vector xyah = tlwh_to_xyah(new_track.tlwh); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->tracklet_len = 0; - this->state = TrackState::Tracked; - this->is_activated = true; - this->frame_id = frame_id; - this->score = new_track.score; - if (new_id) - this->track_id = next_id(); -} - -void STrack::update(STrack &new_track, int frame_id) -{ - this->frame_id = frame_id; - this->tracklet_len++; - - vector xyah = tlwh_to_xyah(new_track.tlwh); - DETECTBOX xyah_box; - xyah_box[0] = xyah[0]; - xyah_box[1] = xyah[1]; - xyah_box[2] = xyah[2]; - xyah_box[3] = xyah[3]; - - auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box); - this->mean = mc.first; - this->covariance = mc.second; - - static_tlwh(); - static_tlbr(); - - this->state = TrackState::Tracked; - this->is_activated = true; - - this->score = new_track.score; -} - -void STrack::static_tlwh() -{ - if (this->state == TrackState::New) - { - tlwh[0] = _tlwh[0]; - 
tlwh[1] = _tlwh[1]; - tlwh[2] = _tlwh[2]; - tlwh[3] = _tlwh[3]; - return; - } - - tlwh[0] = mean[0]; - tlwh[1] = mean[1]; - tlwh[2] = mean[2]; - tlwh[3] = mean[3]; - - tlwh[2] *= tlwh[3]; - tlwh[0] -= tlwh[2] / 2; - tlwh[1] -= tlwh[3] / 2; -} - -void STrack::static_tlbr() -{ - tlbr.clear(); - tlbr.assign(tlwh.begin(), tlwh.end()); - tlbr[2] += tlbr[0]; - tlbr[3] += tlbr[1]; -} - -vector STrack::tlwh_to_xyah(vector tlwh_tmp) -{ - vector tlwh_output = tlwh_tmp; - tlwh_output[0] += tlwh_output[2] / 2; - tlwh_output[1] += tlwh_output[3] / 2; - tlwh_output[2] /= tlwh_output[3]; - return tlwh_output; -} - -vector STrack::to_xyah() -{ - return tlwh_to_xyah(tlwh); -} - -vector STrack::tlbr_to_tlwh(vector &tlbr) -{ - tlbr[2] -= tlbr[0]; - tlbr[3] -= tlbr[1]; - return tlbr; -} - -void STrack::mark_lost() -{ - state = TrackState::Lost; -} - -void STrack::mark_removed() -{ - state = TrackState::Removed; -} - -int STrack::next_id() -{ - static int _count = 0; - _count++; - return _count; -} - -int STrack::end_frame() -{ - return this->frame_id; -} - -void STrack::multi_predict(vector &stracks, byte_kalman::KalmanFilter &kalman_filter) -{ - for (int i = 0; i < stracks.size(); i++) - { - if (stracks[i]->state != TrackState::Tracked) - { - stracks[i]->mean[7] = 0; - } - kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance); - } -} \ No newline at end of file diff --git a/spaces/Eddycrack864/Applio-Inference/demucs/separate.py b/spaces/Eddycrack864/Applio-Inference/demucs/separate.py deleted file mode 100644 index 3fc7af9e711978b3e21398aa6f1deb9ae87dd370..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/demucs/separate.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import sys -from pathlib import Path -import subprocess - -import julius -import torch as th -import torchaudio as ta - -from .audio import AudioFile, convert_audio_channels -from .pretrained import is_pretrained, load_pretrained -from .utils import apply_model, load_model - - -def load_track(track, device, audio_channels, samplerate): - errors = {} - wav = None - - try: - wav = AudioFile(track).read( - streams=0, - samplerate=samplerate, - channels=audio_channels).to(device) - except FileNotFoundError: - errors['ffmpeg'] = 'Ffmpeg is not installed.' - except subprocess.CalledProcessError: - errors['ffmpeg'] = 'FFmpeg could not read the file.' - - if wav is None: - try: - wav, sr = ta.load(str(track)) - except RuntimeError as err: - errors['torchaudio'] = err.args[0] - else: - wav = convert_audio_channels(wav, audio_channels) - wav = wav.to(device) - wav = julius.resample_frac(wav, sr, samplerate) - - if wav is None: - print(f"Could not load file {track}. " - "Maybe it is not a supported file format? ") - for backend, error in errors.items(): - print(f"When trying to load using {backend}, got the following error: {error}") - sys.exit(1) - return wav - - -def encode_mp3(wav, path, bitrate=320, samplerate=44100, channels=2, verbose=False): - try: - import lameenc - except ImportError: - print("Failed to call lame encoder. Maybe it is not installed? 
" - "On windows, run `python.exe -m pip install -U lameenc`, " - "on OSX/Linux, run `python3 -m pip install -U lameenc`, " - "then try again.", file=sys.stderr) - sys.exit(1) - encoder = lameenc.Encoder() - encoder.set_bit_rate(bitrate) - encoder.set_in_sample_rate(samplerate) - encoder.set_channels(channels) - encoder.set_quality(2) # 2-highest, 7-fastest - if not verbose: - encoder.silence() - wav = wav.transpose(0, 1).numpy() - mp3_data = encoder.encode(wav.tobytes()) - mp3_data += encoder.flush() - with open(path, "wb") as f: - f.write(mp3_data) - - -def main(): - parser = argparse.ArgumentParser("demucs.separate", - description="Separate the sources for the given tracks") - parser.add_argument("tracks", nargs='+', type=Path, default=[], help='Path to tracks') - parser.add_argument("-n", - "--name", - default="demucs_quantized", - help="Model name. See README.md for the list of pretrained models. " - "Default is demucs_quantized.") - parser.add_argument("-v", "--verbose", action="store_true") - parser.add_argument("-o", - "--out", - type=Path, - default=Path("separated"), - help="Folder where to put extracted tracks. A subfolder " - "with the model name will be created.") - parser.add_argument("--models", - type=Path, - default=Path("models"), - help="Path to trained models. " - "Also used to store downloaded pretrained models") - parser.add_argument("-d", - "--device", - default="cuda" if th.cuda.is_available() else "cpu", - help="Device to use, default is cuda if available else cpu") - parser.add_argument("--shifts", - default=0, - type=int, - help="Number of random shifts for equivariant stabilization." - "Increase separation time but improves quality for Demucs. 10 was used " - "in the original paper.") - parser.add_argument("--overlap", - default=0.25, - type=float, - help="Overlap between the splits.") - parser.add_argument("--no-split", - action="store_false", - dest="split", - default=True, - help="Doesn't split audio in chunks. This can use large amounts of memory.") - parser.add_argument("--float32", - action="store_true", - help="Convert the output wavefile to use pcm f32 format instead of s16. " - "This should not make a difference if you just plan on listening to the " - "audio but might be needed to compute exactly metrics like SDR etc.") - parser.add_argument("--int16", - action="store_false", - dest="float32", - help="Opposite of --float32, here for compatibility.") - parser.add_argument("--mp3", action="store_true", - help="Convert the output wavs to mp3.") - parser.add_argument("--mp3-bitrate", - default=320, - type=int, - help="Bitrate of converted mp3.") - - args = parser.parse_args() - name = args.name + ".th" - model_path = args.models / name - if model_path.is_file(): - model = load_model(model_path) - else: - if is_pretrained(args.name): - model = load_pretrained(args.name) - else: - print(f"No pre-trained model {args.name}", file=sys.stderr) - sys.exit(1) - model.to(args.device) - - out = args.out / args.name - out.mkdir(parents=True, exist_ok=True) - print(f"Separated tracks will be stored in {out.resolve()}") - for track in args.tracks: - if not track.exists(): - print( - f"File {track} does not exist. 
If the path contains spaces, " - "please try again after surrounding the entire path with quotes \"\".", - file=sys.stderr) - continue - print(f"Separating track {track}") - wav = load_track(track, args.device, model.audio_channels, model.samplerate) - - ref = wav.mean(0) - wav = (wav - ref.mean()) / ref.std() - sources = apply_model(model, wav, shifts=args.shifts, split=args.split, - overlap=args.overlap, progress=True) - sources = sources * ref.std() + ref.mean() - - track_folder = out / track.name.rsplit(".", 1)[0] - track_folder.mkdir(exist_ok=True) - for source, name in zip(sources, model.sources): - source = source / max(1.01 * source.abs().max(), 1) - if args.mp3 or not args.float32: - source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short() - source = source.cpu() - stem = str(track_folder / name) - if args.mp3: - encode_mp3(source, stem + ".mp3", - bitrate=args.mp3_bitrate, - samplerate=model.samplerate, - channels=model.audio_channels, - verbose=args.verbose) - else: - wavname = str(track_folder / f"{name}.wav") - ta.save(wavname, source, sample_rate=model.samplerate) - - -if __name__ == "__main__": - main() diff --git a/spaces/Eddycrack864/Applio-Inference/julius/__init__.py b/spaces/Eddycrack864/Applio-Inference/julius/__init__.py deleted file mode 100644 index 69811b0415a291ca1beb845531785ba03c57099a..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/julius/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 - -# flake8: noqa -""" -.. image:: ../logo.png - -Julius contains different Digital Signal Processing algorithms implemented -with PyTorch, so that they are differentiable and available on CUDA. -Note that all the modules implemented here can be used with TorchScript. - -For now, I have implemented: - -- `julius.resample`: fast sinc resampling. -- `julius.fftconv`: FFT based convolutions. -- `julius.lowpass`: FIR low pass filter banks. -- `julius.filters`: FIR high pass and band pass filters. -- `julius.bands`: Decomposition of a waveform signal over mel-scale frequency bands. - -Along that, you might found useful utilities in: - -- `julius.core`: DSP related functions. -- `julius.utils`: Generic utilities. - - -Please checkout [the Github repository](https://github.com/adefossez/julius) for other informations. -For a verification of the speed and correctness of Julius, check the benchmark module `bench`. - - -This package is named in this honor of -[Julius O. Smith](https://ccrma.stanford.edu/~jos/), -whose books and website were a gold mine of information for me to learn about DSP. Go checkout his website if you want -to learn more about DSP. 
-""" - -from .bands import SplitBands, split_bands -from .fftconv import fft_conv1d, FFTConv1d -from .filters import bandpass_filter, BandPassFilter -from .filters import highpass_filter, highpass_filters, HighPassFilter, HighPassFilters -from .lowpass import lowpass_filter, lowpass_filters, LowPassFilters, LowPassFilter -from .resample import resample_frac, ResampleFrac diff --git a/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_adam_step_600e.py b/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_adam_step_600e.py deleted file mode 100644 index 5daa2d4cf5ee79e48de7d984fcfdbc336f885a96..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/_base_/schedules/schedule_adam_step_600e.py +++ /dev/null @@ -1,8 +0,0 @@ -# optimizer -optimizer = dict(type='Adam', lr=1e-4) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict(policy='step', step=[200, 400]) -# running settings -runner = dict(type='EpochBasedRunner', max_epochs=600) -checkpoint_config = dict(interval=100) diff --git a/spaces/Felladrin/MiniSearch/src/types.d.ts b/spaces/Felladrin/MiniSearch/src/types.d.ts deleted file mode 100644 index 85cabec7c26fec07087e7894e1d109f283ba21ed..0000000000000000000000000000000000000000 --- a/spaces/Felladrin/MiniSearch/src/types.d.ts +++ /dev/null @@ -1,24 +0,0 @@ -/// - -declare module "loadbar" { - export default class Loadbar { - constructor( - options?: { - height?: string; - backgroundColor?: string; - easeFunction?: function; - zIndex?: number; - startPoint?: number; - pausePoint?: number; - }, - el?: HTMLElement, - ); - growTo(num: number): void; - start(): void; - loading(): void; - pause(): this; - stop(): void; - destroy(): void; - done(): void; - } -} diff --git a/spaces/GAIR/Factool/factool/utils/utils_json.py b/spaces/GAIR/Factool/factool/utils/utils_json.py deleted file mode 100644 index 14aa390f39fc590f4dd976a54fe5ad749b83a9d2..0000000000000000000000000000000000000000 --- a/spaces/GAIR/Factool/factool/utils/utils_json.py +++ /dev/null @@ -1,12 +0,0 @@ -import json -import numpy as np - -class CustomJSONEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, np.int64): - return int(obj) - elif isinstance(obj, tuple): - return list(obj) - elif isinstance(obj, np.ndarray): - return obj.tolist() - return super(CustomJSONEncoder, self).default(obj) \ No newline at end of file diff --git a/spaces/GT4SD/protein_properties/model_cards/description.md b/spaces/GT4SD/protein_properties/model_cards/description.md deleted file mode 100644 index 86b684d7035ec779a25b237fba7894f2e0c12697..0000000000000000000000000000000000000000 --- a/spaces/GT4SD/protein_properties/model_cards/description.md +++ /dev/null @@ -1,7 +0,0 @@ - - -logo - -### Protein property prediction - -This is the GT4SD web-app for prediction of various protein (or peptide) properties. For **examples** and **documentation** of the supported properties, please see below. Please note that this API does not expose **all** properties that are supported in GT4SD (a list of the non-supported ones can be found at the bottom). 
diff --git a/spaces/GastonMazzei/escher-inpaint-project/model-card.md b/spaces/GastonMazzei/escher-inpaint-project/model-card.md deleted file mode 100644 index 8bf5b18aef4548f65654f60852b01e7bfd6c4e06..0000000000000000000000000000000000000000 --- a/spaces/GastonMazzei/escher-inpaint-project/model-card.md +++ /dev/null @@ -1,50 +0,0 @@ -# Overview - -This card describes the diffusion model GLIDE (filtered) and noised CLIP model described in the paper [GLIDE: Towards -Photorealistic Image Generation and Editing with Text-Guided Diffusion Models](https://arxiv.org/abs/2112.10741) - -# Datasets - -GLIDE (filtered) was trained on a filtered version of a dataset comprised of several hundred million text-image pairs -collected from the internet. We constructed a set of filters intended to remove all images of people, violent objects, and some -and hate symbols (see Appendix F of the paper for details). The size of the dataset after filtering was approximately -67M text-image pairs. - -Our noised CLIP model which was trained on the dataset described above, augmented with a filtered version of the dataset used -to train the [original CLIP models](https://github.com/openai/clip). The total size of this augmented dataset is approximately 137M pairs. - -# Performance - -Qualitatively, we find that the generated images from GLIDE (filtered) often look semi-realistic, but the small size of the model hinders -its ability to bind attributes to objects and perform compositional tasks. Because the dataset used to train GLIDE -(filtered) has been preprocessed to remove images of people, this also limits its world knowledge, especially in regard -to concepts that involve people. -Finally, due to the dataset used to train GLIDE (filtered), the model has reduced capabilities to compose multiple objects in complex ways compared to models of a similar size trained on our internal dataset. - -We do not directly measure quantitative metrics for GLIDE (filtered). In particular, most of the evaluations we report for our other models are biased against GLIDE (filtered), since they use prompts that often require generations of people. Evaluating people-free models remains an open area of research. - -# Intended Use - -We release these models to help advance research in generative modeling. Due to the limitations and biases of GLIDE (filtered), we do not currently recommend it for commercial use. - -Functionally, these models are intended to be able to perform the following tasks for research purposes: - * Generate images from natural language prompts - * Iteratively edit and refine images using inpainting - -These models are explicitly not intended to generate images of people or other subjects we filtered for (see Appendix F of the paper for details). - -# Limitations - -Despite the dataset filtering applied before training, GLIDE (filtered) continues to exhibit biases that extend beyond those found in images of people. -We explore some of these biases in our paper. For example: - - * It produces different outputs when asked to generate toys for boys and toys for girls. - * It gravitates toward generating images of churches when asked to generate "a religious place", - and this bias is amplified by classifier-free guidance. - * It may have a greater propensity for generating hate symbols other than swastikas and confederate flags. Our filter - for hate symbols focused specifically on these two cases, as we found few relevant images of hate symbols in our - dataset. 
However, we also found that the model has diminished capabilities across a wider set of symbols. - -GLIDE (filtered) can fail to produce realistic outputs for complex prompts or for prompts that involve concepts that are -not well-represented in its training data. While the data for the model was filtered to remove certain types of images, -the data still exhibits biases toward Western-centric concepts. diff --git a/spaces/Giuliano/Conversational-Datasets/README.md b/spaces/Giuliano/Conversational-Datasets/README.md deleted file mode 100644 index 4352e212c3ed6ad9b7fdef165a8d786eb8fa21ac..0000000000000000000000000000000000000000 --- a/spaces/Giuliano/Conversational-Datasets/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Conversational Datasets -emoji: 🏃 -colorFrom: blue -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/GoAPI/Midjourney-zoom-video-generator-GoAPI/zoom_video_composer.py b/spaces/GoAPI/Midjourney-zoom-video-generator-GoAPI/zoom_video_composer.py deleted file mode 100644 index b30b4bf2a0c4dfedf599bf8e22eb91697d5d763c..0000000000000000000000000000000000000000 --- a/spaces/GoAPI/Midjourney-zoom-video-generator-GoAPI/zoom_video_composer.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env python3 - -# zoom_video_composer.py v0.3.2 -# https://github.com/mwydmuch/ZoomVideoComposer - -# Copyright (c) 2023 Marek Wydmuch and the respective contributors - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import concurrent -import os -import shutil -from concurrent.futures import ThreadPoolExecutor -from hashlib import md5 -from multiprocessing import cpu_count -import click -from tqdm import tqdm - -from helpers import * - -VERSION = "0.3.2" - -@click.command() -@click.argument( - "image_paths", - nargs=-1, - type=click.Path(exists=True), - required=True, -) -@click.option( - "-a", - "--audio_path", - type=click.Path(exists=True, dir_okay=False), - default=None, - help="Audio file path that will be added to the video.", -) -@click.option( - "-z", - "--zoom", - type=float, - default=2.0, - help="Zoom factor/ratio between images.", - show_default=True, -) -@click.option( - "-d", - "--duration", - type=float, - default=10.0, - help="Duration of the video in seconds.", - show_default=True, -) -@click.option( - "-e", - "--easing", - type=click.Choice(list(EASING_FUNCTIONS.keys())), - default=DEFAULT_EASING_KEY, - help="Easing function.", - show_default=True, -) -@click.option( - "--easing-power", - type=float, - default=DEFAULT_EASING_POWER, - help="Power argument of easeInPow, easeOutPow and easeInOutPow easing functions.", - show_default=True, -) -@click.option( - "--ease-duration", - type=float, - default=DEFAULT_EASE_DURATION, - help="Duration of easing in linearWithInOutEase as a fraction of video duration.", - show_default=True, -) -@click.option( - "-r", - "--direction", - type=click.Choice(["in", "out", "inout", "outin"]), - default="out", - help="Zoom direction. Inout and outin combine both directions.", - show_default=True, -) -@click.option( - "-f", - "--fps", - type=int, - default=30, - help="Frames per second of the output video.", - show_default=True, -) -@click.option( - "-w", - "--width", - type=float, - default=1, - help="Width of the output video. Values > 1 are interpreted as specific sizes in pixels. Values <= 1 are " - "interpreted as a fraction of the width of the first image.", - show_default=True, -) -@click.option( - "-h", - "--height", - type=float, - default=1, - help="Height of the output video. Values > 1 are interpreted as specific sizes in pixels. Values <= 1 are " - "interpreted as a fraction of the height of the first image.", - show_default=True, -) -@click.option( - "-s", - "--resampling", - type=click.Choice(list(RESAMPLING_FUNCTIONS_PIL.keys())), - default=DEFAULT_RESAMPLING_KEY, - help="Resampling technique to use when resizing images.", - show_default=True, -) -@click.option( - "-m", - "--margin", - type=float, - default=0.05, - help="Size of the margin to cut from the edges of each image for better blending with the next/previous image. " - "Values > 1 are interpreted as specific sizes in pixels. Values <= 1 are interpreted as a fraction of the " - "smaller size of the first image.", - show_default=True, -) -@click.option( - "-o", - "--output", - type=click.Path(), - default="output.mp4", - help="Output video file.", - show_default=True, -) -@click.option( - "-t", - "--threads", - type=int, - default=-1, - help="Number of threads to use to generate frames. 
Use values <= 0 for number of available threads on your " - "machine minus the provided absolute value.", - show_default=True, -) -@click.option( - "--tmp-dir", - type=click.Path(), - default="tmp", - help="Temporary directory to store frames.", - show_default=True, -) -@click.option( - "--keep-frames", - is_flag=True, - default=False, - help="Keep frames in the temporary directory. Otherwise, it will be deleted after the video is generated.", - show_default=True, -) -@click.option( - "--skip-video-generation", - is_flag=True, - default=False, - help="Skip video generation. Useful if you only want to generate the frames. This option will keep the temporary " - "directory similar to --keep-frames flag.", - show_default=True, -) -@click.option( - "--reverse-images", - is_flag=True, - default=False, - help="Reverse the order of the images.", - show_default=True, -) -@click.option( - "--image-engine", - type=click.Choice(list(IMAGE_CLASSES.keys())), - default=DEFAULT_IMAGE_ENGINE, - help="Image engine to use for image processing.", - show_default=True, -) -@click.option( - "--resume", - is_flag=True, - default=False, - help="Resume generation of the video.", - show_default=True, -) -def zoom_video_composer_cli( - image_paths, - audio_path=None, - zoom=2.0, - duration=10.0, - easing=DEFAULT_EASING_KEY, - easing_power=DEFAULT_EASING_POWER, - ease_duration=DEFAULT_EASE_DURATION, - direction="out", - fps=30, - reverse_images=False, - width=1, - height=1, - resampling=DEFAULT_RESAMPLING_KEY, - margin=0.05, - output="output.mp4", - threads=-1, - tmp_dir="tmp", - keep_frames=False, - skip_video_generation=False, - image_engine=DEFAULT_IMAGE_ENGINE, - resume=False, -): - """Compose a zoom video from multiple provided images.""" - zoom_video_composer( - image_paths, - audio_path, - zoom, - duration, - easing, - easing_power, - ease_duration, - direction, - fps, - reverse_images, - width, - height, - resampling, - margin, - output, - threads, - tmp_dir, - keep_frames, - skip_video_generation, - image_engine, - resume, - ) - - -def zoom_video_composer( - image_paths, - audio_path=None, - zoom=2.0, - duration=10.0, - easing=DEFAULT_EASING_KEY, - easing_power=DEFAULT_EASING_POWER, - ease_duration=DEFAULT_EASE_DURATION, - direction="out", - fps=30, - reverse_images=False, - width=1, - height=1, - resampling=DEFAULT_RESAMPLING_KEY, - margin=0.05, - output="output.mp4", - threads=-1, - tmp_dir="tmp", - keep_frames=False, - skip_video_generation=False, - image_engine=DEFAULT_IMAGE_ENGINE, - resume=False, - logger=click.echo, -): - """Compose a zoom video from multiple provided images.""" - video_params = f'zoom={zoom}, fps={fps}, dur={duration}, easing={easing}, easing_power={easing_power}, ease_duration={ease_duration}, direction={direction}, resampling={resampling}, margin={margin}, width={width}, height={height}' - logger(f"Starting zoom video composition with parameters:\n{video_params}") - - # Read images - image_paths = get_image_paths(image_paths) - logger(f"Reading {len(image_paths)} image files ...") - images = read_images(image_paths, logger, image_engine) - - # Setup some additional variables - easing_func = get_easing_function(easing, easing_power, ease_duration) - resampling_func = get_resampling_function(resampling, image_engine) - - num_images = len(images) - 1 - num_frames = int(duration * fps) - num_frames_half = int(num_frames / 2) - video_params_to_hash = video_params + "".join(image_paths) - tmp_dir_hash = os.path.join( - tmp_dir, md5(video_params_to_hash.encode("utf-8")).hexdigest() - ) - 
- # Calculate sizes based on arguments - width, height, margin = get_sizes(images[0], width, height, margin) - - # Create tmp dir - if not os.path.exists(tmp_dir_hash): - logger(f"Creating temporary directory for frames: {tmp_dir_hash} ...") - os.makedirs(tmp_dir_hash, exist_ok=True) - - # Reverse images - images = images_reverse(images, direction, reverse_images) - - # Blend images (take care of margins) - logger(f"Blending {len(images)} images ...") - images = blend_images(images, margin, zoom, resampling_func) - - # Create frames - n_jobs = threads if threads > 0 else cpu_count() - threads - logger(f"Creating frames in {n_jobs} threads ...") - - start_frame = 0 - if resume: - while os.path.exists(os.path.join(tmp_dir_hash, f"{start_frame:06d}.png")): - start_frame += 1 - - with ThreadPoolExecutor(max_workers=n_jobs) as executor: - futures = [ - executor.submit( - process_frame, - i, - images, - direction, - easing_func, - num_frames, - num_frames_half, - num_images, - zoom, - width, - height, - resampling_func, - tmp_dir_hash, - ) - for i in range(start_frame, num_frames) - ] - try: - completed = concurrent.futures.as_completed(futures) - for _ in tqdm(range(num_frames - start_frame), desc="Generating the frames"): - completed.__next__() - except KeyboardInterrupt: - executor.shutdown(wait=False, cancel_futures=True) - raise - - # Images are no longer needed - del images - - # Create video clip using images in tmp dir and audio if provided - logger(f"Writting video in {n_jobs} threads to: {output} ...") - create_video_clip(output, fps, num_frames, tmp_dir_hash, audio_path, n_jobs) - - # Remove tmp dir - if not keep_frames and not skip_video_generation: - logger(f"Removing temporary directory: {tmp_dir_hash} ...") - shutil.rmtree(tmp_dir_hash, ignore_errors=False, onerror=None) - if not os.listdir(tmp_dir): - os.rmdir(tmp_dir) - - logger("Done!") - return output - - -if __name__ == "__main__": - zoom_video_composer_cli() diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index 0141a6d0925c2a2aa37517670a9f12ac7d3a02d4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(mask_size=(66, 66), num_classes=150), - auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 0966b4770cc649e95525c366b09801408b99567a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/psanet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git 
a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py deleted file mode 100644 index 5cdfc8ca264c6045dcb7ad890d89f15537bef233..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fpn_r50.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict(decode_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/cityscapes.py deleted file mode 100644 index fa9958ac1401644420d264c48cf8d807a44d7cf9..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/cityscapes.py +++ /dev/null @@ -1,217 +0,0 @@ -import os.path as osp -import tempfile - -import mmcv -import numpy as np -from mmcv.utils import print_log -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CityscapesDataset(CustomDataset): - """Cityscapes dataset. - - The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is - fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. - """ - - CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], - [0, 80, 100], [0, 0, 230], [119, 11, 32]] - - def __init__(self, **kwargs): - super(CityscapesDataset, self).__init__( - img_suffix='_leftImg8bit.png', - seg_map_suffix='_gtFine_labelTrainIds.png', - **kwargs) - - @staticmethod - def _convert_to_label_id(result): - """Convert trainId to id for cityscapes.""" - if isinstance(result, str): - result = np.load(result) - import cityscapesscripts.helpers.labels as CSLabels - result_copy = result.copy() - for trainId, label in CSLabels.trainId2label.items(): - result_copy[result == trainId] = label.id - - return result_copy - - def results2img(self, results, imgfile_prefix, to_label_id): - """Write the segmentation results to images. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - imgfile_prefix (str): The filename prefix of the png files. - If the prefix is "somepath/xxx", - the png files will be named "somepath/xxx.png". - to_label_id (bool): whether convert output to label_id for - submission - - Returns: - list[str: str]: result txt files which contains corresponding - semantic segmentation images. 
- """ - mmcv.mkdir_or_exist(imgfile_prefix) - result_files = [] - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - if to_label_id: - result = self._convert_to_label_id(result) - filename = self.img_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - - png_filename = osp.join(imgfile_prefix, f'{basename}.png') - - output = Image.fromarray(result.astype(np.uint8)).convert('P') - import cityscapesscripts.helpers.labels as CSLabels - palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) - for label_id, label in CSLabels.id2label.items(): - palette[label_id] = label.color - - output.putpalette(palette) - output.save(png_filename) - result_files.append(png_filename) - prog_bar.update() - - return result_files - - def format_results(self, results, imgfile_prefix=None, to_label_id=True): - """Format the results into dir (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - imgfile_prefix (str | None): The prefix of images files. It - includes the file path and the prefix of filename, e.g., - "a/b/prefix". If not specified, a temp file will be created. - Default: None. - to_label_id (bool): whether to convert output to label_id for - submission. Default: True - - Returns: - tuple: (result_files, tmp_dir), result_files is a list containing - the image paths, tmp_dir is the temporary directory created - for saving json/png files when imgfile_prefix is not specified. - """ - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: ' - f'{len(results)} != {len(self)}') - - if imgfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - imgfile_prefix = tmp_dir.name - else: - tmp_dir = None - result_files = self.results2img(results, imgfile_prefix, to_label_id) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='mIoU', - logger=None, - imgfile_prefix=None, - efficient_test=False): - """Evaluation in Cityscapes/default protocol. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file, - for cityscapes evaluation only. It includes the file path and - the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with cityscapes protocol, it would be - the prefix of output png files. The output files would be - png images under folder "a/b/prefix/xxx.png", where "xxx" is - the image name of cityscapes. If not specified, a temp file - will be created for evaluation. - Default: None. - - Returns: - dict[str, float]: Cityscapes/default metrics. - """ - - eval_results = dict() - metrics = metric.copy() if isinstance(metric, list) else [metric] - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, logger, imgfile_prefix)) - metrics.remove('cityscapes') - if len(metrics) > 0: - eval_results.update( - super(CityscapesDataset, - self).evaluate(results, metrics, logger, efficient_test)) - - return eval_results - - def _evaluate_cityscapes(self, results, logger, imgfile_prefix): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset.
- logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - imgfile_prefix (str | None): The prefix of output image file - - Returns: - dict[str: float]: Cityscapes evaluation results. - """ - try: - import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval # noqa - except ImportError: - raise ImportError('Please run "pip install cityscapesscripts" to ' - 'install cityscapesscripts first.') - msg = 'Evaluating in Cityscapes style' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - result_files, tmp_dir = self.format_results(results, imgfile_prefix) - - if tmp_dir is None: - result_dir = imgfile_prefix - else: - result_dir = tmp_dir.name - - eval_results = dict() - print_log(f'Evaluating results under {result_dir} ...', logger=logger) - - CSEval.args.evalInstLevelScore = True - CSEval.args.predictionPath = osp.abspath(result_dir) - CSEval.args.evalPixelAccuracy = True - CSEval.args.JSONOutput = False - - seg_map_list = [] - pred_list = [] - - # when evaluating with official cityscapesscripts, - # **_gtFine_labelIds.png is used - for seg_map in mmcv.scandir( - self.ann_dir, 'gtFine_labelIds.png', recursive=True): - seg_map_list.append(osp.join(self.ann_dir, seg_map)) - pred_list.append(CSEval.getPrediction(CSEval.args, seg_map)) - - eval_results.update( - CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args)) - - if tmp_dir is not None: - tmp_dir.cleanup() - - return eval_results diff --git a/spaces/HadiTajari/Penguins_pred_App/README.md b/spaces/HadiTajari/Penguins_pred_App/README.md deleted file mode 100644 index 37342b6f3cb9420dd0de6235eb08ef457884d6ac..0000000000000000000000000000000000000000 --- a/spaces/HadiTajari/Penguins_pred_App/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Penguins Pred App 🐧 -emoji: 🦀 -colorFrom: indigo -colorTo: yellow -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat/edit_dist.cpp b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat/edit_dist.cpp deleted file mode 100644 index 9ffb60569d74d2868ed8113b7c787ef870e9da20..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat/edit_dist.cpp +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Copyright 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include -#include // @manual=//caffe2:torch_extension -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace ::std; - -vector> edit_distance2_with_dp( - vector& x, - vector& y) { - uint32_t lx = x.size(); - uint32_t ly = y.size(); - vector> d(lx + 1, vector(ly + 1)); - for (uint32_t i = 0; i < lx + 1; i++) { - d[i][0] = i; - } - for (uint32_t j = 0; j < ly + 1; j++) { - d[0][j] = j; - } - for (uint32_t i = 1; i < lx + 1; i++) { - for (uint32_t j = 1; j < ly + 1; j++) { - d[i][j] = - min(min(d[i - 1][j], d[i][j - 1]) + 1, - d[i - 1][j - 1] + 2 * (x.at(i - 1) == y.at(j - 1) ? 
0 : 1)); - } - } - return d; -} - -vector> edit_distance2_backtracking( - vector>& d, - vector& x, - vector& y, - uint32_t terminal_symbol) { - vector seq; - vector> edit_seqs(x.size() + 2, vector()); - /* - edit_seqs: - 0~x.size() cell is the insertion sequences - last cell is the delete sequence - */ - - if (x.size() == 0) { - edit_seqs.at(0) = y; - return edit_seqs; - } - - uint32_t i = d.size() - 1; - uint32_t j = d.at(0).size() - 1; - - while ((i >= 0) && (j >= 0)) { - if ((i == 0) && (j == 0)) { - break; - } - - if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) { - seq.push_back(1); // insert - seq.push_back(y.at(j - 1)); - j--; - } else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) { - seq.push_back(2); // delete - seq.push_back(x.at(i - 1)); - i--; - } else { - seq.push_back(3); // keep - seq.push_back(x.at(i - 1)); - i--; - j--; - } - } - - uint32_t prev_op, op, s, word; - prev_op = 0, s = 0; - for (uint32_t k = 0; k < seq.size() / 2; k++) { - op = seq.at(seq.size() - 2 * k - 2); - word = seq.at(seq.size() - 2 * k - 1); - if (prev_op != 1) { - s++; - } - if (op == 1) // insert - { - edit_seqs.at(s - 1).push_back(word); - } else if (op == 2) // delete - { - edit_seqs.at(x.size() + 1).push_back(1); - } else { - edit_seqs.at(x.size() + 1).push_back(0); - } - - prev_op = op; - } - - for (uint32_t k = 0; k < edit_seqs.size(); k++) { - if (edit_seqs[k].size() == 0) { - edit_seqs[k].push_back(terminal_symbol); - } - } - return edit_seqs; -} - -vector> edit_distance2_backtracking_with_delete( - vector>& d, - vector& x, - vector& y, - uint32_t terminal_symbol, - uint32_t deletion_symbol) { - vector seq; - vector> edit_seqs(x.size() + 1, vector()); - /* - edit_seqs: - 0~x.size() cell is the insertion sequences - last cell is the delete sequence - */ - - if (x.size() == 0) { - edit_seqs.at(0) = y; - return edit_seqs; - } - - uint32_t i = d.size() - 1; - uint32_t j = d.at(0).size() - 1; - - while ((i >= 0) && (j >= 0)) { - if ((i == 0) && (j == 0)) { - break; - } - - if ((j > 0) && (d.at(i).at(j - 1) < d.at(i).at(j))) { - seq.push_back(1); // insert - seq.push_back(y.at(j - 1)); - j--; - } else if ((i > 0) && (d.at(i - 1).at(j) < d.at(i).at(j))) { - seq.push_back(2); // delete - seq.push_back(x.at(i - 1)); - i--; - } else { - seq.push_back(3); // keep - seq.push_back(x.at(i - 1)); - i--; - j--; - } - } - - uint32_t prev_op, op, s, word; - prev_op = 0, s = 0; - for (uint32_t k = 0; k < seq.size() / 2; k++) { - op = seq.at(seq.size() - 2 * k - 2); - word = seq.at(seq.size() - 2 * k - 1); - if (prev_op != 1) { - s++; - } - if (op == 1) // insert - { - edit_seqs.at(s - 1).push_back(word); - } else if (op == 2) // delete - { - edit_seqs.at(s - 1).push_back(deletion_symbol); - } - - prev_op = op; - } - - for (uint32_t k = 0; k < edit_seqs.size(); k++) { - if (edit_seqs.at(k).size() == 0) { - edit_seqs.at(k).push_back(terminal_symbol); - } - } - return edit_seqs; -} - -vector compute_ed2( - vector>& xs, - vector>& ys) { - vector distances(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - distances.at(i) = d.at(xs.at(i).size()).at(ys.at(i).size()); - } - return distances; -} - -vector>> suggested_ed2_path( - vector>& xs, - vector>& ys, - uint32_t terminal_symbol) { - vector>> seq(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - seq.at(i) = - edit_distance2_backtracking(d, xs.at(i), ys.at(i), terminal_symbol); - } - return seq; -} - -vector>> 
suggested_ed2_path_with_delete( - vector>& xs, - vector>& ys, - uint32_t terminal_symbol, - uint32_t deletion_symbol) { - vector>> seq(xs.size()); - for (uint32_t i = 0; i < xs.size(); i++) { - vector> d = edit_distance2_with_dp(xs.at(i), ys.at(i)); - seq.at(i) = edit_distance2_backtracking_with_delete( - d, xs.at(i), ys.at(i), terminal_symbol, deletion_symbol); - } - return seq; -} - -PYBIND11_MODULE(libnat, m) { - m.def("compute_ed2", &compute_ed2, "compute_ed2"); - m.def("suggested_ed2_path", &suggested_ed2_path, "suggested_ed2_path"); - m.def( - "suggested_ed2_path_with_delete", - &suggested_ed2_path_with_delete, - "suggested_ed2_path_with_delete"); -} diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/audio_finetuning.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/audio_finetuning.py deleted file mode 100644 index 4ef87c604f00581f03075e9ebe10a43dd51d6e45..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/audio_finetuning.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import logging -import os -import torch -import json - -from argparse import Namespace -from dataclasses import dataclass, field -from typing import Optional, Any - -from fairseq.data import AddTargetDataset, Dictionary, encoders -from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.configs import GenerationConfig -from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel - -from . import register_task -from .. import utils -from ..logging import metrics - - -logger = logging.getLogger(__name__) - - -class LabelEncoder(object): - def __init__(self, dictionary): - self.dictionary = dictionary - - def __call__(self, label): - return self.dictionary.encode_line( - label, append_eos=False, add_if_not_exist=False - ) - - -def label_len_fn(label): - return len(label.split(" ")) - - -@dataclass -class AudioFinetuningConfig(AudioPretrainingConfig): - # Options for reporting WER metrics during validation. 
Only applicable to - # Seq2Seq models during fine-tuning - eval_wer: bool = field( - default=False, metadata={"help": "compute WER for Seq2Seq models"} - ) - eval_wer_config: GenerationConfig = field( - default_factory=lambda: GenerationConfig(), - metadata={"help": "beam search config for evaluating wer during training"}, - ) - eval_wer_tokenizer: Any = field( - default=None, - metadata={"help": "tokenizer config for evaluating wer during training"}, - ) - eval_wer_post_process: str = field( - default="letter", - metadata={ - "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)" - }, - ) - eval_bleu: bool = field( - default=False, metadata={"help": "evaluation with BLEU scores"} - ) - eval_bleu_detok: Optional[str] = field( - default=None, metadata={ - "help": "detokenize before computing BLEU (e.g., 'moses'); " - "required if using --eval-bleu; use 'space' to disable " - "detokenization; see fairseq.data.encoders for other options" - } - ) - eval_bleu_detok_args: str = field( - default="{}", - metadata={"help": "args for building the tokenizer, if needed"} - ) - eval_tokenized_bleu: bool = field( - default=False, - metadata={"help": "compute tokenized BLEU instead of sacrebleu"} - ) - eval_bleu_remove_bpe: Optional[str] = field( - default=None, metadata={"help": "remove BPE before computing BLEU"} - ) - eval_bleu_args: str = field( - default="{}", - metadata={"help": "generation args for BLUE scoring, e.g., " - "'{\"beam\": 4, \"lenpen\": 0.6}'"} - ) - eval_bleu_print_samples: bool = field( - default=False, - metadata={"help": "print sample generations during validation"} - ) - autoregressive: bool = field( - default=False, - metadata={ - "help": "required for autoregressive decoders (like seq2seq models); " - "adds 'prev_output_tokens' to input and appends eos to target" - }, - ) - - -@register_task("audio_finetuning", dataclass=AudioFinetuningConfig) -class AudioFinetuningTask(AudioPretrainingTask): - """ """ - - cfg: AudioFinetuningConfig - - def __init__( - self, - cfg: AudioFinetuningConfig, - ): - super().__init__(cfg) - self.blank_symbol = "" - - self.state.add_factory("target_dictionary", self.load_target_dictionary) - - def load_target_dictionary(self): - if self.cfg.labels: - dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt") - return Dictionary.load(dict_path) - return None - - def load_dataset(self, split: str, task_cfg: AudioFinetuningConfig = None, **kwargs): - super().load_dataset(split, task_cfg, **kwargs) - - task_cfg = task_cfg or self.cfg - assert task_cfg.labels is not None - text_compression_level = getattr( - TextCompressionLevel, str(self.cfg.text_compression_level) - ) - data_path = self.cfg.data - label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}") - skipped_indices = getattr(self.datasets[split], "skipped_indices", set()) - text_compressor = TextCompressor(level=text_compression_level) - with open(label_path, "r") as f: - labels = [ - text_compressor.compress(l) - for i, l in enumerate(f) if i not in skipped_indices - ] - - assert len(labels) == len(self.datasets[split]), ( - f"labels length ({len(labels)}) and dataset length " - f"({len(self.datasets[split])}) do not match" - ) - - process_label = LabelEncoder(self.target_dictionary) - - self.datasets[split] = AddTargetDataset( - self.datasets[split], - labels, - pad=self.target_dictionary.pad(), - eos=self.target_dictionary.eos(), - batch_targets=True, - process_label=process_label, - label_len_fn=label_len_fn, - 
add_to_input=task_cfg.get("autoregressive", False), - text_compression_level=text_compression_level - ) - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.state.target_dictionary - - def valid_step(self, sample, model, criterion): - loss, sample_size, logging_output = super().valid_step(sample, model, criterion) - if self.cfg.eval_wer and self.cfg.autoregressive: - metrics = self._inference_with_wer(self.sequence_generator, sample, model) - logging_output["_num_char_errors"] = metrics["num_char_errors"] - logging_output["_num_chars"] = metrics["num_chars"] - logging_output["_num_word_errors"] = metrics["num_word_errors"] - logging_output["_num_words"] = metrics["num_words"] - if self.cfg.eval_bleu and self.cfg.autoregressive: - metrics = self._inference_with_bleu(self.sequence_generator, sample, model) - logging_output['_bleu_sys_len'] = metrics.sys_len - logging_output['_bleu_ref_len'] = metrics.ref_len - # we split counts into separate entries so that they can be - # summed efficiently across workers using fast-stat-sync - assert len(metrics.counts) == 4 - for i in range(4): - logging_output[f"_bleu_counts_{i}"] = metrics.counts[i] - logging_output[f"_bleu_totals_{i}"] = metrics.totals[i] - return loss, sample_size, logging_output - - def build_model(self, model_cfg: FairseqDataclass): - model = super().build_model(model_cfg) - - if self.cfg.eval_wer and self.cfg.autoregressive: - self.sequence_generator = self.build_generator( - [model], - self.cfg.eval_wer_config, - ) - if self.cfg.eval_wer_tokenizer: - self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer) - else: - self.tokenizer = None - if self.cfg.eval_bleu and self.cfg.autoregressive: - assert self.cfg.eval_bleu_detok is not None, ( - '--eval-bleu-detok is required if using --eval-bleu; ' - 'try --eval-bleu-detok=moses (or --eval-bleu-detok=space ' - 'to disable detokenization, e.g., when using sentencepiece)' - ) - detok_args = json.loads(self.cfg.eval_bleu_detok_args) - self.tokenizer = encoders.build_tokenizer( - Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) - ) - gen_args = json.loads(self.cfg.eval_bleu_args) - gen_args = Namespace(**gen_args) - self.sequence_generator = self.build_generator([model], gen_args) - - return model - - def _inference_with_wer(self, generator, sample, model): - import editdistance - - def decode(toks): - s = self.target_dictionary.string( - toks.int().cpu(), - self.cfg.eval_wer_post_process, - escape_unk=True, - ) - if self.tokenizer: - s = self.tokenizer.decode(s) - return s - - num_word_errors, num_char_errors = 0, 0 - num_chars, num_words = 0, 0 - gen_out = self.inference_step(generator, [model], sample, None) - for i in range(len(gen_out)): - hyp = decode(gen_out[i][0]["tokens"]) - ref = decode( - utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), - ) - num_char_errors += editdistance.eval(hyp, ref) - num_chars += len(ref) - hyp_words = hyp.split() - ref_words = ref.split() - num_word_errors += editdistance.eval(hyp_words, ref_words) - num_words += len(ref_words) - - return { - "num_char_errors": num_char_errors, - "num_chars": num_chars, - "num_word_errors": num_word_errors, - "num_words": num_words, - } - - def _inference_with_bleu(self, generator, sample, model): - import sacrebleu - - def decode(toks, is_ref): - s = self.target_dictionary.string( - toks.int().cpu(), - self.cfg.eval_bleu_remove_bpe, - # The default unknown string in fairseq is ``, but - # this 
is tokenized by sacrebleu as `< unk >`, inflating - # BLEU scores. Instead, we use a somewhat more verbose - # alternative that is unlikely to appear in the real - # reference, but doesn't get split into multiple tokens. - unk_string=( - "UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP" - ), - ) - if self.tokenizer: - s = self.tokenizer.decode(s) - return s - - gen_out = self.inference_step(generator, [model], sample) - hyps, refs = [], [] - for i in range(len(gen_out)): - hyps.append(decode(gen_out[i][0]['tokens'], is_ref=False)) - refs.append( - decode( - utils.strip_pad( - sample['target'][i], - self.target_dictionary.pad() - ), - is_ref=True, # don't count as matches to the hypo - ) - ) - if self.cfg.eval_bleu_print_samples: - logger.info('H-{} {}'.format(sample["id"][0], hyps[0])) - logger.info('T-{} {}'.format(sample["id"][0], refs[0])) - - eval_tokenization = 'none' if self.cfg.eval_tokenized_bleu else '13a' - return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization) - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - - if self.cfg.eval_wer: - zero = torch.scalar_tensor(0.0) - num_char_errors = sum( - log.get("_num_char_errors", zero) for log in logging_outputs - ) - num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) - num_word_errors = sum( - log.get("_num_word_errors", zero) for log in logging_outputs - ) - num_words = sum(log.get("_num_words", zero) for log in logging_outputs) - metrics.log_scalar("_num_char_errors", num_char_errors) - metrics.log_scalar("_num_chars", num_chars) - metrics.log_scalar("_num_word_errors", num_word_errors) - metrics.log_scalar("_num_words", num_words) - if num_chars > 0: - metrics.log_derived( - "uer", - lambda meters: meters["_num_char_errors"].sum - * 100.0 - / meters["_num_chars"].sum - if meters["_num_chars"].sum > 0 - else float("nan"), - ) - if num_words > 0: - metrics.log_derived( - "wer", - lambda meters: meters["_num_word_errors"].sum - * 100.0 - / meters["_num_words"].sum - if meters["_num_words"].sum > 0 - else float("nan"), - ) - if self.cfg.eval_bleu: - len_keys = ["_bleu_sys_len", "_bleu_ref_len"] - count_keys = [f"_bleu_counts_{i}" for i in range(4)] - total_keys = [f"_bleu_totals_{i}" for i in range(4)] - for k in len_keys + count_keys + total_keys: - metrics.log_scalar( - k, sum(log.get(k, 0) for log in logging_outputs) - ) - - import sacrebleu - metrics.log_derived( - 'bleu', - lambda meters: sacrebleu.compute_bleu( - correct=[meters[k].sum for k in count_keys], - total=[meters[k].sum for k in total_keys], - sys_len=meters['_bleu_sys_len'].sum, - ref_len=meters['_bleu_ref_len'].sum, - smooth_method="exp" - ).score - ) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_multihead_attention.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_multihead_attention.py deleted file mode 100644 index 620a2d679147bbbb8d15f3323374a39939686ec2..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_multihead_attention.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import torch -from fairseq.modules.multihead_attention import MultiheadAttention - - -class TestMultiheadAttention(unittest.TestCase): - def test_append_prev_key_padding_mask(self): - bsz = 1 - src_len = 4 - - cases = [ - # no padding mask - (None, None, None), - # current padding mask only - ( - torch.tensor([[1]]).bool(), - None, - torch.tensor([[0, 0, 0, 1]]).bool(), - ), - # previous padding mask only - ( - None, - torch.tensor([[0, 1, 0]]).bool(), - torch.tensor([[0, 1, 0, 0]]).bool(), - ), - # both padding masks - ( - torch.tensor([[1]]).bool(), - torch.tensor([[0, 1, 0]]).bool(), - torch.tensor([[0, 1, 0, 1]]).bool(), - ), - # prev_key_padding_mask already full - ( - torch.tensor([[0, 1, 0, 1]]).bool(), - None, - torch.tensor([[0, 1, 0, 1]]).bool(), - ), - # key_padding_mask already full - ( - None, - torch.tensor([[0, 1, 0, 1]]).bool(), - torch.tensor([[0, 1, 0, 1]]).bool(), - ), - ] - for c in cases: - key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( - c[0], - c[1], - batch_size=bsz, - src_len=src_len, - static_kv=False, - ) - - if key_padding_mask is not None: - self.assertTrue( - torch.all(torch.eq(key_padding_mask, c[2])), - f"Unexpected resultant key padding mask: {key_padding_mask}" - f" given current: {c[0]} and previous: {c[1]}", - ) - self.assertEqual(key_padding_mask.size(0), bsz) - self.assertEqual(key_padding_mask.size(1), src_len) - else: - self.assertIsNone(c[2]) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/__init__.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Hexamind/iPADS/TwoDimEnv.py b/spaces/Hexamind/iPADS/TwoDimEnv.py deleted file mode 100644 index 7a2637ab082bf553eb504ed7f5217bf37e35cda4..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/iPADS/TwoDimEnv.py +++ /dev/null @@ -1,94 +0,0 @@ -import gymnasium as gym -from params import * - - -class TwoDimEnv(gym.Env): - """ - Custom 2D-Environment that follows gym interface. 
- This is a simple 2D-env where the agent must go to the 0,0 point, - it can go right or straight in a continuous action space, but cannot change speed - """ - def __init__(self): - super(TwoDimEnv, self).__init__() - - # agent cannot be further away from target than space_limits - self.space_limits = SPACE_LIMITS - - # Initialize the agent position - rho_init = np.random.random() * SPACE_LIMITS - theta_init = np.random.random() * 2 * PI - PI - self.rho_init, self.theta_init, self.z_init = rho_init, theta_init, Z_INIT - self.agent_pos = rho_init * np.exp(1j * theta_init) - self.agent_z = Z_INIT - - # agent abs speed is constant, may change of direction on the plane, but doesnt change in the z-axis - self.agent_speed = SPEED_RHO * np.exp(1j*SPEED_ANGLE) - self.agent_speed_z = SPEED_Z - self.agent_max_angle = MAX_ANGLE - - # - self.agent_previous_pos = self.agent_pos - - - def reset(self, training=True, rho_init=RHO_INIT, theta_init=THETA_INIT, z_init=Z_INIT): - """ - :input: TwoDimEnv - :return: TwoDimEnv - """ - # Initialize the agent position and speed - self.agent_pos = rho_init * np.exp(1j * theta_init) - self.agent_speed = SPEED_RHO * np.exp(1j * SPEED_ANGLE) - self.agent_z = z_init - - # init step index - self.step_index = 0 - - return self.get_obs() - - def step(self, action): - ''' - the Gym step - :param action: - :return: - ''' - - self.step_index += 1 - - # Agent changes of direction according to its action: action[0] in [-1; 1] - self.agent_speed *= np.exp(1j * self.agent_max_angle * action[0], dtype=complex) - self.agent_previous_pos = self.agent_pos - self.agent_pos += self.agent_speed - self.agent_z -= self.agent_speed_z - - # Are we done? - done = bool(self.agent_z <= 0) - - # get normalized obs - obs = self.get_obs() - - return obs, 0., done, {} - - def render(self, **kwargs): - pass - - def close(self): - pass - - # calculates normalized obs - def get_obs(self): - ''' - normalises the observation - :return: normalised observation - ''' - agent_dist = np.abs(self.agent_pos) / self.space_limits - agent_angle = np.angle(self.agent_pos) / (2 * PI) - if agent_angle < 0: - agent_angle += 1 - agent_speed = np.angle(self.agent_speed) / (2 * PI) - if agent_speed < 0: - agent_speed += 1 - agent_z = (Z_INIT-self.agent_z) / Z_INIT - obs = np.array([agent_dist, agent_angle, agent_speed, agent_z]).astype(np.float32) - return obs - - diff --git a/spaces/HuggingFaceM4/obelics_visualization/README.md b/spaces/HuggingFaceM4/obelics_visualization/README.md deleted file mode 100644 index 4586b153cab15e159f6720aea847c219274dce85..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceM4/obelics_visualization/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: OBELISC Web Document Visualization -emoji: 📈 -colorFrom: blue -colorTo: red -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/preprocess_hubert_f0.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros/preprocess_hubert_f0.py deleted file mode 100644 index 29a1c7ee028fefbe7905d235447d98cda34ce840..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/preprocess_hubert_f0.py +++ /dev/null @@ -1,62 +0,0 @@ -import math -import multiprocessing -import os -import argparse -from random import shuffle - -import torch -from glob import glob -from tqdm import tqdm - -import utils -import logging 
-logging.getLogger('numba').setLevel(logging.WARNING) -import librosa -import numpy as np - -hps = utils.get_hparams_from_file("configs/config.json") -sampling_rate = hps.data.sampling_rate -hop_length = hps.data.hop_length - - -def process_one(filename, hmodel): - # print(filename) - wav, sr = librosa.load(filename, sr=sampling_rate) - soft_path = filename + ".soft.pt" - if not os.path.exists(soft_path): - devive = torch.device("cuda" if torch.cuda.is_available() else "cpu") - wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(devive) - c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k) - torch.save(c.cpu(), soft_path) - f0_path = filename + ".f0.npy" - if not os.path.exists(f0_path): - f0 = utils.compute_f0_dio(wav, sampling_rate=sampling_rate, hop_length=hop_length) - np.save(f0_path, f0) - - -def process_batch(filenames): - print("Loading hubert for content...") - device = "cuda" if torch.cuda.is_available() else "cpu" - hmodel = utils.get_hubert_model().to(device) - print("Loaded hubert.") - for filename in tqdm(filenames): - process_one(filename, hmodel) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--in_dir", type=str, default="dataset/44k", help="path to input dir") - - args = parser.parse_args() - filenames = glob(f'{args.in_dir}/*/*.wav', recursive=True) # [:10] - shuffle(filenames) - multiprocessing.set_start_method('spawn') - - num_processes = 1 - chunk_size = int(math.ceil(len(filenames) / num_processes)) - chunks = [filenames[i:i + chunk_size] for i in range(0, len(filenames), chunk_size)] - print([len(c) for c in chunks]) - processes = [multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks] - for p in processes: - p.start() diff --git a/spaces/Illumotion/Koboldcpp/examples/batched.swift/README.md b/spaces/Illumotion/Koboldcpp/examples/batched.swift/README.md deleted file mode 100644 index 464c9079c4660836159b9c7b8be0e25e233c3004..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/batched.swift/README.md +++ /dev/null @@ -1,4 +0,0 @@ -This is a swift clone of `examples/batched`. 
- -$ `make` -$ `./swift MODEL_PATH [PROMPT] [PARALLEL]` diff --git a/spaces/Jeff2323/ai-comic-factory/src/lib/useImageDimension.ts b/spaces/Jeff2323/ai-comic-factory/src/lib/useImageDimension.ts deleted file mode 100644 index 9cfd06e473929b1046a5dd9caa9d577ebaf09b7a..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/lib/useImageDimension.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { useEffect, useState } from "react" - -import { ImageDimension, getImageDimension } from "./getImageDimension" - -export function useImageDimension(src: string) { - const [dimension, setDimension] = useState({ - width: 0, - height: 0, - }) - - useEffect(() => { - const compute = async () => { - const newDimension = await getImageDimension(src) - setDimension(newDimension) - } - compute() - }, [src]) - - return dimension -} \ No newline at end of file diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/spsa/eps_scheduler.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/spsa/eps_scheduler.py deleted file mode 100644 index abcee2274d86b146726f413bb6fcd5980863f109..0000000000000000000000000000000000000000 --- a/spaces/JohnnyPittt/audio-styling/deepafx_st/processors/spsa/eps_scheduler.py +++ /dev/null @@ -1,32 +0,0 @@ -import torch - - -class EpsilonScheduler: - def __init__( - self, - epsilon: float = 0.001, - patience: int = 10, - factor: float = 0.5, - verbose: bool = False, - ): - self.epsilon = epsilon - self.patience = patience - self.factor = factor - self.best = 1e16 - self.count = 0 - self.verbose = verbose - - def step(self, metric: float): - - if metric < self.best: - self.best = metric - self.count = 0 - else: - self.count += 1 - if self.verbose: - print(f"Train loss has not improved for {self.count} epochs.") - if self.count >= self.patience: - self.count = 0 - self.epsilon *= self.factor - if self.verbose: - print(f"Reducing epsilon to {self.epsilon:0.2e}...") diff --git a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules.py b/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000 --- a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - 
assert n_layers > 1, "Number of layers should be larger than 1." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask,
g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, 
reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/utterance.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/utterance.py deleted file mode 100644 index 0768c3420f422a7464f305b4c1fb6752c57ceda7..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/utterance.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - - -class Utterance: - def __init__(self, frames_fpath, wave_fpath): - self.frames_fpath = frames_fpath - self.wave_fpath = wave_fpath - - def get_frames(self): - return np.load(self.frames_fpath) - - def random_partial(self, n_frames): - """ - Crops the frames into a partial utterance of n_frames - - :param n_frames: The number of frames of the partial utterance - :return: the partial utterance frames and a tuple indicating the start and end of the - partial utterance in the complete utterance. - """ - frames = self.get_frames() - if frames.shape[0] == n_frames: - start = 0 - else: - start = np.random.randint(0, frames.shape[0] - n_frames) - end = start + n_frames - return frames[start:end], (start, end) \ No newline at end of file diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py b/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py deleted file mode 100644 index 1d973d9b8b9ab547571abc5a3f5ea86226a25924..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/models/deepmind_version.py +++ /dev/null @@ -1,170 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from utils.display import * -from utils.dsp import * - - -class WaveRNN(nn.Module) : - def __init__(self, hidden_size=896, quantisation=256) : - super(WaveRNN, self).__init__() - - self.hidden_size = hidden_size - self.split_size = hidden_size // 2 - - # The main matmul - self.R = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) - - # Output fc layers - self.O1 = nn.Linear(self.split_size, self.split_size) - self.O2 = nn.Linear(self.split_size, quantisation) - self.O3 = nn.Linear(self.split_size, self.split_size) - self.O4 = nn.Linear(self.split_size, quantisation) - - # Input fc layers - self.I_coarse = nn.Linear(2, 3 * self.split_size, bias=False) - self.I_fine = nn.Linear(3, 3 * self.split_size, bias=False) - - # biases for the gates - self.bias_u = nn.Parameter(torch.zeros(self.hidden_size)) - self.bias_r = nn.Parameter(torch.zeros(self.hidden_size)) - self.bias_e = nn.Parameter(torch.zeros(self.hidden_size)) - - # display num params - self.num_params() - - - def forward(self, prev_y, prev_hidden, current_coarse) : - - # Main matmul - the projection is split 3 ways - R_hidden = self.R(prev_hidden) - R_u, R_r, R_e, = torch.split(R_hidden, self.hidden_size, dim=1) - - # Project the prev input - coarse_input_proj = self.I_coarse(prev_y) - I_coarse_u, 
I_coarse_r, I_coarse_e = \ - torch.split(coarse_input_proj, self.split_size, dim=1) - - # Project the prev input and current coarse sample - fine_input = torch.cat([prev_y, current_coarse], dim=1) - fine_input_proj = self.I_fine(fine_input) - I_fine_u, I_fine_r, I_fine_e = \ - torch.split(fine_input_proj, self.split_size, dim=1) - - # concatenate for the gates - I_u = torch.cat([I_coarse_u, I_fine_u], dim=1) - I_r = torch.cat([I_coarse_r, I_fine_r], dim=1) - I_e = torch.cat([I_coarse_e, I_fine_e], dim=1) - - # Compute all gates for coarse and fine - u = F.sigmoid(R_u + I_u + self.bias_u) - r = F.sigmoid(R_r + I_r + self.bias_r) - e = F.tanh(r * R_e + I_e + self.bias_e) - hidden = u * prev_hidden + (1. - u) * e - - # Split the hidden state - hidden_coarse, hidden_fine = torch.split(hidden, self.split_size, dim=1) - - # Compute outputs - out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) - out_fine = self.O4(F.relu(self.O3(hidden_fine))) - - return out_coarse, out_fine, hidden - - - def generate(self, seq_len): - with torch.no_grad(): - # First split up the biases for the gates - b_coarse_u, b_fine_u = torch.split(self.bias_u, self.split_size) - b_coarse_r, b_fine_r = torch.split(self.bias_r, self.split_size) - b_coarse_e, b_fine_e = torch.split(self.bias_e, self.split_size) - - # Lists for the two output seqs - c_outputs, f_outputs = [], [] - - # Some initial inputs - out_coarse = torch.LongTensor([0]).cuda() - out_fine = torch.LongTensor([0]).cuda() - - # We'll meed a hidden state - hidden = self.init_hidden() - - # Need a clock for display - start = time.time() - - # Loop for generation - for i in range(seq_len) : - - # Split into two hidden states - hidden_coarse, hidden_fine = \ - torch.split(hidden, self.split_size, dim=1) - - # Scale and concat previous predictions - out_coarse = out_coarse.unsqueeze(0).float() / 127.5 - 1. - out_fine = out_fine.unsqueeze(0).float() / 127.5 - 1. - prev_outputs = torch.cat([out_coarse, out_fine], dim=1) - - # Project input - coarse_input_proj = self.I_coarse(prev_outputs) - I_coarse_u, I_coarse_r, I_coarse_e = \ - torch.split(coarse_input_proj, self.split_size, dim=1) - - # Project hidden state and split 6 ways - R_hidden = self.R(hidden) - R_coarse_u , R_fine_u, \ - R_coarse_r, R_fine_r, \ - R_coarse_e, R_fine_e = torch.split(R_hidden, self.split_size, dim=1) - - # Compute the coarse gates - u = F.sigmoid(R_coarse_u + I_coarse_u + b_coarse_u) - r = F.sigmoid(R_coarse_r + I_coarse_r + b_coarse_r) - e = F.tanh(r * R_coarse_e + I_coarse_e + b_coarse_e) - hidden_coarse = u * hidden_coarse + (1. - u) * e - - # Compute the coarse output - out_coarse = self.O2(F.relu(self.O1(hidden_coarse))) - posterior = F.softmax(out_coarse, dim=1) - distrib = torch.distributions.Categorical(posterior) - out_coarse = distrib.sample() - c_outputs.append(out_coarse) - - # Project the [prev outputs and predicted coarse sample] - coarse_pred = out_coarse.float() / 127.5 - 1. - fine_input = torch.cat([prev_outputs, coarse_pred.unsqueeze(0)], dim=1) - fine_input_proj = self.I_fine(fine_input) - I_fine_u, I_fine_r, I_fine_e = \ - torch.split(fine_input_proj, self.split_size, dim=1) - - # Compute the fine gates - u = F.sigmoid(R_fine_u + I_fine_u + b_fine_u) - r = F.sigmoid(R_fine_r + I_fine_r + b_fine_r) - e = F.tanh(r * R_fine_e + I_fine_e + b_fine_e) - hidden_fine = u * hidden_fine + (1. 
- u) * e - - # Compute the fine output - out_fine = self.O4(F.relu(self.O3(hidden_fine))) - posterior = F.softmax(out_fine, dim=1) - distrib = torch.distributions.Categorical(posterior) - out_fine = distrib.sample() - f_outputs.append(out_fine) - - # Put the hidden state back together - hidden = torch.cat([hidden_coarse, hidden_fine], dim=1) - - # Display progress - speed = (i + 1) / (time.time() - start) - stream('Gen: %i/%i -- Speed: %i', (i + 1, seq_len, speed)) - - coarse = torch.stack(c_outputs).squeeze(1).cpu().data.numpy() - fine = torch.stack(f_outputs).squeeze(1).cpu().data.numpy() - output = combine_signal(coarse, fine) - - return output, coarse, fine - - def init_hidden(self, batch_size=1) : - return torch.zeros(batch_size, self.hidden_size).cuda() - - def num_params(self) : - parameters = filter(lambda p: p.requires_grad, self.parameters()) - parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 - print('Trainable Parameters: %.3f million' % parameters) \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/datetime_cleaner.py b/spaces/Kimata/Sanskrit-TTS/datetime_cleaner.py deleted file mode 100644 index 8f8b42989723862146701a75fddb95f40f339953..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/datetime_cleaner.py +++ /dev/null @@ -1,180 +0,0 @@ -import re -from datetime import datetime -import unidecode -import normalizer_utils - -dict_num = normalizer_utils.dict_num -dict_num_list = list(dict_num) - - -def datetime_forwardslash_format(text): - # Regular expression pattern to match date format - pattern = r"\b\d{1,2}/\d{1,2}/\d{4}\b" - - # Check if the text matches the pattern - match = re.search(pattern, text) - - # Return True if there is a match, False otherwise - return match is not None - -def normalize_date(date_string): - #Get the format of the datestring. 
- #Regex code to check for first format - if datetime_forwardslash_format(date_string): - date_string = unidecode.unidecode(date_string) - date = datetime.strptime(date_string, r"%d/%m/%Y") - else: - date_string = unidecode.unidecode(date_string) - date = datetime.strptime(date_string, r"%d-%m-%Y") - - # Extract day, month, and year from the datetime object - day = date.day - month = date.month - year = date.year - - # Define lookup tables for day, month, and year - day_lookup = ['शून्य', 'प्रथम', 'द्वितीय', 'तृतीय', 'चतुर्थ', 'पञ्चमी', 'षष्ठी', 'सप्तमी', 'अष्टमी', 'नवम', 'दशम', 'एकादश', ' द्वादश', 'त्रयोदश', 'चतुर्दश', 'पञ्चदश', 'षोडश', - 'सप्तदश', 'अष्टादश', 'नवदशम्', 'विंशति', 'एकविंशति', 'द्वविंशति', 'विंशति' 'तृतीय', 'चतुर्विंशतिम', 'पञ्चविंशतिम', 'षड्विंशतिम', 'सप्तविंशति', 'अष्टाविंशति', 'नवविंशति', 'त्रिंशत्', 'एकत्रिंशत्'] - - month_lookup = ['शून्य', 'प्रथम', 'द्वितीय', 'तृतीय', 'चतुर्थ', 'पञ्चमी', 'षष्ठी', 'सप्तमी', 'अष्टमी', 'नवम', 'दशम', 'एकादश', 'द्वादशः'] - - tens_year_lookup = ['शून्य', 'दशम', 'विंशति', 'त्रिंशत्', 'चत्वारिंशत्', 'पञ्चाशत्', 'षष्टिम', 'सप्तति', 'अशीतिम', 'नवति'] - - units_year_lookup = ['शून्य', 'प्रथम', 'द्वितीय', 'तृतीय', 'चतुर्थ', 'पंचम', 'षष्ठी', 'सप्तम', 'अष्टम', 'नवम्'] - - year_split = [char for char in str(year)] - first_split = int(''.join(year_split[:2])) - second_split = int(''.join(year_split[2:])) - - first_split_str = dict_num_list[first_split - 1] - normalized_first_split = dict_num[first_split_str] - - second_split_str = dict_num_list[second_split - 1] - normalized_second_split = dict_num[second_split_str] - - year_str = normalized_first_split + ' ' + normalized_second_split - - return day_lookup[day], month_lookup[month], year_str - - - -def is_date_or_year(text): - # Regular expression pattern to match date format with slashes or dashes - pattern = r"\b\d{1,2}[/-]\d{1,2}[/-]\d{4}\b" - - # Check if the text matches the pattern - match = re.search(pattern, text) - - # Return True if there is a match, False otherwise - return match is not None - -def is_text_time_format(text): - # Sample input string - input_string = unidecode.unidecode(text) - - # Regular expression pattern to match the date format - second_pattern = r"\d{2}:\d{2}:\d{2}" - minute_and_hour_pattern = r"\d{2}:\d{2}" - - # Check if the input string matches the patterns above. - second_match = re.match(second_pattern, input_string) - hour_and_minute_match = re.match(minute_and_hour_pattern, input_string) - - if second_match: - return True - elif hour_and_minute_match: - return True - elif minute_and_hour_pattern: - return True - else: - return False - -def handle_time(text): - '''Normalizes time into string''' - # Sample input string - input_string = unidecode.unidecode(text) - - # Regular expression pattern to match the date format - hour_minute_second_pattern = r"\d{2}:\d{2}:\d{2}" - minute_and_hour_pattern = r"\d{2}:\d{2}" - - # Check if the input string matches the patterns above. - hour_minute_second_match = re.match(hour_minute_second_pattern, input_string) - hour_and_minute_match = re.match(minute_and_hour_pattern, input_string) - - if hour_minute_second_match: - # Parse the input string using datetime - datetime_object = datetime.strptime(input_string, "%H:%M:%S") - - # Extract hours, minutes, and seconds - hours = datetime_object.hour - minutes = datetime_object.minute - seconds = datetime_object.second - - dict_num_list = list(dict_num) - - if type(int(hours)) == int: - #Parse the hours str in a different format. - #Get the index of the hours str. 
- hours_str = dict_num_list[int(hours) - 1] - hours_string = dict_num[hours_str] - - else: - if hours in dict_num: - hours_string = dict_num[hours] - - if type(int(minutes)) == int: - #Parse the hours str in a different format. - #Get the index of the hours str. - minutes_str = dict_num_list[int(minutes) - 1] - minutes_string = dict_num[minutes_str] - - else: - if minutes in dict_num: - minutes_string = dict_num[minutes] - - if type(int(seconds)) == int: - #Parse the hours str in a different format. - #Get the index of the hours str. - seconds_str = dict_num_list[int(seconds) - 1] - seconds_string = dict_num[seconds_str] - - else: - if seconds in dict_num: - seconds_string = dict_num[seconds] - - # return hours, minutes, seconds - time_str = f'{hours_string} {minutes_string} {seconds_string}' - return time_str - - elif hour_and_minute_match: - # Parse the input string using datetime - datetime_object = datetime.strptime(input_string, "%H:%M") - - hours_str = text.split(sep = ':')[0] - minutes_str = text.split(sep = ':')[1] - dict_num_list = list(dict_num) - - # Extract hours, minutes, and seconds - hours = datetime_object.hour - minutes = datetime_object.minute - - if type(int(hours)) == int: - hours_str = dict_num_list[int(hours) - 1] - hours_string = dict_num[hours_str] - else: - hours_string = dict_num[hours_str] - - if type(int(minutes)) == int: - minutes_str = dict_num_list[int(minutes) - 1] - minutes_string = dict_num[minutes_str] - else: - minutes_string = dict_num[minutes_str] - - - time_str = f'{hours_string} {minutes_string}' - return time_str - - else: - - return text diff --git a/spaces/KonradSzafer/HF-QA-Demo/app.py b/spaces/KonradSzafer/HF-QA-Demo/app.py deleted file mode 100644 index 76cb450468bff3b310566717fce7dd20254ddda0..0000000000000000000000000000000000000000 --- a/spaces/KonradSzafer/HF-QA-Demo/app.py +++ /dev/null @@ -1,70 +0,0 @@ -import threading - -import gradio as gr - -from qa_engine import logger, Config, QAEngine -from discord_bot import DiscordClient - - - -config = Config() -qa_engine = QAEngine( - llm_model_id=config.question_answering_model_id, - embedding_model_id=config.embedding_model_id, - index_repo_id=config.index_repo_id, - prompt_template=config.prompt_template, - use_docs_for_context=config.use_docs_for_context, - add_sources_to_response=config.add_sources_to_response, - use_messages_for_context=config.use_messages_in_context, - debug=config.debug -) - - -def gradio_interface(): - with gr.Blocks() as demo: - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.ClearButton([msg, chatbot]) - - def respond(message, chat_history): - context = ''.join(f'User: {msg} \nBot:{bot_msg}\n' for msg, bot_msg in chat_history) - logger.info(f'Context: {context}') - response = qa_engine.get_response(message, context) - bot_message = response.get_answer() + response.get_sources_as_text() + '\n' - chat_history.append((message, bot_message)) - return '', chat_history - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) - demo.launch(share=True) - - -def discord_bot_inference_thread(): - client = DiscordClient( - qa_engine=qa_engine, - num_last_messages=config.num_last_messages, - use_names_in_context=config.use_names_in_context, - enable_commands=config.enable_commands, - debug=config.debug - ) - client.run(config.discord_token) - -def discord_bot(): - thread = threading.Thread(target=discord_bot_inference_thread) - thread.start() - with gr.Blocks() as demo: - gr.Markdown(f'Discord bot is running.') - demo.queue(concurrency_count=100) - 
demo.queue(max_size=100) - demo.launch() - - -if __name__ == '__main__': - if config.app_mode == 'gradio': - gradio_interface() - elif config.app_mode == 'discord': - discord_bot() - else: - raise ValueError( - f'Invalid app mode: {config.app_mode}, ', - f'set APP_MODE to "gradio" or "discord"' - ) diff --git a/spaces/KonradSzafer/HF-QA-Demo/benchmark/__main__.py b/spaces/KonradSzafer/HF-QA-Demo/benchmark/__main__.py deleted file mode 100644 index 2557f4287b4ee74a8558332d88f07a5eb40407d7..0000000000000000000000000000000000000000 --- a/spaces/KonradSzafer/HF-QA-Demo/benchmark/__main__.py +++ /dev/null @@ -1,76 +0,0 @@ -import time -import json - -import wandb -import gradio as gr - -from qa_engine import logger, Config, QAEngine - - -QUESTIONS_FILENAME = 'benchmark/questions.json' - -config = Config() -qa_engine = QAEngine( - llm_model_id=config.question_answering_model_id, - embedding_model_id=config.embedding_model_id, - index_repo_id=config.index_repo_id, - prompt_template=config.prompt_template, - use_docs_for_context=config.use_docs_for_context, - add_sources_to_response=config.add_sources_to_response, - use_messages_for_context=config.use_messages_in_context, - debug=config.debug -) - - -def main(): - filtered_config = config.asdict() - disallowed_config_keys = [ - "DISCORD_TOKEN", "NUM_LAST_MESSAGES", "USE_NAMES_IN_CONTEXT", - "ENABLE_COMMANDS", "APP_MODE", "DEBUG" - ] - for key in disallowed_config_keys: - filtered_config.pop(key, None) - - wandb.init( - project='HF-Docs-QA', - name=f'{config.question_answering_model_id} - {config.embedding_model_id} - {config.index_repo_id}', - mode='run', # run/disabled - config=filtered_config - ) - - with open(QUESTIONS_FILENAME, 'r') as f: - questions = json.load(f) - - table = wandb.Table( - columns=[ - "id", "question", "messages_context", "answer", "sources", "time" - ] - ) - for i, q in enumerate(questions): - logger.info(f"Question {i+1}/{len(questions)}") - - question = q['question'] - messages_context = q['messages_context'] - - time_start = time.perf_counter() - response = qa_engine.get_response( - question=question, - messages_context=messages_context - ) - time_end = time.perf_counter() - - table.add_data( - i, - question, - messages_context, - response.get_answer(), - response.get_sources_as_text(), - time_end - time_start - ) - - wandb.log({"answers": table}) - wandb.finish() - - -if __name__ == '__main__': - main() diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/vc_infer_pipeline.py b/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/vc_infer_pipeline.py deleted file mode 100644 index 82c15f59a8072e1b317fa1d750ccc1b814a6989d..0000000000000000000000000000000000000000 --- 
a/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/vc_infer_pipeline.py +++ /dev/null @@ -1,443 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - model = "full" - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 
0.1] = 0 - f0 = f0[0].cpu().numpy() - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, 
pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - 
self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/LeoLeoLeo1/ChuanhuChatGPT/llama_func.py b/spaces/LeoLeoLeo1/ChuanhuChatGPT/llama_func.py deleted file mode 100644 index c71027dd4e6f99c0c12626cbbf276f407877be04..0000000000000000000000000000000000000000 --- a/spaces/LeoLeoLeo1/ChuanhuChatGPT/llama_func.py +++ /dev/null @@ -1,192 +0,0 @@ -import os -import logging - -from llama_index import GPTSimpleVectorIndex -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -from langchain.llms import OpenAI -import colorama - - -from presets import * -from utils import * - - -def get_documents(file_src): - documents = [] - index_name = "" - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - logging.debug(f"file: {file.name}") - index_name += file.name - if os.path.splitext(file.name)[1] == ".pdf": - logging.debug("Loading PDF...") - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".docx": - logging.debug("Loading DOCX...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - logging.debug("Loading text file...") - with open(file.name, "r", encoding="utf-8") as f: - text = add_space(f.read()) - documents += [Document(text)] - index_name = sha1sum(index_name) - return documents, index_name - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=1, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", - num_children=10, - max_keywords_per_chunk=10, -): - os.environ["OPENAI_API_KEY"] = api_key - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - llm_predictor = LLMPredictor( - llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key) - ) - prompt_helper = PromptHelper( - max_input_size, - num_outputs, - max_chunk_overlap, - embedding_limit, - chunk_size_limit, - separator=separator, - ) - documents, index_name = get_documents(file_src) - if os.path.exists(f"./index/{index_name}.json"): - logging.info("找到了缓存的索引文件,加载中……") - return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json") - else: - try: - logging.debug("构建索引中……") - index = GPTSimpleVectorIndex( - documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper - ) - os.makedirs("./index", exist_ok=True) 
- index.save_to_disk(f"./index/{index_name}.json") - return index - except Exception as e: - print(e) - return None - - -def chat_ai( - api_key, - index, - question, - context, - chatbot, -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.info(f"Question: {question}") - - response, chatbot_display, status_text = ask_ai( - api_key, - index, - question, - replace_today(PROMPT_TEMPLATE), - REFINE_TEMPLATE, - SIM_K, - INDEX_QUERY_TEMPRATURE, - context, - ) - if response is None: - status_text = "查询失败,请换个问法试试" - return context, chatbot - response = response - - context.append({"role": "user", "content": question}) - context.append({"role": "assistant", "content": response}) - chatbot.append((question, chatbot_display)) - - os.environ["OPENAI_API_KEY"] = "" - return context, chatbot, status_text - - -def ask_ai( - api_key, - index, - question, - prompt_tmpl, - refine_tmpl, - sim_k=1, - temprature=0, - prefix_messages=[], -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.debug("Index file found") - logging.debug("Querying index...") - llm_predictor = LLMPredictor( - llm=OpenAI( - temperature=temprature, - model_name="gpt-3.5-turbo-0301", - prefix_messages=prefix_messages, - ) - ) - - response = None # Initialize response variable to avoid UnboundLocalError - qa_prompt = QuestionAnswerPrompt(prompt_tmpl) - rf_prompt = RefinePrompt(refine_tmpl) - response = index.query( - question, - llm_predictor=llm_predictor, - similarity_top_k=sim_k, - text_qa_template=qa_prompt, - refine_template=rf_prompt, - response_mode="compact", - ) - - if response is not None: - logging.info(f"Response: {response}") - ret_text = response.response - nodes = [] - for index, node in enumerate(response.source_nodes): - brief = node.source_text[:25].replace("\n", "") - nodes.append( - f"
<details><summary>[{index+1}]\t{brief}...</summary><p>{node.source_text}</p></details>
    " - ) - new_response = ret_text + "\n----------\n" + "\n\n".join(nodes) - logging.info( - f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}" - ) - os.environ["OPENAI_API_KEY"] = "" - return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens" - else: - logging.warning("No response found, returning None") - os.environ["OPENAI_API_KEY"] = "" - return None - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text diff --git a/spaces/Lippppxy/AiAnimeVoice/transforms.py b/spaces/Lippppxy/AiAnimeVoice/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Lippppxy/AiAnimeVoice/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, 
- min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * 
theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/LovnishVermaPRINCE/attendanceviaface/README.md b/spaces/LovnishVermaPRINCE/attendanceviaface/README.md deleted file mode 100644 index 1656bcdf0d310012934f4972c9824190c0af74f6..0000000000000000000000000000000000000000 --- a/spaces/LovnishVermaPRINCE/attendanceviaface/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Attendanceviaface -emoji: 🦀 -colorFrom: red -colorTo: pink -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MLVKU/Human_Object_Interaction/hotr/util/ramp.py b/spaces/MLVKU/Human_Object_Interaction/hotr/util/ramp.py deleted file mode 100644 index 20687b717333fb12aa4695350c0871a2de3b4902..0000000000000000000000000000000000000000 --- a/spaces/MLVKU/Human_Object_Interaction/hotr/util/ramp.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2018, Curious AI Ltd. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -import numpy as np - -def sigmoid_rampup(current, rampup_length,max_coef=1.): - """Exponential rampup from https://arxiv.org/abs/1610.02242""" - """Modified version from https://github.com/vikasverma1077/GraphMix/blob/master/semisupervised/codes/ramps.py""" - if rampup_length == 0: - return max_coef - else: - current = np.clip(current, 0.0, rampup_length) - phase = 1.0 - current / rampup_length - return float(np.exp(-5.0 * phase * phase))*max_coef - -def cosine_rampdown(current, rampdown_length,max_coef=1.): - """Cosine rampdown from https://arxiv.org/abs/1608.03983""" - assert 0 <= current <= rampdown_length - return float(.5 * (np.cos(np.pi *current / rampdown_length) + 1))*max_coef \ No newline at end of file diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h deleted file mode 100644 index ad1311a78f61303616504eb991aaa9c4a93d9948..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cuda.h +++ /dev/null @@ -1,33 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor ms_deform_attn_cuda_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector ms_deform_attn_cuda_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/xmem_inference.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/xmem_inference.py deleted file mode 100644 index 3313aefc265e008125030ab2b17ceacbdcbcaac2..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/xmem_inference.py +++ /dev/null @@ -1,196 +0,0 @@ -import sys -import os - -project_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(project_dir) - -from argparse import ArgumentParser -import shutil - -import torch -import torch.nn.functional as F -from torch.utils.data import DataLoader -import numpy as np -from PIL import Image - -from XMem.inference.data.test_datasets import CustomDataset -from XMem.inference.data.mask_mapper import MaskMapper -from XMem.model.network import XMem -from XMem.inference.inference_core import InferenceCore - -from progressbar import progressbar - - - -""" -Arguments loading -""" -parser = ArgumentParser() -parser.add_argument('--model', default='checkpoints/XMem.pth') - -# Data options -parser.add_argument('-d', '--data', default='data', required=True, help='the dir name to the images data/dir/images') -parser.add_argument('-v', '--video', required=False, help='video name') -parser.add_argument('--mask_dir', required=True, help='the dir name to the mask e.g., man.mask') - -parser.add_argument('--dataset', help='D16/D17/Y18/Y19/LV1/LV3/G', default='G') -parser.add_argument('--split', help='val/test', default='val') -parser.add_argument('--output', default=None) -parser.add_argument('--save_all', action='store_true', - help='Save all frames. 
Useful only in YouTubeVOS/long-time video', ) - -parser.add_argument('--benchmark', action='store_true', help='enable to disable amp for FPS benchmarking') - -# Long-term memory options -parser.add_argument('--disable_long_term', action='store_true') -parser.add_argument('--max_mid_term_frames', help='T_max in paper, decrease to save memory', type=int, default=10) -parser.add_argument('--min_mid_term_frames', help='T_min in paper, decrease to save memory', type=int, default=5) -parser.add_argument('--max_long_term_elements', help='LT_max in paper, increase if objects disappear for a long time', - type=int, default=10000) -parser.add_argument('--num_prototypes', help='P in paper', type=int, default=128) - -parser.add_argument('--top_k', type=int, default=30) -parser.add_argument('--mem_every', help='r in paper. Increase to improve running speed.', type=int, default=5) -parser.add_argument('--deep_update_every', help='Leave -1 normally to synchronize with mem_every', type=int, default=-1) - -# Multi-scale options -parser.add_argument('--save_scores', action='store_true') -parser.add_argument('--flip', action='store_true') -parser.add_argument('--size', default=480, type=int, - help='Resize the shorter side to this size. -1 to use original resolution. ') - -args = parser.parse_args() -config = vars(args) -config['enable_long_term'] = not config['disable_long_term'] - -if args.output is None: - args.output = args.data.replace('images', args.mask_dir) - print(f'Output path not provided. By default saving to the mask dir') - -os.makedirs(args.output, exist_ok=True) - -""" -Data preparation -""" - -out_path = args.output - -if args.dataset == 'G': - meta_dataset = CustomDataset(args.data, mask_dir=args.mask_dir, size=args.size) - if not args.save_all: - args.save_all = True - print('save_all is forced to be true in generic evaluation mode.') -else: - raise NotImplementedError - -torch.autograd.set_grad_enabled(False) - -# Set up loader -meta_loader = meta_dataset.get_datasets() - -# Load our checkpoint -network = XMem(config, args.model).cuda().eval() -if args.model is not None: - model_weights = torch.load(args.model) - network.load_weights(model_weights, init_as_zero_if_needed=True) -else: - print('No model loaded.') - -total_process_time = 0 -total_frames = 0 - -# Start eval -for vid_reader in progressbar(meta_loader, max_value=len(meta_dataset), redirect_stdout=True): - - loader = DataLoader(vid_reader, batch_size=1, shuffle=False, num_workers=2) - vid_name = vid_reader.vid_name - vid_length = len(loader) - # no need to count usage for LT if the video is not that long anyway - config['enable_long_term_count_usage'] = ( - config['enable_long_term'] and - (vid_length - / (config['max_mid_term_frames']-config['min_mid_term_frames']) - * config['num_prototypes']) - >= config['max_long_term_elements'] - ) - - mapper = MaskMapper() - processor = InferenceCore(network, config=config) - first_mask_loaded = False - - for ti, data in enumerate(loader): - with torch.cuda.amp.autocast(enabled=not args.benchmark): - rgb = data['rgb'].cuda()[0] - msk = data.get('mask') - info = data['info'] - frame = info['frame'][0] - shape = info['shape'] - need_resize = info['need_resize'][0] - - """ - For timing see https://discuss.pytorch.org/t/how-to-measure-time-in-pytorch/26964 - Seems to be very similar in testing as my previous timing method - with two cuda sync + time.time() in STCN though - """ - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - start.record() - - if 
not first_mask_loaded: - if msk is not None: - first_mask_loaded = True - else: - # no point to do anything without a mask - continue - - if args.flip: - rgb = torch.flip(rgb, dims=[-1]) - msk = torch.flip(msk, dims=[-1]) if msk is not None else None - - # Map possibly non-continuous labels to continuous ones - if msk is not None: - msk, labels = mapper.convert_mask(msk[0].numpy()) - msk = torch.Tensor(msk).cuda() - if need_resize: - msk = vid_reader.resize_mask(msk.unsqueeze(0))[0] - processor.set_all_labels(list(mapper.remappings.values())) - else: - labels = None - - # Run the model on this frame - prob = processor.step(rgb, msk, labels, end=(ti==vid_length-1)) - - # Upsample to original size if needed - if need_resize: - prob = F.interpolate(prob.unsqueeze(1), shape, mode='bilinear', align_corners=False)[:,0] - - end.record() - torch.cuda.synchronize() - total_process_time += (start.elapsed_time(end)/1000) - total_frames += 1 - - if args.flip: - prob = torch.flip(prob, dims=[-1]) - - # Probability mask -> index mask - out_mask = torch.argmax(prob, dim=0) - out_mask = (out_mask.detach().cpu().numpy()).astype(np.uint8) - - # Save the mask - if args.save_all or info['save'][0]: - this_out_path = out_path - os.makedirs(this_out_path, exist_ok=True) - out_mask = mapper.remap_index_mask(out_mask) - out_img = Image.fromarray(out_mask) - if vid_reader.get_palette() is not None: - out_img.putpalette(vid_reader.get_palette()) - out_img.save(os.path.join(this_out_path, frame[:-4]+'.png')) - - - - -print(f'Total processing time: {total_process_time}') -print(f'Total processed frames: {total_frames}') -print(f'FPS: {total_frames / total_process_time}') -print(f'Max allocated memory (MB): {torch.cuda.max_memory_allocated() / (2**20)}') - diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/deform_conv.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/deform_conv.py deleted file mode 100644 index a3f8c75ee774823eea334e3b3732af6a18f55038..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/deform_conv.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair, _single - -from annotator.uniformer.mmcv.utils import deprecated_api_warning -from ..cnn import CONV_LAYERS -from ..utils import ext_loader, print_log - -ext_module = ext_loader.load_ext('_ext', [ - 'deform_conv_forward', 'deform_conv_backward_input', - 'deform_conv_backward_parameters' -]) - - -class DeformConv2dFunction(Function): - - @staticmethod - def symbolic(g, - input, - offset, - weight, - stride, - padding, - dilation, - groups, - deform_groups, - bias=False, - im2col_step=32): - return g.op( - 'mmcv::MMCVDeformConv2d', - input, - offset, - weight, - stride_i=stride, - padding_i=padding, - dilation_i=dilation, - groups_i=groups, - deform_groups_i=deform_groups, - bias_i=bias, - im2col_step_i=im2col_step) - - @staticmethod - def forward(ctx, - input, - offset, - weight, - stride=1, - padding=0, - dilation=1, - groups=1, - deform_groups=1, - bias=False, - im2col_step=32): - if input is not None and input.dim() != 4: - raise ValueError( - f'Expected 4D tensor as input, got {input.dim()}D tensor \ - instead.') - assert bias is False, 'Only support bias is False.' - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deform_groups = deform_groups - ctx.im2col_step = im2col_step - - # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; - # amp won't cast the type of model (float32), but "offset" is cast - # to float16 by nn.Conv2d automatically, leading to the type - # mismatch with input (when it is float32) or weight. - # The flag for whether to use fp16 or amp is the type of "offset", - # we cast weight and input to temporarily support fp16 and amp - # whatever the pytorch version is. 
- input = input.type_as(offset) - weight = weight.type_as(input) - ctx.save_for_backward(input, offset, weight) - - output = input.new_empty( - DeformConv2dFunction._output_size(ctx, input, weight)) - - ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones - - cur_im2col_step = min(ctx.im2col_step, input.size(0)) - assert (input.size(0) % - cur_im2col_step) == 0, 'im2col step must divide batchsize' - ext_module.deform_conv_forward( - input, - weight, - offset, - output, - ctx.bufs_[0], - ctx.bufs_[1], - kW=weight.size(3), - kH=weight.size(2), - dW=ctx.stride[1], - dH=ctx.stride[0], - padW=ctx.padding[1], - padH=ctx.padding[0], - dilationW=ctx.dilation[1], - dilationH=ctx.dilation[0], - group=ctx.groups, - deformable_group=ctx.deform_groups, - im2col_step=cur_im2col_step) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, weight = ctx.saved_tensors - - grad_input = grad_offset = grad_weight = None - - cur_im2col_step = min(ctx.im2col_step, input.size(0)) - assert (input.size(0) % cur_im2col_step - ) == 0, 'batch size must be divisible by im2col_step' - - grad_output = grad_output.contiguous() - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - ext_module.deform_conv_backward_input( - input, - offset, - grad_output, - grad_input, - grad_offset, - weight, - ctx.bufs_[0], - kW=weight.size(3), - kH=weight.size(2), - dW=ctx.stride[1], - dH=ctx.stride[0], - padW=ctx.padding[1], - padH=ctx.padding[0], - dilationW=ctx.dilation[1], - dilationH=ctx.dilation[0], - group=ctx.groups, - deformable_group=ctx.deform_groups, - im2col_step=cur_im2col_step) - - if ctx.needs_input_grad[2]: - grad_weight = torch.zeros_like(weight) - ext_module.deform_conv_backward_parameters( - input, - offset, - grad_output, - grad_weight, - ctx.bufs_[0], - ctx.bufs_[1], - kW=weight.size(3), - kH=weight.size(2), - dW=ctx.stride[1], - dH=ctx.stride[0], - padW=ctx.padding[1], - padH=ctx.padding[0], - dilationW=ctx.dilation[1], - dilationH=ctx.dilation[0], - group=ctx.groups, - deformable_group=ctx.deform_groups, - scale=1, - im2col_step=cur_im2col_step) - - return grad_input, grad_offset, grad_weight, \ - None, None, None, None, None, None, None - - @staticmethod - def _output_size(ctx, input, weight): - channels = weight.size(0) - output_size = (input.size(0), channels) - for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = ctx.padding[d] - kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = ctx.stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - 'convolution input is too small (output would be ' + - 'x'.join(map(str, output_size)) + ')') - return output_size - - -deform_conv2d = DeformConv2dFunction.apply - - -class DeformConv2d(nn.Module): - r"""Deformable 2D convolution. - - Applies a deformable 2D convolution over an input signal composed of - several input planes. DeformConv2d was described in the paper - `Deformable Convolutional Networks - `_ - - Note: - The argument ``im2col_step`` was added in version 1.3.17, which means - number of samples processed by the ``im2col_cuda_kernel`` per call. - It enables users to define ``batch_size`` and ``im2col_step`` more - flexibly and solved `issue mmcv#1440 - `_. - - Args: - in_channels (int): Number of channels in the input image. 
- out_channels (int): Number of channels produced by the convolution. - kernel_size(int, tuple): Size of the convolving kernel. - stride(int, tuple): Stride of the convolution. Default: 1. - padding (int or tuple): Zero-padding added to both sides of the input. - Default: 0. - dilation (int or tuple): Spacing between kernel elements. Default: 1. - groups (int): Number of blocked connections from input. - channels to output channels. Default: 1. - deform_groups (int): Number of deformable group partitions. - bias (bool): If True, adds a learnable bias to the output. - Default: False. - im2col_step (int): Number of samples processed by im2col_cuda_kernel - per call. It will work when ``batch_size`` > ``im2col_step``, but - ``batch_size`` must be divisible by ``im2col_step``. Default: 32. - `New in version 1.3.17.` - """ - - @deprecated_api_warning({'deformable_groups': 'deform_groups'}, - cls_name='DeformConv2d') - def __init__(self, - in_channels: int, - out_channels: int, - kernel_size: Union[int, Tuple[int, ...]], - stride: Union[int, Tuple[int, ...]] = 1, - padding: Union[int, Tuple[int, ...]] = 0, - dilation: Union[int, Tuple[int, ...]] = 1, - groups: int = 1, - deform_groups: int = 1, - bias: bool = False, - im2col_step: int = 32) -> None: - super(DeformConv2d, self).__init__() - - assert not bias, \ - f'bias={bias} is not supported in DeformConv2d.' - assert in_channels % groups == 0, \ - f'in_channels {in_channels} cannot be divisible by groups {groups}' - assert out_channels % groups == 0, \ - f'out_channels {out_channels} cannot be divisible by groups \ - {groups}' - - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deform_groups = deform_groups - self.im2col_step = im2col_step - # enable compatibility with nn.Conv2d - self.transposed = False - self.output_padding = _single(0) - - # only weight, no bias - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // self.groups, - *self.kernel_size)) - - self.reset_parameters() - - def reset_parameters(self): - # switch the initialization of `self.weight` to the standard kaiming - # method described in `Delving deep into rectifiers: Surpassing - # human-level performance on ImageNet classification` - He, K. et al. - # (2015), using a uniform distribution - nn.init.kaiming_uniform_(self.weight, nonlinearity='relu') - - def forward(self, x: Tensor, offset: Tensor) -> Tensor: - """Deformable Convolutional forward function. - - Args: - x (Tensor): Input feature, shape (B, C_in, H_in, W_in) - offset (Tensor): Offset for deformable convolution, shape - (B, deform_groups*kernel_size[0]*kernel_size[1]*2, - H_out, W_out), H_out, W_out are equal to the output's. - - An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. - The spatial arrangement is like: - - .. code:: text - - (x0, y0) (x1, y1) (x2, y2) - (x3, y3) (x4, y4) (x5, y5) - (x6, y6) (x7, y7) (x8, y8) - - Returns: - Tensor: Output of the layer. 
- """ - # To fix an assert error in deform_conv_cuda.cpp:128 - # input image is smaller than kernel - input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) < - self.kernel_size[1]) - if input_pad: - pad_h = max(self.kernel_size[0] - x.size(2), 0) - pad_w = max(self.kernel_size[1] - x.size(3), 0) - x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() - offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0) - offset = offset.contiguous() - out = deform_conv2d(x, offset, self.weight, self.stride, self.padding, - self.dilation, self.groups, self.deform_groups, - False, self.im2col_step) - if input_pad: - out = out[:, :, :out.size(2) - pad_h, :out.size(3) - - pad_w].contiguous() - return out - - def __repr__(self): - s = self.__class__.__name__ - s += f'(in_channels={self.in_channels},\n' - s += f'out_channels={self.out_channels},\n' - s += f'kernel_size={self.kernel_size},\n' - s += f'stride={self.stride},\n' - s += f'padding={self.padding},\n' - s += f'dilation={self.dilation},\n' - s += f'groups={self.groups},\n' - s += f'deform_groups={self.deform_groups},\n' - # bias is not supported in DeformConv2d. - s += 'bias=False)' - return s - - -@CONV_LAYERS.register_module('DCN') -class DeformConv2dPack(DeformConv2d): - """A Deformable Conv Encapsulation that acts as normal Conv layers. - - The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. - The spatial arrangement is like: - - .. code:: text - - (x0, y0) (x1, y1) (x2, y2) - (x3, y3) (x4, y4) (x5, y5) - (x6, y6) (x7, y7) (x8, y8) - - Args: - in_channels (int): Same as nn.Conv2d. - out_channels (int): Same as nn.Conv2d. - kernel_size (int or tuple[int]): Same as nn.Conv2d. - stride (int or tuple[int]): Same as nn.Conv2d. - padding (int or tuple[int]): Same as nn.Conv2d. - dilation (int or tuple[int]): Same as nn.Conv2d. - groups (int): Same as nn.Conv2d. - bias (bool or str): If specified as `auto`, it will be decided by the - norm_cfg. Bias will be set as True if norm_cfg is None, otherwise - False. - """ - - _version = 2 - - def __init__(self, *args, **kwargs): - super(DeformConv2dPack, self).__init__(*args, **kwargs) - self.conv_offset = nn.Conv2d( - self.in_channels, - self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1], - kernel_size=self.kernel_size, - stride=_pair(self.stride), - padding=_pair(self.padding), - dilation=_pair(self.dilation), - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset.weight.data.zero_() - self.conv_offset.bias.data.zero_() - - def forward(self, x): - offset = self.conv_offset(x) - return deform_conv2d(x, offset, self.weight, self.stride, self.padding, - self.dilation, self.groups, self.deform_groups, - False, self.im2col_step) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - version = local_metadata.get('version', None) - - if version is None or version < 2: - # the key is different in early versions - # In version < 2, DeformConvPack loads previous benchmark models. 
- if (prefix + 'conv_offset.weight' not in state_dict - and prefix[:-1] + '_offset.weight' in state_dict): - state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( - prefix[:-1] + '_offset.weight') - if (prefix + 'conv_offset.bias' not in state_dict - and prefix[:-1] + '_offset.bias' in state_dict): - state_dict[prefix + - 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + - '_offset.bias') - - if version is not None and version > 1: - print_log( - f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to ' - 'version 2.', - logger='root') - - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/bsrgan_light.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 808c7f882cb75e2ba2340d5b55881d11927351f0..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,651 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - if up: - image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC) # todo: random, as above? want to condition on it then - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/spaces/Mochine/hackathon_chatbot_openai_api/README.md b/spaces/Mochine/hackathon_chatbot_openai_api/README.md deleted file mode 100644 index 74244c635d9cfdfbcb01d269720a19e11de2c584..0000000000000000000000000000000000000000 --- a/spaces/Mochine/hackathon_chatbot_openai_api/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: hackathon chatbot openai api -emoji: 🐨 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: cc-by-4.0 -duplicated_from: baixing/hackathon_chatbot_openai_api ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/json_dumper.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/json_dumper.py deleted file mode 100644 index e1c8ab026df3b03231e2edd6e9bf39de7cf27e38..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/dumpers/json_dumper.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Dict - -import mmengine - -from mmocr.registry import DATA_DUMPERS -from .base import BaseDumper - - -@DATA_DUMPERS.register_module() -class JsonDumper(BaseDumper): - """Dumper for json file.""" - - def dump(self, data: Dict) -> None: - """Dump data to json file. - - Args: - data (Dict): Data to be dumped. 
- """ - - filename = f'{self.task}_{self.split}.json' - dst_file = osp.join(self.data_root, filename) - mmengine.dump(data, dst_file) diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/encoders/sar_encoder.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/encoders/sar_encoder.py deleted file mode 100644 index 33d8c1ef8f5b8f57c5762d4449bc8baf06f8a380..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/encoders/sar_encoder.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Dict, Optional, Sequence, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmocr.registry import MODELS -from mmocr.structures import TextRecogDataSample -from .base import BaseEncoder - - -@MODELS.register_module() -class SAREncoder(BaseEncoder): - """Implementation of encoder module in `SAR. - - `_. - - Args: - enc_bi_rnn (bool): If True, use bidirectional RNN in encoder. - Defaults to False. - rnn_dropout (float): Dropout probability of RNN layer in encoder. - Defaults to 0.0. - enc_gru (bool): If True, use GRU, else LSTM in encoder. Defaults - to False. - d_model (int): Dim :math:`D_i` of channels from backbone. Defaults - to 512. - d_enc (int): Dim :math:`D_m` of encoder RNN layer. Defaults to 512. - mask (bool): If True, mask padding in RNN sequence. Defaults to - True. - init_cfg (dict or list[dict], optional): Initialization configs. - Defaults to [dict(type='Xavier', layer='Conv2d'), - dict(type='Uniform', layer='BatchNorm2d')]. - """ - - def __init__(self, - enc_bi_rnn: bool = False, - rnn_dropout: Union[int, float] = 0.0, - enc_gru: bool = False, - d_model: int = 512, - d_enc: int = 512, - mask: bool = True, - init_cfg: Sequence[Dict] = [ - dict(type='Xavier', layer='Conv2d'), - dict(type='Uniform', layer='BatchNorm2d') - ], - **kwargs) -> None: - super().__init__(init_cfg=init_cfg) - assert isinstance(enc_bi_rnn, bool) - assert isinstance(rnn_dropout, (int, float)) - assert 0 <= rnn_dropout < 1.0 - assert isinstance(enc_gru, bool) - assert isinstance(d_model, int) - assert isinstance(d_enc, int) - assert isinstance(mask, bool) - - self.enc_bi_rnn = enc_bi_rnn - self.rnn_dropout = rnn_dropout - self.mask = mask - - # LSTM Encoder - kwargs = dict( - input_size=d_model, - hidden_size=d_enc, - num_layers=2, - batch_first=True, - dropout=rnn_dropout, - bidirectional=enc_bi_rnn) - if enc_gru: - self.rnn_encoder = nn.GRU(**kwargs) - else: - self.rnn_encoder = nn.LSTM(**kwargs) - - # global feature transformation - encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1) - self.linear = nn.Linear(encoder_rnn_out_size, encoder_rnn_out_size) - - def forward( - self, - feat: torch.Tensor, - data_samples: Optional[Sequence[TextRecogDataSample]] = None - ) -> torch.Tensor: - """ - Args: - feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`. - data_samples (list[TextRecogDataSample], optional): Batch of - TextRecogDataSample, containing valid_ratio information. - Defaults to None. - - Returns: - Tensor: A tensor of shape :math:`(N, D_m)`. 
- """ - if data_samples is not None: - assert len(data_samples) == feat.size(0) - - valid_ratios = None - if data_samples is not None: - valid_ratios = [ - data_sample.get('valid_ratio', 1.0) - for data_sample in data_samples - ] if self.mask else None - - h_feat = feat.size(2) - feat_v = F.max_pool2d( - feat, kernel_size=(h_feat, 1), stride=1, padding=0) - feat_v = feat_v.squeeze(2) # bsz * C * W - feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C - - holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C - - if valid_ratios is not None: - valid_hf = [] - T = holistic_feat.size(1) - for i, valid_ratio in enumerate(valid_ratios): - valid_step = min(T, math.ceil(T * valid_ratio)) - 1 - valid_hf.append(holistic_feat[i, valid_step, :]) - valid_hf = torch.stack(valid_hf, dim=0) - else: - valid_hf = holistic_feat[:, -1, :] # bsz * C - - holistic_feat = self.linear(valid_hf) # bsz * C - - return holistic_feat diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/analysis_tools/offline_eval.py b/spaces/Mountchicken/MAERec-Gradio/tools/analysis_tools/offline_eval.py deleted file mode 100644 index b454942238d59d4f07067896ca9f9742094d0d59..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/tools/analysis_tools/offline_eval.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import json - -import mmengine -from mmengine.config import Config, DictAction -from mmengine.evaluator import Evaluator -from mmengine.registry import init_default_scope - - -def parse_args(): - parser = argparse.ArgumentParser(description='Offline evaluation of the ' - 'prediction saved in pkl format') - parser.add_argument('config', help='Config of the model') - parser.add_argument( - 'pkl_results', help='Path to the predictions in ' - 'pickle format') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='Override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - # load config - cfg = Config.fromfile(args.config) - init_default_scope(cfg.get('default_scope', 'mmocr')) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - predictions = mmengine.load(args.pkl_results) - - evaluator = Evaluator(cfg.test_evaluator) - eval_results = evaluator.offline_evaluate(predictions) - print(json.dumps(eval_results)) - - -if __name__ == '__main__': - main() diff --git a/spaces/MuhammedAyman29/Fruits/README.md b/spaces/MuhammedAyman29/Fruits/README.md deleted file mode 100644 index 54a06ab9890ffd499a669bea1d959818b0c1eb70..0000000000000000000000000000000000000000 --- a/spaces/MuhammedAyman29/Fruits/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fruits -emoji: ⚡ -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/clipscore.py b/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/clipscore.py deleted file mode 100644 index 0345140d9f7b47e37b3a895915a135e1441c907b..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/utils/clipscore.py +++ /dev/null @@ -1,396 +0,0 @@ -from transformers import CLIPModel, CLIPTokenizer -import os -import json -import argparse -from random import shuffle, seed -import string -# non-standard dependencies: -import h5py -from six.moves import cPickle -import numpy as np -import torch -import torchvision.models as models -import skimage.io - -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from PIL import Image -from torch import nn - - -class CLIPScore(nn.Module): - def __init__(self, clipscore_w=2.5, image_size=224, mode='clip_s', use_grammar=False, joint_out=False): - super(CLIPScore, self).__init__() - # from transformers import CLIPModel, CLIPTokenizer - self.clip_model = CLIPModel.from_pretrained( - 'openai/clip-vit-base-patch32') - self.tokenizer = CLIPTokenizer.from_pretrained( - 'openai/clip-vit-base-patch32') - - self.clip_model.eval() - - self.clipscore_w = clipscore_w - - self.image_transform = self._transform(image_size) - - self.mode = mode - assert mode in ['clip_s', 'refclip_s'] - - self.use_grammar = use_grammar - self.joint_out = joint_out - - if self.use_grammar and joint_out is False: - self.grammar_score_head = nn.Sequential( - nn.Linear(self.clip_model.text_embed_dim, self.clip_model.projection_dim, bias=False), - nn.ReLU(), - nn.Linear(self.clip_model.projection_dim, 2, bias=False) - ) - - def _transform(self, n_px): - return Compose([ - Resize(n_px, interpolation=Image.BICUBIC), - CenterCrop(n_px), - lambda image: image.convert("RGB"), - ToTensor(), - Normalize((0.48145466, 0.4578275, 0.40821073), - (0.26862954, 0.26130258, 0.27577711)), - ]) - - def load_image(self, image_path): - image = Image.open(image_path) - return image - - # @torch.no_grad() - def image_extract(self, image): - if isinstance(image, str): - image = self.load_image(image) - if not isinstance(image, torch.Tensor): - image = self.image_transform(image) - - img_tensor = image.view(-1, 3, 224, 224) - device = next(self.clip_model.parameters()).device - img_tensor = img_tensor.to(device) - - clip_model = 
self.clip_model - - img_feat = clip_model.vision_model(img_tensor).pooler_output - img_feat = clip_model.visual_projection(img_feat) - img_feat = img_feat / img_feat.norm(dim=-1, keepdim=True) - - return img_feat - - # @torch.no_grad() - def text_extract(self, text, prompt="A photo depicts", proj_norm=True): - if isinstance(text, str): - text_batch = [" ".join([prompt, text])] - elif isinstance(text, list): - text_batch = [" ".join([prompt, txt]) for txt in text] - - if isinstance(text, tuple) and isinstance(text[0], torch.Tensor): - input_ids, attention_mask = text - else: - input_text = text_batch - - tokenized = self.tokenizer( - input_text, return_tensors='pt', padding=True, truncation=True) - - input_ids = tokenized.input_ids - attention_mask = tokenized.attention_mask - - clip_model = self.clip_model - device = next(self.clip_model.parameters()).device - input_ids = input_ids.to(device) - attention_mask = attention_mask.to(device) - - text_feat = clip_model.text_model(input_ids, attention_mask).pooler_output - - if proj_norm: - text_feat = clip_model.text_projection(text_feat) - text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True) - - return text_feat - - # @torch.no_grad() - def calc_clip_s(self, img_feat, text_feat): - return self.clipscore_w * torch.relu((img_feat * text_feat).sum(dim=-1)) - - # @torch.no_grad() - def calc_refclip_s(self, img_feat=None, text_feat=None, ref_text_feat=None, ref_text_mask=None, clip_s=None): - - if clip_s is None: - clip_s = self.calc_clip_s(img_feat, text_feat) - - B, dim = img_feat.size() - - ref_text_feat = ref_text_feat.view(B, -1, dim) - - K = ref_text_feat.size(1) - - text_feat = text_feat.view(B, 1, dim).expand(-1, K, -1) - assert ref_text_feat.size() == text_feat.size( - ), (ref_text_feat.size(), text_feat.size()) - - ref_score = self.calc_clip_s(text_feat, ref_text_feat) - if ref_text_mask is not None: - if not isinstance(ref_text_mask, torch.Tensor): - ref_text_mask = torch.tensor( - ref_text_mask, dtype=ref_score.dtype, device=ref_score.device) - ref_score = ref_score.view(B, K) * ref_text_mask.view(B, K) - - ref_score = ref_score.view(B, K).max(dim=1).values - - assert clip_s.size() == (B,) - assert clip_s.size() == ref_score.size() - - # harmonic mean - refclip_s = 2 / (1 / clip_s + 1 / ref_score) - return refclip_s - - @torch.no_grad() - def forward(self, - images=None, text=None, - img_feat=None, text_feat=None, - ref_text=None, ref_text_feat=None, ref_text_mask=None, - prompt="A photo depicts", - mode=None): - if img_feat is None: - img_feat = self.image_extract(images) - img_feat = img_feat.view(-1, 512) - - B = img_feat.size(0) - - if text_feat is None: - text_feat = self.text_extract(text, prompt=prompt) - text_feat = text_feat.view(-1, 512) - - if mode is None: - mode = self.mode - assert mode in ['clip_s', 'refclip_s'] - - if mode == 'clip_s': - clip_s = self.calc_clip_s(img_feat, text_feat) - return clip_s - elif mode == 'refclip_s': - if ref_text_feat is None: - ref_text_feat = self.text_extract(ref_text, prompt=prompt) - ref_text_feat = ref_text_feat.view(-1, 512) - - refclip_s = self.calc_refclip_s( - img_feat, text_feat, ref_text_feat, ref_text_mask=ref_text_mask) - return refclip_s - - - def train_step(self, - images=None, text=None, - img_feat=None, text_feat=None, - neg_text=None, neg_text_feat=None, - # ref_text=None, ref_text_feat=None, ref_text_mask=None, - prompt="A photo depicts", - # return_loss=True, - **kwargs): - - if img_feat is None: - img_feat = self.image_extract(images) - img_feat = img_feat.view(-1, 
512) - - B = img_feat.size(0) - - if text_feat is None: - text_feat = self.text_extract(text, prompt=prompt, proj_norm=False) - - text_cont_feat = self.clip_model.text_projection(text_feat) - text_cont_feat = text_cont_feat / text_cont_feat.norm(dim=-1, keepdim=True) - text_cont_feat = text_cont_feat.view(B, 512) - - # cosine similarity as logits - logit_scale = self.clip_model.logit_scale.exp() - logits_per_text = torch.matmul(text_cont_feat, img_feat.t()) * logit_scale - # logits_per_image = logits_per_text.T - - clip_loss = clip_loss_fn(logits_per_text) - - - # negative sampling - pos_text_feat = text_feat.view(B, 512) - neg_text_feat = self.text_extract(neg_text, prompt=prompt, proj_norm=False).view(B, 512) - - grammar_text_feat = torch.cat([pos_text_feat, neg_text_feat], dim=0) - - # 2B, 1 - grammar_text_logit = self.grammar_score_head(grammar_text_feat) - grammar_labels = torch.LongTensor([1] * B + [0] * B).to(grammar_text_logit.device).view(2 * B) - - grammar_loss = torch.nn.functional.cross_entropy(grammar_text_logit, grammar_labels) - - grammar_pred = grammar_text_logit.argmax(dim=1, keepdim=False) - grammar_pos_pred = grammar_pred[:B] - grammar_neg_pred = grammar_pred[B:] - # grammar_acc = (grammar_pred == grammar_labels).float().mean() - - out = { - 'clip_loss': clip_loss, - 'grammar_loss': grammar_loss, - 'img_feat': img_feat, - 'text_feat': text_cont_feat, - 'neg_text_feat': neg_text_feat, - 'grammar_pos_pred': grammar_pos_pred, - 'grammar_neg_pred': grammar_neg_pred, - } - - return out - -# contrastive loss function, adapted from -# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html -def contrastive_loss(logits: torch.Tensor, dim: int) -> torch.Tensor: - neg_ce = torch.diag(nn.functional.log_softmax(logits, dim=dim)) - return -neg_ce.mean() - - -def clip_loss_fn(similarity: torch.Tensor) -> torch.Tensor: - caption_loss = contrastive_loss(similarity, dim=0) - image_loss = contrastive_loss(similarity, dim=1) - return (caption_loss + image_loss) / 2.0 - - - -# class CLIPScore(nn.Module): -# def __init__(self, clipscore_w=2.5, image_size=224, mode='clip_s'): -# super(CLIPScore, self).__init__() -# # from transformers import CLIPModel, CLIPTokenizer -# self.clip_model = CLIPModel.from_pretrained( -# 'openai/clip-vit-base-patch32') -# self.tokenizer = CLIPTokenizer.from_pretrained( -# 'openai/clip-vit-base-patch32') - -# self.clip_model.eval() - -# self.clipscore_w = clipscore_w - -# self.image_transform = self._transform(image_size) - -# self.mode = mode -# assert mode in ['clip_s', 'refclip_s'] - -# def _transform(self, n_px): -# return Compose([ -# Resize(n_px, interpolation=Image.BICUBIC), -# CenterCrop(n_px), -# lambda image: image.convert("RGB"), -# ToTensor(), -# Normalize((0.48145466, 0.4578275, 0.40821073), -# (0.26862954, 0.26130258, 0.27577711)), -# ]) - -# def load_image(self, image_path): -# image = Image.open(image_path) -# return image - -# @torch.no_grad() -# def image_extract(self, image): -# if isinstance(image, str): -# image = self.load_image(image) -# if not isinstance(image, torch.Tensor): -# image = self.image_transform(image) - -# img_tensor = image.view(-1, 3, 224, 224) -# device = next(self.clip_model.parameters()).device -# img_tensor = img_tensor.to(device) - -# clip_model = self.clip_model - -# img_feat = clip_model.vision_model(img_tensor).pooler_output -# img_feat = clip_model.visual_projection(img_feat) -# img_feat = img_feat / img_feat.norm(dim=-1, keepdim=True) - -# return img_feat - -# 
@torch.no_grad() -# def text_extract(self, text, prompt="A photo depicts"): -# if isinstance(text, str): -# text_batch = [" ".join([prompt, text])] -# else: -# text_batch = [" ".join([prompt, txt]) for txt in text] - -# input_text = text_batch - -# tokenized = self.tokenizer( -# input_text, return_tensors='pt', padding=True) - -# input_ids = tokenized.input_ids -# attention_mask = tokenized.attention_mask - -# clip_model = self.clip_model -# device = next(self.clip_model.parameters()).device -# input_ids = input_ids.to(device) -# attention_mask = attention_mask.to(device) - -# text_feat = clip_model.text_model(input_ids, attention_mask).pooler_output -# text_feat = clip_model.text_projection(text_feat) -# text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True) - -# return text_feat - -# @torch.no_grad() -# def calc_clip_s(self, img_feat, text_feat): -# return self.clipscore_w * torch.relu((img_feat * text_feat).sum(dim=-1)) - -# @torch.no_grad() -# def calc_refclip_s(self, img_feat=None, text_feat=None, ref_text_feat=None, ref_text_mask=None, clip_s=None): - -# if clip_s is None: -# clip_s = self.calc_clip_s(img_feat, text_feat) - -# B, dim = img_feat.size() - -# ref_text_feat = ref_text_feat.view(B, -1, dim) - -# K = ref_text_feat.size(1) - -# text_feat = text_feat.view(B, 1, dim).expand(-1, K, -1) -# assert ref_text_feat.size() == text_feat.size(), (ref_text_feat.size(), text_feat.size()) - -# ref_score = self.calc_clip_s(text_feat, ref_text_feat) -# if ref_text_mask is not None: -# if not isinstance(ref_text_mask, torch.Tensor): -# ref_text_mask = torch.tensor(ref_text_mask, dtype=ref_score.dtype, device=ref_score.device) -# ref_score = ref_score.view(B, K) * ref_text_mask.view(B, K) - -# ref_score = ref_score.view(B, K).max(dim=1).values - -# assert clip_s.size() == (B,) -# assert clip_s.size() == ref_score.size() - -# # harmonic mean -# refclip_s = 2 / (1 / clip_s + 1 / ref_score) -# return refclip_s - - -# @torch.no_grad() -# def forward(self, -# images=None, text=None, -# img_feat=None, text_feat=None, -# ref_text=None, ref_text_feat=None, ref_text_mask=None, -# prompt="A photo depicts", -# mode=None): -# if img_feat is None: -# img_feat = self.image_extract(images) -# img_feat = img_feat.view(-1, 512) - -# if text_feat is None: -# text_feat = self.text_extract(text, prompt=prompt) -# text_feat = text_feat.view(-1, 512) - -# if mode is None: -# mode = self.mode -# assert mode in ['clip_s', 'refclip_s'] - -# if mode == 'clip_s': -# clip_s = self.calc_clip_s(img_feat, text_feat) -# return clip_s -# elif mode == 'refclip_s': -# if ref_text_feat is None: -# ref_text_feat = self.text_extract(ref_text, prompt=prompt) -# ref_text_feat = ref_text_feat.view(-1, 512) - -# refclip_s = self.calc_refclip_s(img_feat, text_feat, ref_text_feat, ref_text_mask=ref_text_mask) -# return refclip_s - diff --git a/spaces/NATSpeech/DiffSpeech/utils/commons/trainer.py b/spaces/NATSpeech/DiffSpeech/utils/commons/trainer.py deleted file mode 100644 index 81ffa3bd78a0065cc4005f5af957cb82567ff021..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/utils/commons/trainer.py +++ /dev/null @@ -1,559 +0,0 @@ -import random -import subprocess -import traceback -from datetime import datetime - -from torch.cuda.amp import GradScaler, autocast -import numpy as np -import torch.optim -import torch.utils.data -import copy -import logging -import os -import re -import sys -import torch -import torch.distributed as dist -import torch.multiprocessing as mp -import tqdm - -from 
utils.commons.ckpt_utils import get_last_checkpoint, get_all_ckpts -from utils.commons.ddp_utils import DDP -from utils.commons.hparams import hparams -from utils.commons.tensor_utils import move_to_cuda -from utils.os_utils import remove_file - - -class Tee(object): - def __init__(self, name, mode): - self.file = open(name, mode) - self.stdout = sys.stdout - sys.stdout = self - - def __del__(self): - sys.stdout = self.stdout - self.file.close() - - def write(self, data): - self.file.write(data) - self.stdout.write(data) - - def flush(self): - self.file.flush() - - -class Trainer: - def __init__( - self, - work_dir, - default_save_path=None, - accumulate_grad_batches=1, - max_updates=160000, - print_nan_grads=False, - val_check_interval=2000, - num_sanity_val_steps=5, - amp=False, - # tb logger - log_save_interval=100, - tb_log_interval=10, - # checkpoint - monitor_key='val_loss', - monitor_mode='min', - num_ckpt_keep=5, - save_best=True, - resume_from_checkpoint=0, - seed=1234, - debug=False, - ): - os.makedirs(work_dir, exist_ok=True) - self.work_dir = work_dir - self.accumulate_grad_batches = accumulate_grad_batches - self.max_updates = max_updates - self.num_sanity_val_steps = num_sanity_val_steps - self.print_nan_grads = print_nan_grads - self.default_save_path = default_save_path - self.resume_from_checkpoint = resume_from_checkpoint if resume_from_checkpoint > 0 else None - self.seed = seed - self.debug = debug - # model and optm - self.task = None - self.optimizers = [] - - # trainer state - self.testing = False - self.global_step = 0 - self.current_epoch = 0 - self.total_batches = 0 - - # configure checkpoint - self.monitor_key = monitor_key - self.num_ckpt_keep = num_ckpt_keep - self.save_best = save_best - self.monitor_op = np.less if monitor_mode == 'min' else np.greater - self.best_val_results = np.Inf if monitor_mode == 'min' else -np.Inf - self.mode = 'min' - - # allow int, string and gpu list - self.all_gpu_ids = [ - int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != ''] - self.num_gpus = len(self.all_gpu_ids) - self.on_gpu = self.num_gpus > 0 - self.root_gpu = 0 - logging.info(f'GPU available: {torch.cuda.is_available()}, GPU used: {self.all_gpu_ids}') - self.use_ddp = self.num_gpus > 1 - self.proc_rank = 0 - # Tensorboard logging - self.log_save_interval = log_save_interval - self.val_check_interval = val_check_interval - self.tb_log_interval = tb_log_interval - self.amp = amp - self.amp_scalar = GradScaler() - - def test(self, task_cls): - self.testing = True - self.fit(task_cls) - - def fit(self, task_cls): - if len(self.all_gpu_ids) > 1: - mp.spawn(self.ddp_run, nprocs=self.num_gpus, args=(task_cls, copy.deepcopy(hparams))) - else: - self.task = task_cls() - self.task.trainer = self - self.run_single_process(self.task) - return 1 - - def ddp_run(self, gpu_idx, task_cls, hparams_): - hparams.update(hparams_) - self.proc_rank = gpu_idx - self.init_ddp_connection(self.proc_rank, self.num_gpus) - if dist.get_rank() != 0 and not self.debug: - sys.stdout = open(os.devnull, "w") - sys.stderr = open(os.devnull, "w") - task = task_cls() - task.trainer = self - torch.cuda.set_device(gpu_idx) - self.root_gpu = gpu_idx - self.task = task - self.run_single_process(task) - - def run_single_process(self, task): - """Sanity check a few things before starting actual training. 
- - :param task: - """ - # build model, optm and load checkpoint - if self.proc_rank == 0: - self.save_terminal_logs() - if not self.testing: - self.save_codes() - - model = task.build_model() - if model is not None: - task.layers = model - checkpoint, _ = get_last_checkpoint(self.work_dir, self.resume_from_checkpoint) - if checkpoint is not None: - self.restore_weights(checkpoint) - elif self.on_gpu: - task.cuda(self.root_gpu) - if not self.testing: - self.optimizers = task.configure_optimizers() - self.fisrt_epoch = True - if checkpoint is not None: - self.restore_opt_state(checkpoint) - del checkpoint - # clear cache after restore - if self.on_gpu: - torch.cuda.empty_cache() - - if self.use_ddp: - self.task = self.configure_ddp(self.task) - dist.barrier() - - task_ref = self.get_task_ref() - task_ref.trainer = self - task_ref.testing = self.testing - # link up experiment object - if self.proc_rank == 0: - task_ref.build_tensorboard(save_dir=self.work_dir, name='tb_logs') - else: - os.makedirs('tmp', exist_ok=True) - task_ref.build_tensorboard(save_dir='tmp', name='tb_tmp') - self.logger = task_ref.logger - try: - if self.testing: - self.run_evaluation(test=True) - else: - self.train() - except KeyboardInterrupt as e: - traceback.print_exc() - task_ref.on_keyboard_interrupt() - - #################### - # valid and test - #################### - def run_evaluation(self, test=False): - eval_results = self.evaluate(self.task, test, tqdm_desc='Valid' if not test else 'test', - max_batches=hparams['eval_max_batches']) - if eval_results is not None and 'tb_log' in eval_results: - tb_log_output = eval_results['tb_log'] - self.log_metrics_to_tb(tb_log_output) - if self.proc_rank == 0 and not test: - self.save_checkpoint(epoch=self.current_epoch, logs=eval_results) - - def evaluate(self, task, test=False, tqdm_desc='Valid', max_batches=None): - if max_batches == -1: - max_batches = None - # enable eval mode - task.zero_grad() - task.eval() - torch.set_grad_enabled(False) - - task_ref = self.get_task_ref() - if test: - ret = task_ref.test_start() - if ret == 'EXIT': - return - else: - task_ref.validation_start() - outputs = [] - dataloader = task_ref.test_dataloader() if test else task_ref.val_dataloader() - pbar = tqdm.tqdm(dataloader, desc=tqdm_desc, total=max_batches, dynamic_ncols=True, unit='step', - disable=self.root_gpu > 0) - # give model a chance to do something with the outputs (and method defined) - for batch_idx, batch in enumerate(pbar): - if batch is None: # pragma: no cover - continue - # stop short when on fast_dev_run (sets max_batch=1) - if max_batches is not None and batch_idx >= max_batches: - break - - # make dataloader_idx arg in validation_step optional - if self.on_gpu: - batch = move_to_cuda(batch, self.root_gpu) - args = [batch, batch_idx] - if self.use_ddp: - output = task(*args) - else: - if test: - output = task_ref.test_step(*args) - else: - output = task_ref.validation_step(*args) - # track outputs for collation - outputs.append(output) - # give model a chance to do something with the outputs (and method defined) - if test: - eval_results = task_ref.test_end(outputs) - else: - eval_results = task_ref.validation_end(outputs) - # enable train mode again - task.train() - torch.set_grad_enabled(True) - return eval_results - - #################### - # train - #################### - def train(self): - task_ref = self.get_task_ref() - task_ref.on_train_start() - if self.num_sanity_val_steps > 0: - # run tiny validation (if validation defined) to make sure program won't crash 
during val - self.evaluate(self.task, False, 'Sanity Val', max_batches=self.num_sanity_val_steps) - # clear cache before training - if self.on_gpu: - torch.cuda.empty_cache() - dataloader = task_ref.train_dataloader() - epoch = self.current_epoch - # run all epochs - while True: - # set seed for distributed sampler (enables shuffling for each epoch) - if self.use_ddp and hasattr(dataloader.sampler, 'set_epoch'): - dataloader.sampler.set_epoch(epoch) - # update training progress in trainer and model - task_ref.current_epoch = epoch - self.current_epoch = epoch - # total batches includes multiple val checks - self.batch_loss_value = 0 # accumulated grads - # before epoch hook - task_ref.on_epoch_start() - - # run epoch - train_pbar = tqdm.tqdm(dataloader, initial=self.global_step, total=float('inf'), - dynamic_ncols=True, unit='step', disable=self.root_gpu > 0) - for batch_idx, batch in enumerate(train_pbar): - if self.global_step % self.val_check_interval == 0 and not self.fisrt_epoch: - self.run_evaluation() - pbar_metrics, tb_metrics = self.run_training_batch(batch_idx, batch) - train_pbar.set_postfix(**pbar_metrics) - self.fisrt_epoch = False - # when metrics should be logged - if (self.global_step + 1) % self.tb_log_interval == 0: - # logs user requested information to logger - self.log_metrics_to_tb(tb_metrics) - - self.global_step += 1 - task_ref.global_step = self.global_step - if self.global_step > self.max_updates: - print("| Training end..") - break - # epoch end hook - task_ref.on_epoch_end() - epoch += 1 - if self.global_step > self.max_updates: - break - task_ref.on_train_end() - - def run_training_batch(self, batch_idx, batch): - if batch is None: - return {} - all_progress_bar_metrics = [] - all_log_metrics = [] - task_ref = self.get_task_ref() - for opt_idx, optimizer in enumerate(self.optimizers): - if optimizer is None: - continue - # make sure only the gradients of the current optimizer's paramaters are calculated - # in the training step to prevent dangling gradients in multiple-optimizer setup. 
- if len(self.optimizers) > 1: - for param in task_ref.parameters(): - param.requires_grad = False - for group in optimizer.param_groups: - for param in group['params']: - param.requires_grad = True - - # forward pass - with autocast(enabled=self.amp): - if self.on_gpu: - batch = move_to_cuda(copy.copy(batch), self.root_gpu) - args = [batch, batch_idx, opt_idx] - if self.use_ddp: - output = self.task(*args) - else: - output = task_ref.training_step(*args) - loss = output['loss'] - if loss is None: - continue - progress_bar_metrics = output['progress_bar'] - log_metrics = output['tb_log'] - # accumulate loss - loss = loss / self.accumulate_grad_batches - - # backward pass - if loss.requires_grad: - if self.amp: - self.amp_scalar.scale(loss).backward() - else: - loss.backward() - - # track progress bar metrics - all_log_metrics.append(log_metrics) - all_progress_bar_metrics.append(progress_bar_metrics) - - if loss is None: - continue - - # nan grads - if self.print_nan_grads: - has_nan_grad = False - for name, param in task_ref.named_parameters(): - if (param.grad is not None) and torch.isnan(param.grad.float()).any(): - print("| NaN params: ", name, param, param.grad) - has_nan_grad = True - if has_nan_grad: - exit(0) - - # gradient update with accumulated gradients - if (self.global_step + 1) % self.accumulate_grad_batches == 0: - task_ref.on_before_optimization(opt_idx) - if self.amp: - self.amp_scalar.step(optimizer) - self.amp_scalar.update() - else: - optimizer.step() - optimizer.zero_grad() - task_ref.on_after_optimization(self.current_epoch, batch_idx, optimizer, opt_idx) - - # collapse all metrics into one dict - all_progress_bar_metrics = {k: v for d in all_progress_bar_metrics for k, v in d.items()} - all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()} - return all_progress_bar_metrics, all_log_metrics - - #################### - # load and save checkpoint - #################### - def restore_weights(self, checkpoint): - # load model state - task_ref = self.get_task_ref() - - for k, v in checkpoint['state_dict'].items(): - getattr(task_ref, k).load_state_dict(v) - - if self.on_gpu: - task_ref.cuda(self.root_gpu) - # load training state (affects trainer only) - self.best_val_results = checkpoint['checkpoint_callback_best'] - self.global_step = checkpoint['global_step'] - self.current_epoch = checkpoint['epoch'] - task_ref.global_step = self.global_step - - # wait for all models to restore weights - if self.use_ddp: - # wait for all processes to catch up - dist.barrier() - - def restore_opt_state(self, checkpoint): - if self.testing: - return - # restore the optimizers - optimizer_states = checkpoint['optimizer_states'] - for optimizer, opt_state in zip(self.optimizers, optimizer_states): - if optimizer is None: - return - try: - optimizer.load_state_dict(opt_state) - # move optimizer to GPU 1 weight at a time - if self.on_gpu: - for state in optimizer.state.values(): - for k, v in state.items(): - if isinstance(v, torch.Tensor): - state[k] = v.cuda(self.root_gpu) - except ValueError: - print("| WARMING: optimizer parameters not match !!!") - try: - if dist.is_initialized() and dist.get_rank() > 0: - return - except Exception as e: - print(e) - return - did_restore = True - return did_restore - - def save_checkpoint(self, epoch, logs=None): - monitor_op = np.less - ckpt_path = f'{self.work_dir}/model_ckpt_steps_{self.global_step}.ckpt' - logging.info(f'Epoch {epoch:05d}@{self.global_step}: saving model to {ckpt_path}') - self._atomic_save(ckpt_path) - for old_ckpt 
in get_all_ckpts(self.work_dir)[self.num_ckpt_keep:]: - remove_file(old_ckpt) - logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}') - current = None - if logs is not None and self.monitor_key in logs: - current = logs[self.monitor_key] - if current is not None and self.save_best: - if monitor_op(current, self.best_val_results): - best_filepath = f'{self.work_dir}/model_ckpt_best.pt' - self.best_val_results = current - logging.info( - f'Epoch {epoch:05d}@{self.global_step}: {self.monitor_key} reached {current:0.5f}. ' - f'Saving model to {best_filepath}') - self._atomic_save(best_filepath) - - def _atomic_save(self, filepath): - checkpoint = self.dump_checkpoint() - tmp_path = str(filepath) + ".part" - torch.save(checkpoint, tmp_path, _use_new_zipfile_serialization=False) - os.replace(tmp_path, filepath) - - def dump_checkpoint(self): - checkpoint = {'epoch': self.current_epoch, 'global_step': self.global_step, - 'checkpoint_callback_best': self.best_val_results} - # save optimizers - optimizer_states = [] - for i, optimizer in enumerate(self.optimizers): - if optimizer is not None: - optimizer_states.append(optimizer.state_dict()) - - checkpoint['optimizer_states'] = optimizer_states - task_ref = self.get_task_ref() - checkpoint['state_dict'] = { - k: v.state_dict() for k, v in task_ref.named_children() if len(list(v.parameters())) > 0} - return checkpoint - - #################### - # DDP - #################### - def configure_ddp(self, task): - task = DDP(task, device_ids=[self.root_gpu], find_unused_parameters=True) - random.seed(self.seed) - np.random.seed(self.seed) - return task - - def init_ddp_connection(self, proc_rank, world_size): - root_node = '127.0.0.1' - root_node = self.resolve_root_node_address(root_node) - os.environ['MASTER_ADDR'] = root_node - dist.init_process_group('nccl', rank=proc_rank, world_size=world_size) - - def resolve_root_node_address(self, root_node): - if '[' in root_node: - name = root_node.split('[')[0] - number = root_node.split(',')[0] - if '-' in number: - number = number.split('-')[0] - number = re.sub('[^0-9]', '', number) - root_node = name + number - return root_node - - #################### - # utils - #################### - def get_task_ref(self): - from utils.commons.base_task import BaseTask - task: BaseTask = self.task.module if isinstance(self.task, DDP) else self.task - return task - - def log_metrics_to_tb(self, metrics, step=None): - """Logs the metric dict passed in. 
- - :param metrics: - """ - # turn all tensors to scalars - scalar_metrics = self.metrics_to_scalars(metrics) - - step = step if step is not None else self.global_step - # log actual metrics - if self.proc_rank == 0: - self.log_metrics(self.logger, scalar_metrics, step=step) - - @staticmethod - def log_metrics(logger, metrics, step=None): - for k, v in metrics.items(): - if isinstance(v, torch.Tensor): - v = v.item() - logger.add_scalar(k, v, step) - - def metrics_to_scalars(self, metrics): - new_metrics = {} - for k, v in metrics.items(): - if isinstance(v, torch.Tensor): - v = v.item() - - if type(v) is dict: - v = self.metrics_to_scalars(v) - - new_metrics[k] = v - - return new_metrics - - def save_terminal_logs(self): - t = datetime.now().strftime('%Y%m%d%H%M%S') - os.makedirs(f'{self.work_dir}/terminal_logs', exist_ok=True) - Tee(f'{self.work_dir}/terminal_logs/log_{t}.txt', 'w') - - def save_codes(self): - if len(hparams['save_codes']) > 0: - t = datetime.now().strftime('%Y%m%d%H%M%S') - code_dir = f'{self.work_dir}/codes/{t}' - subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) - for c in hparams['save_codes']: - if os.path.exists(c): - subprocess.check_call( - f'rsync -aR ' - f'--include="*.py" ' - f'--include="*.yaml" ' - f'--exclude="__pycache__" ' - f'--include="*/" ' - f'--exclude="*" ' - f'"./{c}" "{code_dir}/"', - shell=True) - print(f"| Copied codes to {code_dir}.") diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/mode_keys.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/mode_keys.py deleted file mode 100644 index 020382b2486ca25a41f0c3eb88b1f2038c538e7e..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/dataloader/mode_keys.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Standard names for input dataloader modes. - -The following standard keys are defined: - -* `TRAIN`: training mode. -* `EVAL`: evaluation mode. -* `PREDICT`: prediction mode. -* `PREDICT_WITH_GT`: prediction mode with groundtruths in returned variables. 
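-
-Illustrative usage (a sketch, not exhaustive): input pipelines typically branch on
-these string constants, e.g. `is_training = (mode == TRAIN)` when deciding whether
-to shuffle and augment the data.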
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -TRAIN = 'train' -EVAL = 'eval' -PREDICT = 'predict' -PREDICT_WITH_GT = 'predict_with_gt' diff --git a/spaces/Nee001/bing0/src/lib/hooks/use-at-bottom.tsx b/spaces/Nee001/bing0/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/NimaBoscarino/playlist-generator/README.md b/spaces/NimaBoscarino/playlist-generator/README.md deleted file mode 100644 index cdab710810ec0879275bab07a03d3925b1fce3c5..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/playlist-generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Playlist Generator -emoji: 🎵🧑🏽‍🎤🎤 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.0.18 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/README.md b/spaces/Nyari/Super-Resolution-Anime-Diffusion/README.md deleted file mode 100644 index 03a6bd717759136810c2c8880d739b09dc68b196..0000000000000000000000000000000000000000 --- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/README.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Super-Resolution-Anime-Diffusion -emoji: 🏃 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false -duplicated_from: yangheng/Super-Resolution-Anime-Diffusion ---- - -# Super Resolution Anime Diffusion - - -# [Online Web Demo](https://huggingface.co/spaces/yangheng/Super-Resolution-Anime-Diffusion) - -This is demo forked from https://huggingface.co/Linaqruf/anything-v3.0. - -## Super Resolution Anime Diffusion -At this moment, many diffusion models can only generate <1024 width and length pictures. -I integrated the Super Resolution with [Anything diffusion model](https://huggingface.co/Linaqruf/anything-v3.0) to produce high resolution pictures. -Thanks to the open-source project: https://github.com/yu45020/Waifu2x - - -## Modifications -1. Disable the safety checker to save time and memory. You need to abide the original rules of the model. -2. Add the Super Resolution function to the model. -3. Add batch generation function to the model (see inference.py). - -## Install -1. Install [Anaconda](https://www.anaconda.com/products/distribution) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) -2. create a conda environment: -```bash -conda create -n diffusion python=3.9 -conda activate diffusion -``` -3. install requirements: -```ash -conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia -pip install -r requirements.txt -``` -4. Run web demo: -``` -python app.py -``` -5. 
or run batch anime-generation -``` -python inference.py -``` -see the source code for details, you can set scale factor to magnify pictures - -## Random Examples (512*768) x4 scale factor -![Anime Girl](./random_examples/1.png) -![Anime Girl](./random_examples/2.png) -# Origin README ---- -language: -- en -license: creativeml-openrail-m -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -inference: true ---- - -# Anything V3 - -Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime style with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images. - -e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_** - -## Gradio - -We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Anything-V3.0: - -[Open in Spaces](https://huggingface.co/spaces/akhaliq/anything-v3.0) - - - -## 🧨 Diffusers - -This model can be used just like any other Stable Diffusion model. For more information, -please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). - -You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). - -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "Linaqruf/anything-v3.0" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) -pipe = pipe.to("cuda") - -prompt = "pikachu" -image = pipe(prompt).images[0] - -image.save("./pikachu.png") -``` - -## Examples - -Below are some examples of images generated using this model: - -**Anime Girl:** -![Anime Girl](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1girl.png) -``` -1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` -**Anime Boy:** -![Anime Boy](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1boy.png) -``` -1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` -**Scenery:** -![Scenery](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/scenery.png) -``` -scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` - -## License - -This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. -The CreativeML OpenRAIL License specifies: - -1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content -2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license -3. You may re-distribute the weights and use the model commercially and/or as a service. 
If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) -[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license) diff --git a/spaces/OAOA/DifFace/datapipe/prepare/face/split_train_val.py b/spaces/OAOA/DifFace/datapipe/prepare/face/split_train_val.py deleted file mode 100644 index a3a76d2128884d5eee8ad47d7bdfc7f8f72a2b6b..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/datapipe/prepare/face/split_train_val.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Power by Zongsheng Yue 2022-05-18 09:18:02 - -import random -import argparse -from pathlib import Path - -parser = argparse.ArgumentParser(prog='Face dataset Generation') -parser.add_argument('--face_dir', default='/home/jupyter/data/FFHQ/images512x512', type=str, - metavar='PATH', help="Path to save the face images") -# FFHQ: png, Celeba: png -parser.add_argument('--prefix', default='ffhq', type=str, help="Image format of the HR face images") -parser.add_argument('--num_val', default=500, type=int, help="Ratio for Validation set") -parser.add_argument('--seed', default=1234, type=int, help="Random seed") -parser.add_argument('--im_size', default=512, type=int, help="Random seed") -args = parser.parse_args() - -base_dir = Path(__file__).resolve().parents[2] / 'files_txt' -if not base_dir.exists(): - base_dir.mkdir() - -path_list = sorted([str(x.resolve()) for x in Path(args.face_dir).glob('*.png')]) - -file_path = base_dir / f"{args.prefix}{args.im_size}.txt" -if file_path.exists(): - file_path.unlink() -with open(file_path, mode='w') as ff: - for line in path_list: ff.write(line+'\n') - -random.seed(args.seed) -random.shuffle(path_list) -num_train = int(len(path_list) - args.num_val) - -file_path_train = base_dir / f"{args.prefix}{args.im_size}_train.txt" -if file_path_train.exists(): - file_path_train.unlink() -with open(file_path_train, mode='w') as ff: - for line in path_list[:num_train]: ff.write(line+'\n') - -file_path_val = base_dir / f"{args.prefix}{args.im_size}_val.txt" -if file_path_val.exists(): - file_path_val.unlink() -with open(file_path_val, mode='w') as ff: - for line in path_list[num_train:]: ff.write(line+'\n') - -print('Train / Validation: {:d}/{:d}'.format(num_train, len(path_list)-num_train)) - diff --git a/spaces/OAOA/DifFace/models/srcnn.py b/spaces/OAOA/DifFace/models/srcnn.py deleted file mode 100644 index b9b3e6a54030a1c763c127a18c2a696ad52e56f9..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/models/srcnn.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Power by Zongsheng Yue 2022-07-12 20:35:28 - -import math -from torch import nn -import torch.nn.functional as F - -class SRCNN(nn.Module): - def __init__(self, in_chns, out_chns=None, num_chns=64, depth=8, sf=4): - super().__init__() - self.sf = sf - out_chns = in_chns if out_chns is None else out_chns - - self.head = nn.Conv2d(in_chns, num_chns, kernel_size=5, padding=2) - - body = [] - for _ in range(depth-1): - body.append(nn.Conv2d(num_chns, num_chns, kernel_size=5, padding=2)) - body.append(nn.LeakyReLU(0.2, inplace=True)) - self.body = nn.Sequential(*body) - - tail = [] - for _ in range(int(math.log(sf, 2))): - tail.append(nn.Conv2d(num_chns, num_chns*4, kernel_size=3, padding=1)) - tail.append(nn.LeakyReLU(0.2, inplace=True)) - 
tail.append(nn.PixelShuffle(2)) - tail.append(nn.Conv2d(num_chns, out_chns, kernel_size=5, padding=2)) - self.tail = nn.Sequential(*tail) - - def forward(self, x): - y = self.head(x) - y = self.body(y) - y = self.tail(y) - return y - -class SRCNNFSR(nn.Module): - def __init__(self, in_chns, down_scale_factor=2, num_chns=64, depth=8, sf=4): - super().__init__() - self.sf = sf - - head = [] - in_chns_shuffle = in_chns * 4 - assert num_chns % 4 == 0 - for ii in range(int(math.log(down_scale_factor, 2))): - head.append(nn.PixelUnshuffle(2)) - head.append(nn.Conv2d(in_chns_shuffle, num_chns, kernel_size=3, padding=1)) - if ii + 1 < int(math.log(down_scale_factor, 2)): - head.append(nn.Conv2d(num_chns, num_chns//4, kernel_size=5, padding=2)) - head.append(nn.LeakyReLU(0.2, inplace=True)) - in_chns_shuffle = num_chns - self.head = nn.Sequential(*head) - - body = [] - for _ in range(depth-1): - body.append(nn.Conv2d(num_chns, num_chns, kernel_size=5, padding=2)) - body.append(nn.LeakyReLU(0.2, inplace=True)) - self.body = nn.Sequential(*body) - - tail = [] - for _ in range(int(math.log(down_scale_factor, 2))): - tail.append(nn.Conv2d(num_chns, num_chns, kernel_size=3, padding=1)) - tail.append(nn.LeakyReLU(0.2, inplace=True)) - tail.append(nn.PixelShuffle(2)) - num_chns //= 4 - tail.append(nn.Conv2d(num_chns, in_chns, kernel_size=5, padding=2)) - self.tail = nn.Sequential(*tail) - - def forward(self, x): - y = self.head(x) - y = self.body(y) - y = self.tail(y) - return y diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_concat_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_concat_dataset.py deleted file mode 100644 index d94aeffd481a2e107eb5747e41d76435b3f3dc8a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_concat_dataset.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import torch -from fairseq.data import LanguagePairDataset, TokenBlockDataset -from fairseq.data.concat_dataset import ConcatDataset -from tests.test_train import mock_dict - - -class TestConcatDataset(unittest.TestCase): - def setUp(self): - d = mock_dict() - tokens_1 = torch.LongTensor([1]).view(1, -1) - tokens_ds1 = TokenBlockDataset( - tokens_1, - sizes=[tokens_1.size(-1)], - block_size=1, - pad=0, - eos=1, - include_targets=False, - ) - self.dataset_1 = LanguagePairDataset( - tokens_ds1, tokens_ds1.sizes, d, shuffle=False - ) - tokens_2 = torch.LongTensor([2]).view(1, -1) - tokens_ds2 = TokenBlockDataset( - tokens_2, - sizes=[tokens_2.size(-1)], - block_size=1, - pad=0, - eos=1, - include_targets=False, - ) - self.dataset_2 = LanguagePairDataset( - tokens_ds2, tokens_ds2.sizes, d, shuffle=False - ) - - def test_concat_dataset_basics(self): - d = ConcatDataset([self.dataset_1, self.dataset_2]) - assert len(d) == 2 - assert d[0]["source"][0] == 1 - assert d[1]["source"][0] == 2 - - d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2]) - assert len(d) == 3 - assert d[0]["source"][0] == 1 - assert d[1]["source"][0] == 2 - assert d[2]["source"][0] == 2 - - d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1]) - assert len(d) == 3 - assert d[0]["source"][0] == 1 - assert d[1]["source"][0] == 1 - assert d[2]["source"][0] == 2 diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py deleted file mode 100644 index 49e2ca498bf67ad226af5de796b9f44afa65198d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/text.py +++ /dev/null @@ -1,107 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -import numpy as np -import re -from . import cleaners -from .symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - -# Regular expression matching text enclosed in curly braces: -_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)') - -# Special symbols -SOS_TOK = '' -EOS_TOK = '' - -def text_to_sequence(text, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - - The text can optionally have ARPAbet sequences enclosed in curly braces embedded - in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." 
- - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - # Check for curly braces and treat their contents as ARPAbet: - while len(text): - m = _curly_re.match(text) - if not m: - sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) - break - sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) - sequence += _arpabet_to_sequence(m.group(2)) - text = m.group(3) - - return sequence - - -def sample_code_chunk(code, size): - assert(size > 0 and size <= len(code)) - start = np.random.randint(len(code) - size + 1) - end = start + size - return code[start:end], start, end - - -def code_to_sequence(code, code_dict, collapse_code): - if collapse_code: - prev_c = None - sequence = [] - for c in code: - if c in code_dict and c != prev_c: - sequence.append(code_dict[c]) - prev_c = c - else: - sequence = [code_dict[c] for c in code if c in code_dict] - if len(sequence) < 0.95 * len(code): - print('WARNING : over 5%% codes are OOV') - - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - if symbol_id in _id_to_symbol: - s = _id_to_symbol[symbol_id] - # Enclose ARPAbet back in curly braces: - if len(s) > 1 and s[0] == '@': - s = '{%s}' % s[1:] - result += s - return result.replace('}{', ' ') - - -def sequence_to_code(sequence, code_dict): - '''Analogous to sequence_to_text''' - id_to_code = {i: c for c, i in code_dict.items()} - return ' '.join([id_to_code[i] for i in sequence]) - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text - - -def _symbols_to_sequence(symbols): - return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)] - - -def _arpabet_to_sequence(text): - return _symbols_to_sequence(['@' + s for s in text.split()]) - - -def _should_keep_symbol(s): - return s in _symbol_to_id and s != '_' and s != '~' diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_reproducibility.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_reproducibility.py deleted file mode 100644 index 94931b2a0721c4adfee8899c89cac24f45973d17..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/tests/test_reproducibility.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -import json -import os -import tempfile -import unittest -from io import StringIO - -import torch - -from . 
import test_binaries - - -class TestReproducibility(unittest.TestCase): - def _test_reproducibility( - self, - name, - extra_flags=None, - delta=0.0001, - resume_checkpoint="checkpoint1.pt", - max_epoch=3, - ): - def get_last_log_stats_containing_string(log_records, search_string): - for log_record in logs.records[::-1]: - if isinstance(log_record.msg, str) and search_string in log_record.msg: - return json.loads(log_record.msg) - - if extra_flags is None: - extra_flags = [] - - with tempfile.TemporaryDirectory(name) as data_dir: - with self.assertLogs() as logs: - test_binaries.create_dummy_data(data_dir) - test_binaries.preprocess_translation_data(data_dir) - - # train epochs 1 and 2 together - with self.assertLogs() as logs: - test_binaries.train_translation_model( - data_dir, - "fconv_iwslt_de_en", - [ - "--dropout", - "0.0", - "--log-format", - "json", - "--log-interval", - "1", - "--max-epoch", - str(max_epoch), - ] - + extra_flags, - ) - train_log = get_last_log_stats_containing_string(logs.records, "train_loss") - valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss") - - # train epoch 2, resuming from previous checkpoint 1 - os.rename( - os.path.join(data_dir, resume_checkpoint), - os.path.join(data_dir, "checkpoint_last.pt"), - ) - with self.assertLogs() as logs: - test_binaries.train_translation_model( - data_dir, - "fconv_iwslt_de_en", - [ - "--dropout", - "0.0", - "--log-format", - "json", - "--log-interval", - "1", - "--max-epoch", - str(max_epoch), - ] - + extra_flags, - ) - train_res_log = get_last_log_stats_containing_string( - logs.records, "train_loss" - ) - valid_res_log = get_last_log_stats_containing_string( - logs.records, "valid_loss" - ) - - for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]: - self.assertAlmostEqual( - float(train_log[k]), float(train_res_log[k]), delta=delta - ) - for k in [ - "valid_loss", - "valid_ppl", - "valid_num_updates", - "valid_best_loss", - ]: - self.assertAlmostEqual( - float(valid_log[k]), float(valid_res_log[k]), delta=delta - ) - - def test_reproducibility(self): - self._test_reproducibility("test_reproducibility") - - @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") - def test_reproducibility_fp16(self): - self._test_reproducibility( - "test_reproducibility_fp16", - [ - "--fp16", - "--fp16-init-scale", - "4096", - ], - delta=0.011, - ) - - @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") - def test_reproducibility_memory_efficient_fp16(self): - self._test_reproducibility( - "test_reproducibility_memory_efficient_fp16", - [ - "--memory-efficient-fp16", - "--fp16-init-scale", - "4096", - ], - ) - - @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") - def test_reproducibility_amp(self): - self._test_reproducibility( - "test_reproducibility_amp", - [ - "--amp", - "--fp16-init-scale", - "4096", - ], - delta=0.011, - ) - - def test_mid_epoch_reproducibility(self): - self._test_reproducibility( - "test_mid_epoch_reproducibility", - ["--save-interval-updates", "3"], - resume_checkpoint="checkpoint_1_3.pt", - max_epoch=1, - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/adaptive_span/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/adaptive_span/__init__.py deleted file mode 100644 index e0a142a769360e1140bf814c532eaf841f1d52d8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/adaptive_span/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# 
Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import importlib -import os - -# automatically import any Python files in the current directory -cur_dir = os.path.dirname(__file__) -for file in os.listdir(cur_dir): - path = os.path.join(cur_dir, file) - if ( - not file.startswith("_") - and not file.startswith(".") - and (file.endswith(".py") or os.path.isdir(path)) - ): - mod_name = file[: file.find(".py")] if file.endswith(".py") else file - module = importlib.import_module(__name__ + "." + mod_name) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/docs/ende-mma.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/docs/ende-mma.md deleted file mode 100644 index 241d604a3b31a37755da68aad6ff47d46891d3fc..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/docs/ende-mma.md +++ /dev/null @@ -1,74 +0,0 @@ -# Simultaneous Machine Translation - -This directory contains the code for the paper [Monotonic Multihead Attention](https://openreview.net/forum?id=Hyg96gBKPS) - -## Prepare Data - -[Please follow the instructions to download and preprocess the WMT'15 En-De dataset.](https://github.com/pytorch/fairseq/tree/simulastsharedtask/examples/translation#prepare-wmt14en2desh) - -Another example of training an English to Japanese model can be found [here](docs/enja.md) - -## Training - -- MMA-IL - -```shell -fairseq-train \ - data-bin/wmt15_en_de_32k \ - --simul-type infinite_lookback \ - --user-dir $FAIRSEQ/example/simultaneous_translation \ - --mass-preservation \ - --criterion latency_augmented_label_smoothed_cross_entropy \ - --latency-weight-avg 0.1 \ - --max-update 50000 \ - --arch transformer_monotonic_iwslt_de_en save_dir_key=lambda \ - --optimizer adam --adam-betas '(0.9, 0.98)' \ - --lr-scheduler 'inverse_sqrt' \ - --warmup-init-lr 1e-7 --warmup-updates 4000 \ - --lr 5e-4 --stop-min-lr 1e-9 --clip-norm 0.0 --weight-decay 0.0001\ - --dropout 0.3 \ - --label-smoothing 0.1\ - --max-tokens 3584 -``` - -- MMA-H - -```shell -fairseq-train \ - data-bin/wmt15_en_de_32k \ - --simul-type hard_aligned \ - --user-dir $FAIRSEQ/example/simultaneous_translation \ - --mass-preservation \ - --criterion latency_augmented_label_smoothed_cross_entropy \ - --latency-weight-var 0.1 \ - --max-update 50000 \ - --arch transformer_monotonic_iwslt_de_en save_dir_key=lambda \ - --optimizer adam --adam-betas '(0.9, 0.98)' \ - --lr-scheduler 'inverse_sqrt' \ - --warmup-init-lr 1e-7 --warmup-updates 4000 \ - --lr 5e-4 --stop-min-lr 1e-9 --clip-norm 0.0 --weight-decay 0.0001\ - --dropout 0.3 \ - --label-smoothing 0.1\ - --max-tokens 3584 -``` - -- wait-k - -```shell -fairseq-train \ - data-bin/wmt15_en_de_32k \ - --simul-type wait-k \ - --waitk-lagging 3 \ - --user-dir $FAIRSEQ/example/simultaneous_translation \ - --mass-preservation \ - --criterion latency_augmented_label_smoothed_cross_entropy \ - --max-update 50000 \ - --arch transformer_monotonic_iwslt_de_en save_dir_key=lambda \ - --optimizer adam --adam-betas '(0.9, 0.98)' \ - --lr-scheduler 'inverse_sqrt' \ - --warmup-init-lr 1e-7 --warmup-updates 4000 \ - --lr 5e-4 --stop-min-lr 1e-9 --clip-norm 0.0 --weight-decay 0.0001\ - --dropout 0.3 \ - --label-smoothing 0.1\ - --max-tokens 3584 -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/__init__.py 
b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/__init__.py deleted file mode 100644 index 681fca3d4553f6832a65f61fc186793bc4ee0679..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/__init__.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Facebook Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -"""isort:skip_file""" - -from .transformer_config import ( - TransformerConfig, - DEFAULT_MAX_SOURCE_POSITIONS, - DEFAULT_MAX_TARGET_POSITIONS, - DEFAULT_MIN_PARAMS_TO_WRAP, -) -from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear -from .transformer_encoder import TransformerEncoder, TransformerEncoderBase -from .transformer_legacy import ( - TransformerModel, - base_architecture, - tiny_architecture, - transformer_iwslt_de_en, - transformer_wmt_en_de, - transformer_vaswani_wmt_en_de_big, - transformer_vaswani_wmt_en_fr_big, - transformer_wmt_en_de_big, - transformer_wmt_en_de_big_t2t, -) -from .transformer_base import TransformerModelBase, Embedding - - -__all__ = [ - "TransformerModelBase", - "TransformerConfig", - "TransformerDecoder", - "TransformerDecoderBase", - "TransformerEncoder", - "TransformerEncoderBase", - "TransformerModel", - "Embedding", - "Linear", - "base_architecture", - "tiny_architecture", - "transformer_iwslt_de_en", - "transformer_wmt_en_de", - "transformer_vaswani_wmt_en_de_big", - "transformer_vaswani_wmt_en_fr_big", - "transformer_wmt_en_de_big", - "transformer_wmt_en_de_big_t2t", - "DEFAULT_MAX_SOURCE_POSITIONS", - "DEFAULT_MAX_TARGET_POSITIONS", - "DEFAULT_MIN_PARAMS_TO_WRAP", -] diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/scoring/tokenizer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/scoring/tokenizer.py deleted file mode 100644 index 61cf6d4a7cc698258caad9f68f2e8559dd510eee..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/scoring/tokenizer.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unicodedata - -from fairseq.dataclass import ChoiceEnum - - -class EvaluationTokenizer(object): - """A generic evaluation-time tokenizer, which leverages built-in tokenizers - in sacreBLEU (https://github.com/mjpost/sacrebleu). It additionally provides - lowercasing, punctuation removal and character tokenization, which are - applied after sacreBLEU tokenization. - - Args: - tokenizer_type (str): the type of sacreBLEU tokenizer to apply. - lowercase (bool): lowercase the text. - punctuation_removal (bool): remove punctuation (based on unicode - category) from text. - character_tokenization (bool): tokenize the text to characters. 
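-
-    Illustrative usage (a sketch, assuming sacreBLEU's default `13a` tokenizer is available):
-
-        >>> tok = EvaluationTokenizer(tokenizer_type="13a", lowercase=True)
-        >>> tok.tokenize("Hello, world!")
-        'hello , world !'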
- """ - - SPACE = chr(32) - SPACE_ESCAPE = chr(9601) - ALL_TOKENIZER_TYPES = ChoiceEnum(["none", "13a", "intl", "zh", "ja-mecab"]) - - def __init__( - self, - tokenizer_type: str = "13a", - lowercase: bool = False, - punctuation_removal: bool = False, - character_tokenization: bool = False, - ): - from sacrebleu.tokenizers import TOKENIZERS - - assert tokenizer_type in TOKENIZERS, f"{tokenizer_type}, {TOKENIZERS}" - self.lowercase = lowercase - self.punctuation_removal = punctuation_removal - self.character_tokenization = character_tokenization - self.tokenizer = TOKENIZERS[tokenizer_type] - - @classmethod - def remove_punctuation(cls, sent: str): - """Remove punctuation based on Unicode category.""" - return cls.SPACE.join( - t - for t in sent.split(cls.SPACE) - if not all(unicodedata.category(c)[0] == "P" for c in t) - ) - - def tokenize(self, sent: str): - tokenized = self.tokenizer()(sent) - - if self.punctuation_removal: - tokenized = self.remove_punctuation(tokenized) - - if self.character_tokenization: - tokenized = self.SPACE.join( - list(tokenized.replace(self.SPACE, self.SPACE_ESCAPE)) - ) - - if self.lowercase: - tokenized = tokenized.lower() - - return tokenized diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/smooth_parsing_map.py b/spaces/PKUWilliamYang/VToonify/vtoonify/smooth_parsing_map.py deleted file mode 100644 index 7720d0c7786925db38d3e793d6a3a8f68f6e663e..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/smooth_parsing_map.py +++ /dev/null @@ -1,172 +0,0 @@ -import os -#os.environ['CUDA_VISIBLE_DEVICES'] = "0" -import numpy as np -import cv2 -import math -import argparse -from tqdm import tqdm -import torch -from torch import nn -from torchvision import transforms -import torch.nn.functional as F -from model.raft.core.raft import RAFT -from model.raft.core.utils.utils import InputPadder -from model.bisenet.model import BiSeNet -from model.stylegan.model import Downsample - -class Options(): - def __init__(self): - - self.parser = argparse.ArgumentParser(description="Smooth Parsing Maps") - self.parser.add_argument("--window_size", type=int, default=5, help="temporal window size") - - self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model") - self.parser.add_argument("--raft_path", type=str, default='./checkpoint/raft-things.pth', help="path of the RAFT model") - - self.parser.add_argument("--video_path", type=str, help="path of the target video") - self.parser.add_argument("--output_path", type=str, default='./output/', help="path of the output parsing maps") - - def parse(self): - self.opt = self.parser.parse_args() - args = vars(self.opt) - print('Load options') - for name, value in sorted(args.items()): - print('%s: %s' % (str(name), str(value))) - return self.opt - -# from RAFT -def warp(x, flo): - """ - warp an image/tensor (im2) back to im1, according to the optical flow - x: [B, C, H, W] (im2) - flo: [B, 2, H, W] flow - """ - B, C, H, W = x.size() - # mesh grid - xx = torch.arange(0, W).view(1,-1).repeat(H,1) - yy = torch.arange(0, H).view(-1,1).repeat(1,W) - xx = xx.view(1,1,H,W).repeat(B,1,1,1) - yy = yy.view(1,1,H,W).repeat(B,1,1,1) - grid = torch.cat((xx,yy),1).float() - - - #x = x.cuda() - grid = grid.cuda() - vgrid = grid + flo # B,2,H,W - - # scale grid to [-1,1] - ##2019 code - vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone()/max(W-1,1)-1.0 - vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone()/max(H-1,1)-1.0 - - vgrid = vgrid.permute(0,2,3,1) - 
output = nn.functional.grid_sample(x, vgrid,align_corners=True) - mask = torch.autograd.Variable(torch.ones(x.size())).cuda() - mask = nn.functional.grid_sample(mask, vgrid,align_corners=True) - - ##2019 author - mask[mask<0.9999] = 0 - mask[mask>0] = 1 - - ##2019 code - # mask = torch.floor(torch.clamp(mask, 0 ,1)) - - return output*mask, mask - - -if __name__ == "__main__": - - parser = Options() - args = parser.parse() - print('*'*98) - - - device = "cuda" - - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - - parser = argparse.ArgumentParser() - parser.add_argument('--model', help="restore checkpoint") - parser.add_argument('--small', action='store_true', help='use small model') - parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') - parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') - - raft_model = torch.nn.DataParallel(RAFT(parser.parse_args(['--model', args.raft_path]))) - raft_model.load_state_dict(torch.load(args.raft_path)) - - raft_model = raft_model.module - raft_model.to(device) - raft_model.eval() - - parsingpredictor = BiSeNet(n_classes=19) - parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage)) - parsingpredictor.to(device).eval() - - down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device).eval() - - print('Load models successfully!') - - window = args.window_size - - video_cap = cv2.VideoCapture(args.video_path) - num = int(video_cap.get(7)) - - Is = [] - for i in range(num): - success, frame = video_cap.read() - if success == False: - break - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - with torch.no_grad(): - Is += [transform(frame).unsqueeze(dim=0).cpu()] - video_cap.release() - - # enlarge frames for more accurate parsing maps and optical flows - Is = F.upsample(torch.cat(Is, dim=0), scale_factor=2, mode='bilinear') - Is_ = torch.cat((Is[0:window], Is, Is[-window:]), dim=0) - - print('Load video with %d frames successfully!'%(len(Is))) - - Ps = [] - for i in tqdm(range(len(Is))): - with torch.no_grad(): - Ps += [parsingpredictor(2*Is[i:i+1].to(device))[0].detach().cpu()] - Ps = torch.cat(Ps, dim=0) - Ps_ = torch.cat((Ps[0:window], Ps, Ps[-window:]), dim=0) - - print('Predict parsing maps successfully!') - - - # temporal weights of the (2*args.window_size+1) frames - wt = torch.exp(-(torch.arange(2*window+1).float()-window)**2/(2*((window+0.5)**2))).reshape(2*window+1,1,1,1).to(device) - - parse = [] - for ii in tqdm(range(len(Is))): - i = ii + window - image2 = Is_[i-window:i+window+1].to(device) - image1 = Is_[i].repeat(2*window+1,1,1,1).to(device) - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - with torch.no_grad(): - flow_low, flow_up = raft_model((image1+1)*255.0/2, (image2+1)*255.0/2, iters=20, test_mode=True) - output, mask = warp(torch.cat((image2, Ps_[i-window:i+window+1].to(device)), dim=1), flow_up) - aligned_Is = output[:,0:3].detach() - aligned_Ps = output[:,3:].detach() - # the spatial weight - ws = torch.exp(-((aligned_Is-image1)**2).mean(dim=1, keepdims=True)/(2*(0.2**2))) * mask[:,0:1] - aligned_Ps[window] = Ps_[i].to(device) - # the weight between i and i shoud be 1.0 - ws[window,:,:,:] = 1.0 - weights = ws*wt - weights = weights / weights.sum(dim=(0), keepdims=True) - fused_Ps = (aligned_Ps * weights).sum(dim=0, keepdims=True) - parse += [down(fused_Ps).detach().cpu()] - parse 
= torch.cat(parse, dim=0) - - basename = os.path.basename(args.video_path).split('.')[0] - np.save(os.path.join(args.output_path, basename+'_parsingmap.npy'), parse.numpy()) - - print('Done!') \ No newline at end of file diff --git a/spaces/PYTHONOPTIC/FOCUSGUMMY/app.py b/spaces/PYTHONOPTIC/FOCUSGUMMY/app.py deleted file mode 100644 index be2d66cb6845f4a8b598c1a7a9ed7d58c7628175..0000000000000000000000000000000000000000 --- a/spaces/PYTHONOPTIC/FOCUSGUMMY/app.py +++ /dev/null @@ -1,21 +0,0 @@ -import openai -import gradio - -openai.api_key = "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" - -messages = [{"role": "system", "content": "Your name is Simone. You are an expert in biology, bioengineering, supplements, nootropics, and chemistry and can answer any questions about supplements."}] - -def CustomChatGPT(user_input): - messages.append({"role": "user", "content": user_input}) - response = openai.ChatCompletion.create( - model = "gpt-3.5-turbo", - messages = messages - ) - ChatGPT_reply = response["choices"][0]["message"]["content"] - messages.append({"role": "assistant", "content": ChatGPT_reply}) - return ChatGPT_reply - -demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Ask me anything about Nootropic Brain Supplements.") - -demo.launch() - diff --git a/spaces/PeepDaSlan9/Bark-Voice-Cloning/app.py b/spaces/PeepDaSlan9/Bark-Voice-Cloning/app.py deleted file mode 100644 index e60c6acea113aba62d1a3f8e8186b1ae075aa989..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Bark-Voice-Cloning/app.py +++ /dev/null @@ -1,401 +0,0 @@ -from cProfile import label -import dataclasses -from distutils.command.check import check -from doctest import Example -import gradio as gr -import os -import sys -import numpy as np -import logging -import torch -import pytorch_seed -import time - -from xml.sax import saxutils -from bark.api import generate_with_settings -from bark.api import save_as_prompt -from util.settings import Settings -#import nltk - - -from bark import SAMPLE_RATE -from cloning.clonevoice import clone_voice -from bark.generation import SAMPLE_RATE, preload_models, _load_history_prompt, codec_decode -from scipy.io.wavfile import write as write_wav -from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml -from datetime import datetime -from tqdm.auto import tqdm -from util.helper import create_filename, add_id3_tag -from swap_voice import swap_voice_from_audio -from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics -from training.train import training_prepare_files, train - -settings = Settings('config.yaml') - - -def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)): - # Chunk the text into smaller pieces then combine the generated audio - - # generation settings - if selected_speaker == 'None': - selected_speaker = None - - voice_name = selected_speaker - - if text == None or len(text) < 1: - if selected_speaker == None: - raise gr.Error('No text entered!') - - # Extract audio data from speaker if no text and speaker selected - voicedata = _load_history_prompt(voice_name) - audio_arr = codec_decode(voicedata["fine_prompt"]) - result = create_filename(settings.output_folder_path, "None", "extract",".wav") - save_wav(audio_arr, result) - return result - - if batchcount < 1: - batchcount = 1 - - - silenceshort = 
np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.int16) # quarter second of silence - silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32) # half a second of silence - use_last_generation_as_history = "Use last generation as history" in complete_settings - save_last_generation = "Save generation as Voice" in complete_settings - for l in range(batchcount): - currentseed = seed - if seed != None and seed > 2**32 - 1: - logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random") - currentseed = None - if currentseed == None or currentseed <= 0: - currentseed = np.random.default_rng().integers(1, 2**32 - 1) - assert(0 < currentseed and currentseed < 2**32) - - progress(0, desc="Generating") - - full_generation = None - - all_parts = [] - complete_text = "" - text = text.lstrip() - if is_ssml(text): - list_speak = create_clips_from_ssml(text) - prev_speaker = None - for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)): - selected_speaker = clip[0] - # Add pause break between speakers - if i > 0 and selected_speaker != prev_speaker: - all_parts += [silencelong.copy()] - prev_speaker = selected_speaker - text = clip[1] - text = saxutils.unescape(text) - if selected_speaker == "None": - selected_speaker = None - - print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`") - complete_text += text - with pytorch_seed.SavedRNG(currentseed): - audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - currentseed = torch.random.initial_seed() - if len(list_speak) > 1: - filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav") - save_wav(audio_array, filename) - add_id3_tag(filename, text, selected_speaker, currentseed) - - all_parts += [audio_array] - else: - texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length) - for i, text in tqdm(enumerate(texts), total=len(texts)): - print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`") - complete_text += text - if quick_generation == True: - with pytorch_seed.SavedRNG(currentseed): - audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - currentseed = torch.random.initial_seed() - else: - full_output = use_last_generation_as_history or save_last_generation - if full_output: - full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True) - else: - audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob) - - # Noticed this in the HF Demo - convert to 16bit int -32767/32767 - most used audio format - # audio_array = (audio_array * 32767).astype(np.int16) - - if len(texts) > 1: - filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav") - save_wav(audio_array, filename) - add_id3_tag(filename, text, selected_speaker, currentseed) - - if quick_generation == False and (save_last_generation == True or use_last_generation_as_history == True): - # save to npz - voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz") - 
save_as_prompt(voice_name, full_generation) - if use_last_generation_as_history: - selected_speaker = voice_name - - all_parts += [audio_array] - # Add short pause between sentences - if text[-1] in "!?.\n" and i > 1: - all_parts += [silenceshort.copy()] - - # save & play audio - result = create_filename(settings.output_folder_path, currentseed, "final",".wav") - save_wav(np.concatenate(all_parts), result) - # write id3 tag with text truncated to 60 chars, as a precaution... - add_id3_tag(result, complete_text, selected_speaker, currentseed) - - return result - - - -def save_wav(audio_array, filename): - write_wav(filename, SAMPLE_RATE, audio_array) - -def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt): - np.savez_compressed( - filename, - semantic_prompt=semantic_prompt, - coarse_prompt=coarse_prompt, - fine_prompt=fine_prompt - ) - - -def on_quick_gen_changed(checkbox): - if checkbox == False: - return gr.CheckboxGroup.update(visible=True) - return gr.CheckboxGroup.update(visible=False) - -def delete_output_files(checkbox_state): - if checkbox_state: - outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path) - if os.path.exists(outputs_folder): - purgedir(outputs_folder) - return False - - -# https://stackoverflow.com/a/54494779 -def purgedir(parent): - for root, dirs, files in os.walk(parent): - for item in files: - # Delete subordinate files - filespec = os.path.join(root, item) - os.unlink(filespec) - for item in dirs: - # Recursively perform this operation for subordinate directories - purgedir(os.path.join(root, item)) - -def convert_text_to_ssml(text, selected_speaker): - return build_ssml(text, selected_speaker) - - -def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)): - if selected_step == prepare_training_list[0]: - prepare_semantics_from_text() - else: - prepare_wavs_from_semantics() - return None - - -def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)): - training_prepare_files("./training/data/", "./training/data/checkpoint/hubert_base_ls960.pt") - train("./training/data/", save_model_epoch, max_epochs) - return None - - - -def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker): - settings.selected_theme = themes - settings.server_name = input_server_name - settings.server_port = input_server_port - settings.server_share = input_server_public - settings.input_text_desired_length = input_desired_len - settings.input_text_max_length = input_max_len - settings.silence_sentence = input_silence_break - settings.silence_speaker = input_silence_speaker - settings.save() - -def restart(): - global restart_server - restart_server = True - - -def create_version_html(): - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - versions_html = f""" -python: {python_version} - •  -torch: {getattr(torch, '__long_version__',torch.__version__)} - •  -gradio: {gr.__version__} -""" - return versions_html - - - -logger = logging.getLogger(__name__) -APPTITLE = "Bark Voice Cloning UI" - - -autolaunch = False - -if len(sys.argv) > 1: - autolaunch = "-autolaunch" in sys.argv - - -if torch.cuda.is_available() == False: - os.environ['BARK_FORCE_CPU'] = 'True' - logger.warning("No CUDA detected, fallback to CPU!") - -print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}') -print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}') 
-print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}') -print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}') -print(f'autolaunch={autolaunch}\n\n') - -#print("Updating nltk\n") -#nltk.download('punkt') - -print("Preloading Models\n") -preload_models() - -available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"] -tokenizer_language_list = ["de","en", "pl"] -prepare_training_list = ["Step 1: Semantics from Text","Step 2: WAV from Semantics"] - -seed = -1 -server_name = settings.server_name -if len(server_name) < 1: - server_name = None -server_port = settings.server_port -if server_port <= 0: - server_port = None -global run_server -global restart_server - -run_server = True - -while run_server: - # Collect all existing speakers/voices in dir - speakers_list = [] - - for root, dirs, files in os.walk("./bark/assets/prompts"): - for file in files: - if file.endswith(".npz"): - pathpart = root.replace("./bark/assets/prompts", "") - name = os.path.join(pathpart, file[:-4]) - if name.startswith("/") or name.startswith("\\"): - name = name[1:] - speakers_list.append(name) - - speakers_list = sorted(speakers_list, key=lambda x: x.lower()) - speakers_list.insert(0, 'None') - - print(f'Launching {APPTITLE} Server') - - # Create Gradio Blocks - - with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui: - gr.Markdown("#
🐶🎶⭐ - Bark Voice Cloning
") - gr.Markdown("###
🤗 - If you like this space, please star my [github repo](https://github.com/KevinWang676/Bark-Voice-Cloning)
") - gr.Markdown("###
🎡 - Based on [bark-gui](https://github.com/C0untFloyd/bark-gui)
    ") - gr.Markdown(f""" You can duplicate and use it with a GPU: Duplicate Space - or open in [Colab](https://colab.research.google.com/github/KevinWang676/Bark-Voice-Cloning/blob/main/Bark_Voice_Cloning_UI.ipynb) for quick start 🌟 - """) - - with gr.Tab("🎙️ - Clone Voice"): - with gr.Row(): - input_audio_filename = gr.Audio(label="Input audio.wav", source="upload", type="filepath") - #transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...") - with gr.Row(): - with gr.Column(): - initialname = "/home/user/app/bark/assets/prompts/file" - output_voice = gr.Textbox(label="Filename of trained Voice (do not change the initial name)", lines=1, placeholder=initialname, value=initialname, visible=False) - with gr.Column(): - tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1], visible=False) - with gr.Row(): - clone_voice_button = gr.Button("Create Voice", variant="primary") - with gr.Row(): - dummy = gr.Text(label="Progress") - npz_file = gr.File(label=".npz file") - speakers_list.insert(0, npz_file) # add prompt - - with gr.Tab("🎵 - TTS"): - with gr.Row(): - with gr.Column(): - placeholder = "Enter text here." - input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder) - convert_to_ssml_button = gr.Button("Convert Input Text to SSML") - with gr.Column(): - seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1) - batchcount = gr.Number(label="Batch count", precision=0, value=1) - - with gr.Row(): - with gr.Column(): - gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)") - speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)") - - with gr.Column(): - text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative") - waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform temperature", info="1.0 more diverse, 0.1 more conservative") - - with gr.Row(): - with gr.Column(): - quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True) - settings_checkboxes = ["Use last generation as history", "Save generation as Voice"] - complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False) - with gr.Column(): - eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End of sentence probability") - - with gr.Row(): - with gr.Column(): - tts_create_button = gr.Button("Generate", variant="primary") - with gr.Column(): - hidden_checkbox = gr.Checkbox(visible=False) - button_stop_generation = gr.Button("Stop generation") - with gr.Row(): - output_audio = gr.Audio(label="Generated Audio", type="filepath") - - with gr.Tab("🔮 - Voice Conversion"): - with gr.Row(): - swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", source="upload", type="filepath") - with gr.Row(): - with gr.Column(): - swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1]) - swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1) - with gr.Column(): - speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you wanna use the custom voice)") - swap_batchcount = 
gr.Number(label="Batch count", precision=0, value=1) - with gr.Row(): - swap_voice_button = gr.Button("Generate", variant="primary") - with gr.Row(): - output_swap = gr.Audio(label="Generated Audio", type="filepath") - - - quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings) - convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker],outputs=input_text) - gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob, quick_gen_checkbox, complete_settings, seedcomponent, batchcount],outputs=output_audio) - button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click]) - - - - swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap) - clone_voice_button.click(clone_voice, inputs=[input_audio_filename, output_voice], outputs=[dummy, npz_file]) - - - restart_server = False - try: - barkgui.queue().launch(show_error=True) - except: - restart_server = True - run_server = False - try: - while restart_server == False: - time.sleep(1.0) - except (KeyboardInterrupt, OSError): - print("Keyboard interruption in main thread... closing server.") - run_server = False - barkgui.close() - - - - diff --git a/spaces/PushkarA07/image-colorizer/app.py b/spaces/PushkarA07/image-colorizer/app.py deleted file mode 100644 index 7be54f7ec5b95219abbca4530a62b1780eb2ebfa..0000000000000000000000000000000000000000 --- a/spaces/PushkarA07/image-colorizer/app.py +++ /dev/null @@ -1,413 +0,0 @@ -from fastai.vision.models.unet import DynamicUnet -from torchvision.models.resnet import resnet18 -from fastai.vision.learner import create_body -import streamlit as st -from PIL import Image -import cv2 as cv -import os -import glob -import time -import numpy as np -from PIL import Image -from pathlib import Path -from tqdm.notebook import tqdm -import matplotlib.pyplot as plt -from skimage.color import rgb2lab, lab2rgb - -# pip install fastai==2.4 - -import torch -from torch import nn, optim -from torchvision import transforms -from torchvision.utils import make_grid -from torch.utils.data import Dataset, DataLoader -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -use_colab = None - -SIZE = 256 - - -class ColorizationDataset(Dataset): - def __init__(self, paths, split='train'): - if split == 'train': - self.transforms = transforms.Compose([ - transforms.Resize((SIZE, SIZE), Image.BICUBIC), - transforms.RandomHorizontalFlip(), - ]) - elif split == 'val': - self.transforms = transforms.Resize((SIZE, SIZE), Image.BICUBIC) - - self.split = split - self.size = SIZE - self.paths = paths - - def __getitem__(self, idx): - img = Image.open(self.paths[idx]).convert("RGB") - img = self.transforms(img) - img = np.array(img) - img_lab = rgb2lab(img).astype("float32") # Converting RGB to L*a*b - img_lab = transforms.ToTensor()(img_lab) - L = img_lab[[0], ...] / 50. - 1. # Between -1 and 1 - ab = img_lab[[1, 2], ...] / 110. 
# Between -1 and 1 - - return {'L': L, 'ab': ab} - - def __len__(self): - return len(self.paths) - - -def make_dataloaders(batch_size=16, n_workers=4, pin_memory=True, **kwargs): - dataset = ColorizationDataset(**kwargs) - dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_workers, - pin_memory=pin_memory) - return dataloader - - -class UnetBlock(nn.Module): - def __init__(self, nf, ni, submodule=None, input_c=None, dropout=False, - innermost=False, outermost=False): - super().__init__() - self.outermost = outermost - if input_c is None: - input_c = nf - downconv = nn.Conv2d(input_c, ni, kernel_size=4, - stride=2, padding=1, bias=False) - downrelu = nn.LeakyReLU(0.2, True) - downnorm = nn.BatchNorm2d(ni) - uprelu = nn.ReLU(True) - upnorm = nn.BatchNorm2d(nf) - - if outermost: - upconv = nn.ConvTranspose2d(ni * 2, nf, kernel_size=4, - stride=2, padding=1) - down = [downconv] - up = [uprelu, upconv, nn.Tanh()] - model = down + [submodule] + up - elif innermost: - upconv = nn.ConvTranspose2d(ni, nf, kernel_size=4, - stride=2, padding=1, bias=False) - down = [downrelu, downconv] - up = [uprelu, upconv, upnorm] - model = down + up - else: - upconv = nn.ConvTranspose2d(ni * 2, nf, kernel_size=4, - stride=2, padding=1, bias=False) - down = [downrelu, downconv, downnorm] - up = [uprelu, upconv, upnorm] - if dropout: - up += [nn.Dropout(0.5)] - model = down + [submodule] + up - self.model = nn.Sequential(*model) - - def forward(self, x): - if self.outermost: - return self.model(x) - else: - return torch.cat([x, self.model(x)], 1) - - -class Unet(nn.Module): - def __init__(self, input_c=1, output_c=2, n_down=8, num_filters=64): - super().__init__() - unet_block = UnetBlock( - num_filters * 8, num_filters * 8, innermost=True) - for _ in range(n_down - 5): - unet_block = UnetBlock( - num_filters * 8, num_filters * 8, submodule=unet_block, dropout=True) - out_filters = num_filters * 8 - for _ in range(3): - unet_block = UnetBlock( - out_filters // 2, out_filters, submodule=unet_block) - out_filters //= 2 - self.model = UnetBlock( - output_c, out_filters, input_c=input_c, submodule=unet_block, outermost=True) - - def forward(self, x): - return self.model(x) - - -class PatchDiscriminator(nn.Module): - def __init__(self, input_c, num_filters=64, n_down=3): - super().__init__() - model = [self.get_layers(input_c, num_filters, norm=False)] - model += [self.get_layers(num_filters * 2 ** i, num_filters * 2 ** (i + 1), s=1 if i == (n_down-1) else 2) - for i in range(n_down)] # the 'if' statement is taking care of not using - # stride of 2 for the last block in this loop - # Make sure to not use normalization or - model += [self.get_layers(num_filters * 2 ** - n_down, 1, s=1, norm=False, act=False)] - # activation for the last layer of the model - self.model = nn.Sequential(*model) - - def get_layers(self, ni, nf, k=4, s=2, p=1, norm=True, act=True): - layers = [nn.Conv2d(ni, nf, k, s, p, bias=not norm)] - if norm: - layers += [nn.BatchNorm2d(nf)] - if act: - layers += [nn.LeakyReLU(0.2, True)] - return nn.Sequential(*layers) - - def forward(self, x): - return self.model(x) - - -class GANLoss(nn.Module): - def __init__(self, gan_mode='vanilla', real_label=1.0, fake_label=0.0): - super().__init__() - self.register_buffer('real_label', torch.tensor(real_label)) - self.register_buffer('fake_label', torch.tensor(fake_label)) - if gan_mode == 'vanilla': - self.loss = nn.BCEWithLogitsLoss() - elif gan_mode == 'lsgan': - self.loss = nn.MSELoss() - - def get_labels(self, preds, target_is_real): - if 
target_is_real: - labels = self.real_label - else: - labels = self.fake_label - return labels.expand_as(preds) - - def __call__(self, preds, target_is_real): - labels = self.get_labels(preds, target_is_real) - loss = self.loss(preds, labels) - return loss - - -def init_weights(net, init='norm', gain=0.02): - - def init_func(m): - classname = m.__class__.__name__ - if hasattr(m, 'weight') and 'Conv' in classname: - if init == 'norm': - nn.init.normal_(m.weight.data, mean=0.0, std=gain) - elif init == 'xavier': - nn.init.xavier_normal_(m.weight.data, gain=gain) - elif init == 'kaiming': - nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - - if hasattr(m, 'bias') and m.bias is not None: - nn.init.constant_(m.bias.data, 0.0) - elif 'BatchNorm2d' in classname: - nn.init.normal_(m.weight.data, 1., gain) - nn.init.constant_(m.bias.data, 0.) - - net.apply(init_func) - print(f"model initialized with {init} initialization") - return net - - -def init_model(model, device): - model = model.to(device) - model = init_weights(model) - return model - - -class MainModel(nn.Module): - def __init__(self, net_G=None, lr_G=2e-4, lr_D=2e-4, - beta1=0.5, beta2=0.999, lambda_L1=100.): - super().__init__() - - self.device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu") - self.lambda_L1 = lambda_L1 - - if net_G is None: - self.net_G = init_model( - Unet(input_c=1, output_c=2, n_down=8, num_filters=64), self.device) - else: - self.net_G = net_G.to(self.device) - self.net_D = init_model(PatchDiscriminator( - input_c=3, n_down=3, num_filters=64), self.device) - self.GANcriterion = GANLoss(gan_mode='vanilla').to(self.device) - self.L1criterion = nn.L1Loss() - self.opt_G = optim.Adam(self.net_G.parameters(), - lr=lr_G, betas=(beta1, beta2)) - self.opt_D = optim.Adam(self.net_D.parameters(), - lr=lr_D, betas=(beta1, beta2)) - - def set_requires_grad(self, model, requires_grad=True): - for p in model.parameters(): - p.requires_grad = requires_grad - - def setup_input(self, data): - self.L = data['L'].to(self.device) - self.ab = data['ab'].to(self.device) - - def forward(self): - self.fake_color = self.net_G(self.L) - - def backward_D(self): - fake_image = torch.cat([self.L, self.fake_color], dim=1) - fake_preds = self.net_D(fake_image.detach()) - self.loss_D_fake = self.GANcriterion(fake_preds, False) - real_image = torch.cat([self.L, self.ab], dim=1) - real_preds = self.net_D(real_image) - self.loss_D_real = self.GANcriterion(real_preds, True) - self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 - self.loss_D.backward() - - def backward_G(self): - fake_image = torch.cat([self.L, self.fake_color], dim=1) - fake_preds = self.net_D(fake_image) - self.loss_G_GAN = self.GANcriterion(fake_preds, True) - self.loss_G_L1 = self.L1criterion( - self.fake_color, self.ab) * self.lambda_L1 - self.loss_G = self.loss_G_GAN + self.loss_G_L1 - self.loss_G.backward() - - def optimize(self): - self.forward() - self.net_D.train() - self.set_requires_grad(self.net_D, True) - self.opt_D.zero_grad() - self.backward_D() - self.opt_D.step() - - self.net_G.train() - self.set_requires_grad(self.net_D, False) - self.opt_G.zero_grad() - self.backward_G() - self.opt_G.step() - - -class AverageMeter: - def __init__(self): - self.reset() - - def reset(self): - self.count, self.avg, self.sum = [0.] 
* 3 - - def update(self, val, count=1): - self.count += count - self.sum += count * val - self.avg = self.sum / self.count - - -def create_loss_meters(): - loss_D_fake = AverageMeter() - loss_D_real = AverageMeter() - loss_D = AverageMeter() - loss_G_GAN = AverageMeter() - loss_G_L1 = AverageMeter() - loss_G = AverageMeter() - - return {'loss_D_fake': loss_D_fake, - 'loss_D_real': loss_D_real, - 'loss_D': loss_D, - 'loss_G_GAN': loss_G_GAN, - 'loss_G_L1': loss_G_L1, - 'loss_G': loss_G} - - -def update_losses(model, loss_meter_dict, count): - for loss_name, loss_meter in loss_meter_dict.items(): - loss = getattr(model, loss_name) - loss_meter.update(loss.item(), count=count) - - -def lab_to_rgb(L, ab): - """ - Takes a batch of images - """ - - L = (L + 1.) * 50. - ab = ab * 110. - Lab = torch.cat([L, ab], dim=1).permute(0, 2, 3, 1).cpu().numpy() - rgb_imgs = [] - for img in Lab: - img_rgb = lab2rgb(img) - rgb_imgs.append(img_rgb) - return np.stack(rgb_imgs, axis=0) - - -def visualize(model, data, dims): - model.net_G.eval() - with torch.no_grad(): - model.setup_input(data) - model.forward() - model.net_G.train() - fake_color = model.fake_color.detach() - real_color = model.ab - L = model.L - fake_imgs = lab_to_rgb(L, fake_color) - real_imgs = lab_to_rgb(L, real_color) - for i in range(1): - # t_img = transforms.Resize((dims[0], dims[1]))(t_img) - img = Image.fromarray(np.uint8(fake_imgs[i])) - img = cv.resize(fake_imgs[i], dsize=( - dims[1], dims[0]), interpolation=cv.INTER_CUBIC) - # st.text(f"Size of fake image {fake_imgs[i].shape} \n Type of image = {type(fake_imgs[i])}") - st.image(img, caption="Output image", - use_column_width='auto', clamp=True) - - -def log_results(loss_meter_dict): - for loss_name, loss_meter in loss_meter_dict.items(): - print(f"{loss_name}: {loss_meter.avg:.5f}") - - -# pip install fastai==2.4 -from fastai.vision.learner import create_body -from torchvision.models.resnet import resnet18 -from fastai.vision.models.unet import DynamicUnet - - -def build_res_unet(n_input=1, n_output=2, size=256): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - body = create_body(resnet18, pretrained=True, n_in=n_input, cut=-2) - net_G = DynamicUnet(body, n_output, (size, size)).to(device) - return net_G - - -net_G = build_res_unet(n_input=1, n_output=2, size=256) -net_G.load_state_dict(torch.load("res18-unet.pt", map_location=device)) -model = MainModel(net_G=net_G) -model.load_state_dict(torch.load("main-model.pt", map_location=device)) - - -class MyDataset(torch.utils.data.Dataset): - def __init__(self, img_list): - super(MyDataset, self).__init__() - self.img_list = img_list - self.augmentations = transforms.Resize((SIZE, SIZE), Image.BICUBIC) - - def __len__(self): - return len(self.img_list) - - def __getitem__(self, idx): - img = self.img_list[idx] - img = self.augmentations(img) - img = np.array(img) - img_lab = rgb2lab(img).astype("float32") # Converting RGB to L*a*b - img_lab = transforms.ToTensor()(img_lab) - L = img_lab[[0], ...] / 50. - 1. # Between -1 and 1 - ab = img_lab[[1, 2], ...] / 110. 
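# A minimal standalone sketch (with made-up pixel values, not part of the original app) of the
# L*a*b scaling used in the two dataset classes above and undone in lab_to_rgb(): L in [0, 100]
# is mapped to [-1, 1] via L / 50 - 1, and a/b in roughly [-110, 110] are mapped via ab / 110.
L_raw, a_raw, b_raw = 25.0, 55.0, -27.5                # hypothetical values straight from rgb2lab
L_n, a_n, b_n = L_raw / 50. - 1., a_raw / 110., b_raw / 110.
assert (L_n + 1.) * 50. == L_raw                       # inverse applied by lab_to_rgb to the L channel
assert a_n * 110. == a_raw and b_n * 110. == b_raw     # inverse applied by lab_to_rgb to the ab channels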
- return {'L': L, 'ab': ab} - -def make_dataloaders2(batch_size=16, n_workers=4, pin_memory=True, **kwargs): - dataset = MyDataset(**kwargs) - dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_workers, - pin_memory=pin_memory) - return dataloader - - -# st.set_option('deprecation.showfileUploaderEncoding', False) -# @st.cache(allow_output_mutation= True) -st.write(""" - # Image Recolorisation - """ - ) -st.subheader("Created by Pushkar") -file_up = st.file_uploader("Upload an jpg image", type=["jpg", "jpeg", "png"]) - -if file_up is not None: - im = Image.open(file_up) - st.text(body=f"Size of uploaded image {im.shape}") - a = im.shape - st.image(im, caption="Uploaded Image.", use_column_width='auto') - test_dl = make_dataloaders2(img_list=[im]) - for data in test_dl: - model.setup_input(data) - model.optimize() - visualize(model, data, a) diff --git a/spaces/RMXK/RVC_HFF/go-tensorboard.bat b/spaces/RMXK/RVC_HFF/go-tensorboard.bat deleted file mode 100644 index cb81c17d3865513adec8eb0b832b7888cd1e4078..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/go-tensorboard.bat +++ /dev/null @@ -1,2 +0,0 @@ -python fixes/tensor-launch.py -pause \ No newline at end of file diff --git a/spaces/Raghav001/PDF/app.py b/spaces/Raghav001/PDF/app.py deleted file mode 100644 index 1031d9b617b0acdc178223eb13f46c3b6a5d6f4f..0000000000000000000000000000000000000000 --- a/spaces/Raghav001/PDF/app.py +++ /dev/null @@ -1,312 +0,0 @@ -import requests -import json -import gradio as gr -# from concurrent.futures import ThreadPoolExecutor -import pdfplumber -import pandas as pd -import langchain -import time -from cnocr import CnOcr -import pinecone -import openai -from langchain.vectorstores import Pinecone -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.text_splitter import CharacterTextSplitter - -# from langchain.document_loaders import PyPDFLoader -from langchain.document_loaders import UnstructuredWordDocumentLoader -from langchain.document_loaders import UnstructuredPowerPointLoader -# from langchain.document_loaders.image import UnstructuredImageLoader - - -from langchain.chains.question_answering import load_qa_chain -from langchain import OpenAI - -from sentence_transformers import SentenceTransformer, models, util -word_embedding_model = models.Transformer('sentence-transformers/all-MiniLM-L6-v2', do_lower_case=True) -pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls') -embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model]) -ocr = CnOcr() -# chat_url = 'https://Raghav001-API.hf.space/sale' -chat_url = 'https://Raghav001-API.hf.space/chatpdf' -chat_emd = 'https://Raghav001-API.hf.space/embedd' -headers = { - 'Content-Type': 'application/json', -} -# thread_pool_executor = ThreadPoolExecutor(max_workers=4) -history_max_len = 500 -all_max_len = 3000 - - - -# Initialize Pinecone client and create an index -pinecone.init(api_key="ffb1f594-0915-4ebf-835f-c1eaa62fdcdc",environment = "us-west4-gcp-free") -index = pinecone.Index(index_name="test") - - -def get_emb(text): - emb_url = 'https://Raghav001-API.hf.space/embeddings' - data = {"content": text} - try: - result = requests.post(url=emb_url, - data=json.dumps(data), - headers=headers - ) - print("--------------------------------Embeddings-----------------------------------") - print(result.json()['data'][0]['embedding']) - return result.json()['data'][0]['embedding'] - except Exception as e: - print('data', data, 'result 
json', result.json()) - - -def doc_emb(doc: str): - texts = doc.split('\n') - # futures = [] - emb_list = embedder.encode(texts) - print('emb_list',emb_list) - # for text in texts: - # futures.append(thread_pool_executor.submit(get_emb, text)) - # for f in futures: - # emb_list.append(f.result()) - print('\n'.join(texts)) - pine(doc) - gr.Textbox.update(value="") - return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update( - value="""success ! Let's talk"""), gr.Chatbot.update(visible=True) - - -def get_response(msg, bot, doc_text_list, doc_embeddings): - # future = thread_pool_executor.submit(get_emb, msg) - gr.Textbox.update(value="") - now_len = len(msg) - req_json = {'question': msg} - his_bg = -1 - for i in range(len(bot) - 1, -1, -1): - if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len: - break - now_len += len(bot[i][0]) + len(bot[i][1]) - his_bg = i - req_json['history'] = [] if his_bg == -1 else bot[his_bg:] - # query_embedding = future.result() - query_embedding = embedder.encode([msg]) - cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0] - score_index = [[score, index] for score, index in zip(cos_scores, [i for i in range(len(cos_scores))])] - score_index.sort(key=lambda x: x[0], reverse=True) - print('score_index:\n', score_index) - print('doc_emb_state', doc_emb_state) - index_set, sub_doc_list = set(), [] - for s_i in score_index: - doc = doc_text_list[s_i[1]] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]) - now_len += len(doc) - # Maybe the paragraph is truncated wrong, so add the upper and lower paragraphs - if s_i[1] > 0 and s_i[1] -1 not in index_set: - doc = doc_text_list[s_i[1]-1] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]-1) - now_len += len(doc) - if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set: - doc = doc_text_list[s_i[1]+1] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]+1) - now_len += len(doc) - - index_list = list(index_set) - index_list.sort() - for i in index_list: - sub_doc_list.append(doc_text_list[i]) - req_json['doc'] = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list) - data = {"content": json.dumps(req_json)} - print('data:\n', req_json) - result = requests.post(url=chat_url, - data=json.dumps(data), - headers=headers - ) - res = result.json()['content'] - bot.append([msg, res]) - return bot[max(0, len(bot) - 3):] - - -def up_file(fls): - doc_text_list = [] - - - names = [] - print(names) - for i in fls: - names.append(str(i.name)) - - - pdf = [] - docs = [] - pptx = [] - - for i in names: - - if i[-3:] == "pdf": - pdf.append(i) - elif i[-4:] == "docx": - docs.append(i) - else: - pptx.append(i) - - - #Pdf Extracting - for idx, file in enumerate(pdf): - print("11111") - #print(file.name) - with pdfplumber.open(file) as pdf: - for i in range(len(pdf.pages)): - # Read page i+1 of a PDF document - page = pdf.pages[i] - res_list = page.extract_text().split('\n')[:-1] - - for j in range(len(page.images)): - # Get the binary stream of the image - img = page.images[j] - file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j)) - with open(file_name, mode='wb') as f: - f.write(img['stream'].get_data()) - try: - res = ocr.ocr(file_name) - # res = PyPDFLoader(file_name) - except Exception as e: - res = [] - if len(res) > 0: - res_list.append(' '.join([re['text'] for re in res])) - - tables = page.extract_tables() - for table in tables: - # The first column is used as the header - df = 
pd.DataFrame(table[1:], columns=table[0]) - try: - records = json.loads(df.to_json(orient="records", force_ascii=False)) - for rec in records: - res_list.append(json.dumps(rec, ensure_ascii=False)) - except Exception as e: - res_list.append(str(df)) - - doc_text_list += res_list - - #pptx Extracting - for i in pptx: - loader = UnstructuredPowerPointLoader(i) - data = loader.load() - # content = str(data).split("'") - # cnt = content[1] - # # c = cnt.split('\\n\\n') - # # final = "".join(c) - # c = cnt.replace('\\n\\n',"").replace("","").replace("\t","") - doc_text_list.append(data) - - - - #Doc Extracting - for i in docs: - loader = UnstructuredWordDocumentLoader(i) - data = loader.load() - # content = str(data).split("'") - # cnt = content[1] - # # c = cnt.split('\\n\\n') - # # final = "".join(c) - # c = cnt.replace('\\n\\n',"").replace("","").replace("\t","") - doc_text_list.append(data) - - # #Image Extraction - # for i in jpg: - # loader = UnstructuredImageLoader(i) - # data = loader.load() - # # content = str(data).split("'") - # # cnt = content[1] - # # # c = cnt.split('\\n\\n') - # # # final = "".join(c) - # # c = cnt.replace('\\n\\n',"").replace("","").replace("\t","") - # doc_text_list.append(data) - - doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0] - # print(doc_text_list) - return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update( - visible=True), gr.Markdown.update( - value="Processing") - - -def pine(data): - char_text_spliter = CharacterTextSplitter(chunk_size = 1000, chunk_overlap=0) - # doc_text = char_text_spliter.split_documents(data) - doc_spilt = [] - data = data.split(" ") - # print(len(data)) - - c = 0 - check = 0 - for i in data: - # print(i) - if c == 550: - text = " ".join(data[check: check + c]) - print(text) - print(check) - doc_spilt.append(text) - check = check + c - c = 0 - else: - c = c+1 - - - Embedding_model = "text-embedding-ada-002" - embeddings = OpenAIEmbeddings(openai_api_key="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") - - print(requests.post(url = chat_emd)) - - # embeddings = requests.post(url=chat_emd, - # data=json.dumps(data), - # headers=headers - # ) - - pinecone.init(api_key = "ffb1f594-0915-4ebf-835f-c1eaa62fdcdc", - environment = "us-west4-gcp-free" - ) - - index_name = "test" - docstore = Pinecone.from_texts([d for d in doc_spilt],embeddings,index_name = index_name,namespace='a1') - - return '' - -def get_answer(query_live): - - llm = OpenAI(temperature=0, openai='aaa') - qa_chain = load_qa_chain(llm,chain_type='stuff') - query = query_live - docs = docstore.similarity_search(query) - qa_chain.run(input_documents = docs, question = query) - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - file = gr.File(file_types=['.pdf'], label='Click to upload Document', file_count='multiple') - doc_bu = gr.Button(value='Submit', visible=False) - - - txt = gr.Textbox(label='result', visible=False) - - - doc_text_state = gr.State([]) - doc_emb_state = gr.State([]) - - with gr.Column(): - md = gr.Markdown("Please Upload the PDF") - chat_bot = gr.Chatbot(visible=False) - msg_txt = gr.Textbox(visible = False) - chat_bu = gr.Button(value='Clear', visible=False) - - file.change(up_file, [file], [txt, doc_bu, md]) #hiding the text - doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot]) - msg_txt.submit(get_response, [msg_txt, chat_bot,doc_text_state, doc_emb_state], [chat_bot],queue=False) - chat_bu.click(lambda: None, 
None, chat_bot, queue=False) - -if __name__ == "__main__": - demo.queue().launch(show_api=False) - # demo.queue().launch(share=False, server_name='172.22.2.54', server_port=9191) \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py deleted file mode 100644 index a605d6c254f519ec221b18f104a62b758241a99b..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py +++ /dev/null @@ -1,296 +0,0 @@ -import functools -import logging -import os -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast - -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible -from pip._vendor.resolvelib import Resolver as RLResolver -from pip._vendor.resolvelib.structs import DirectedGraph - -from pip._internal.cache import WheelCache -from pip._internal.index.package_finder import PackageFinder -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.req_install import InstallRequirement -from pip._internal.req.req_set import RequirementSet -from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider -from pip._internal.resolution.resolvelib.provider import PipProvider -from pip._internal.resolution.resolvelib.reporter import ( - PipDebuggingReporter, - PipReporter, -) - -from .base import Candidate, Requirement -from .factory import Factory - -if TYPE_CHECKING: - from pip._vendor.resolvelib.resolvers import Result as RLResult - - Result = RLResult[Requirement, Candidate, str] - - -logger = logging.getLogger(__name__) - - -class Resolver(BaseResolver): - _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} - - def __init__( - self, - preparer: RequirementPreparer, - finder: PackageFinder, - wheel_cache: Optional[WheelCache], - make_install_req: InstallRequirementProvider, - use_user_site: bool, - ignore_dependencies: bool, - ignore_installed: bool, - ignore_requires_python: bool, - force_reinstall: bool, - upgrade_strategy: str, - py_version_info: Optional[Tuple[int, ...]] = None, - ): - super().__init__() - assert upgrade_strategy in self._allowed_strategies - - self.factory = Factory( - finder=finder, - preparer=preparer, - make_install_req=make_install_req, - wheel_cache=wheel_cache, - use_user_site=use_user_site, - force_reinstall=force_reinstall, - ignore_installed=ignore_installed, - ignore_requires_python=ignore_requires_python, - py_version_info=py_version_info, - ) - self.ignore_dependencies = ignore_dependencies - self.upgrade_strategy = upgrade_strategy - self._result: Optional[Result] = None - - def resolve( - self, root_reqs: List[InstallRequirement], check_supported_wheels: bool - ) -> RequirementSet: - collected = self.factory.collect_root_requirements(root_reqs) - provider = PipProvider( - factory=self.factory, - constraints=collected.constraints, - ignore_dependencies=self.ignore_dependencies, - upgrade_strategy=self.upgrade_strategy, - user_requested=collected.user_requested, - ) - if "PIP_RESOLVER_DEBUG" in os.environ: - reporter: BaseReporter = PipDebuggingReporter() - else: - reporter = PipReporter() - resolver: RLResolver[Requirement, Candidate, str] = RLResolver( - provider, - reporter, - ) - - try: - try_to_avoid_resolution_too_deep = 
2000000 - result = self._result = resolver.resolve( - collected.requirements, max_rounds=try_to_avoid_resolution_too_deep - ) - - except ResolutionImpossible as e: - error = self.factory.get_installation_error( - cast("ResolutionImpossible[Requirement, Candidate]", e), - collected.constraints, - ) - raise error from e - - req_set = RequirementSet(check_supported_wheels=check_supported_wheels) - for candidate in result.mapping.values(): - ireq = candidate.get_install_requirement() - if ireq is None: - continue - - # Check if there is already an installation under the same name, - # and set a flag for later stages to uninstall it, if needed. - installed_dist = self.factory.get_dist_to_uninstall(candidate) - if installed_dist is None: - # There is no existing installation -- nothing to uninstall. - ireq.should_reinstall = False - elif self.factory.force_reinstall: - # The --force-reinstall flag is set -- reinstall. - ireq.should_reinstall = True - elif installed_dist.version != candidate.version: - # The installation is different in version -- reinstall. - ireq.should_reinstall = True - elif candidate.is_editable or installed_dist.editable: - # The incoming distribution is editable, or different in - # editable-ness to installation -- reinstall. - ireq.should_reinstall = True - elif candidate.source_link and candidate.source_link.is_file: - # The incoming distribution is under file:// - if candidate.source_link.is_wheel: - # is a local wheel -- do nothing. - logger.info( - "%s is already installed with the same version as the " - "provided wheel. Use --force-reinstall to force an " - "installation of the wheel.", - ireq.name, - ) - continue - - # is a local sdist or path -- reinstall - ireq.should_reinstall = True - else: - continue - - link = candidate.source_link - if link and link.is_yanked: - # The reason can contain non-ASCII characters, Unicode - # is required for Python 2. - msg = ( - "The candidate selected for download or install is a " - "yanked version: {name!r} candidate (version {version} " - "at {link})\nReason for being yanked: {reason}" - ).format( - name=candidate.name, - version=candidate.version, - link=link, - reason=link.yanked_reason or "", - ) - logger.warning(msg) - - req_set.add_named_requirement(ireq) - - reqs = req_set.all_requirements - self.factory.preparer.prepare_linked_requirements_more(reqs) - return req_set - - def get_installation_order( - self, req_set: RequirementSet - ) -> List[InstallRequirement]: - """Get order for installation of requirements in RequirementSet. - - The returned list contains a requirement before another that depends on - it. This helps ensure that the environment is kept consistent as they - get installed one-by-one. - - The current implementation creates a topological ordering of the - dependency graph, giving more weight to packages with less - or no dependencies, while breaking any cycles in the graph at - arbitrary points. We make no guarantees about where the cycle - would be broken, other than it *would* be broken. - """ - assert self._result is not None, "must call resolve() first" - - if not req_set.requirements: - # Nothing is left to install, so we do not need an order. 
- return [] - - graph = self._result.graph - weights = get_topological_weights(graph, set(req_set.requirements.keys())) - - sorted_items = sorted( - req_set.requirements.items(), - key=functools.partial(_req_set_item_sorter, weights=weights), - reverse=True, - ) - return [ireq for _, ireq in sorted_items] - - -def get_topological_weights( - graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str] -) -> Dict[Optional[str], int]: - """Assign weights to each node based on how "deep" they are. - - This implementation may change at any point in the future without prior - notice. - - We first simplify the dependency graph by pruning any leaves and giving them - the highest weight: a package without any dependencies should be installed - first. This is done again and again in the same way, giving ever less weight - to the newly found leaves. The loop stops when no leaves are left: all - remaining packages have at least one dependency left in the graph. - - Then we continue with the remaining graph, by taking the length for the - longest path to any node from root, ignoring any paths that contain a single - node twice (i.e. cycles). This is done through a depth-first search through - the graph, while keeping track of the path to the node. - - Cycles in the graph result would result in node being revisited while also - being on its own path. In this case, take no action. This helps ensure we - don't get stuck in a cycle. - - When assigning weight, the longer path (i.e. larger length) is preferred. - - We are only interested in the weights of packages that are in the - requirement_keys. - """ - path: Set[Optional[str]] = set() - weights: Dict[Optional[str], int] = {} - - def visit(node: Optional[str]) -> None: - if node in path: - # We hit a cycle, so we'll break it here. - return - - # Time to visit the children! - path.add(node) - for child in graph.iter_children(node): - visit(child) - path.remove(node) - - if node not in requirement_keys: - return - - last_known_parent_count = weights.get(node, 0) - weights[node] = max(last_known_parent_count, len(path)) - - # Simplify the graph, pruning leaves that have no dependencies. - # This is needed for large graphs (say over 200 packages) because the - # `visit` function is exponentially slower then, taking minutes. - # See https://github.com/pypa/pip/issues/10557 - # We will loop until we explicitly break the loop. - while True: - leaves = set() - for key in graph: - if key is None: - continue - for _child in graph.iter_children(key): - # This means we have at least one child - break - else: - # No child. - leaves.add(key) - if not leaves: - # We are done simplifying. - break - # Calculate the weight for the leaves. - weight = len(graph) - 1 - for leaf in leaves: - if leaf not in requirement_keys: - continue - weights[leaf] = weight - # Remove the leaves from the graph, making it simpler. - for leaf in leaves: - graph.remove(leaf) - - # Visit the remaining graph. - # `None` is guaranteed to be the root node by resolvelib. - visit(None) - - # Sanity check: all requirement keys should be in the weights, - # and no other keys should be in the weights. - difference = set(weights.keys()).difference(requirement_keys) - assert not difference, difference - - return weights - - -def _req_set_item_sorter( - item: Tuple[str, InstallRequirement], - weights: Dict[Optional[str], int], -) -> Tuple[int, str]: - """Key function used to sort install requirements for installation. - - Based on the "weight" mapping calculated in ``get_installation_order()``. 
- The canonical package name is returned as the second member as a tie- - breaker to ensure the result is predictable, which is useful in tests. - """ - name = canonicalize_name(item[0]) - return weights[name], name diff --git a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/benchmarks/mega_pose_est_mnn.py b/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/benchmarks/mega_pose_est_mnn.py deleted file mode 100644 index e979bddfb09ff8760d83442b284662376a074998..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DeDoDe/DeDoDe/benchmarks/mega_pose_est_mnn.py +++ /dev/null @@ -1,152 +0,0 @@ -import numpy as np -import torch -from DeDoDe.utils import * -from PIL import Image -from tqdm import tqdm -import torch.nn.functional as F - - -class MegaDepthPoseMNNBenchmark: - def __init__(self, data_root="data/megadepth", scene_names=None) -> None: - if scene_names is None: - self.scene_names = [ - "0015_0.1_0.3.npz", - "0015_0.3_0.5.npz", - "0022_0.1_0.3.npz", - "0022_0.3_0.5.npz", - "0022_0.5_0.7.npz", - ] - else: - self.scene_names = scene_names - self.scenes = [ - np.load(f"{data_root}/{scene}", allow_pickle=True) - for scene in self.scene_names - ] - self.data_root = data_root - - def benchmark( - self, - detector_model, - descriptor_model, - matcher_model, - model_name=None, - resolution=None, - scale_intrinsics=True, - calibrated=True, - ): - with torch.no_grad(): - data_root = self.data_root - tot_e_t, tot_e_R, tot_e_pose = [], [], [] - thresholds = [5, 10, 20] - for scene_ind in range(len(self.scenes)): - import os - - scene_name = os.path.splitext(self.scene_names[scene_ind])[0] - scene = self.scenes[scene_ind] - pairs = scene["pair_infos"] - intrinsics = scene["intrinsics"] - poses = scene["poses"] - im_paths = scene["image_paths"] - pair_inds = range(len(pairs)) - for pairind in tqdm(pair_inds): - idx1, idx2 = pairs[pairind][0] - K1 = intrinsics[idx1].copy() - T1 = poses[idx1].copy() - R1, t1 = T1[:3, :3], T1[:3, 3] - K2 = intrinsics[idx2].copy() - T2 = poses[idx2].copy() - R2, t2 = T2[:3, :3], T2[:3, 3] - R, t = compute_relative_pose(R1, t1, R2, t2) - T1_to_2 = np.concatenate((R, t[:, None]), axis=-1) - im_A_path = f"{data_root}/{im_paths[idx1]}" - im_B_path = f"{data_root}/{im_paths[idx2]}" - detections_A = detector_model.detect_from_path(im_A_path) - keypoints_A, P_A = ( - detections_A["keypoints"], - detections_A["confidence"], - ) - detections_B = detector_model.detect_from_path(im_B_path) - keypoints_B, P_B = ( - detections_B["keypoints"], - detections_B["confidence"], - ) - description_A = descriptor_model.describe_keypoints_from_path( - im_A_path, keypoints_A - )["descriptions"] - description_B = descriptor_model.describe_keypoints_from_path( - im_B_path, keypoints_B - )["descriptions"] - matches_A, matches_B, batch_ids = matcher_model.match( - keypoints_A, - description_A, - keypoints_B, - description_B, - P_A=P_A, - P_B=P_B, - normalize=True, - inv_temp=20, - threshold=0.01, - ) - - im_A = Image.open(im_A_path) - w1, h1 = im_A.size - im_B = Image.open(im_B_path) - w2, h2 = im_B.size - if scale_intrinsics: - scale1 = 1200 / max(w1, h1) - scale2 = 1200 / max(w2, h2) - w1, h1 = scale1 * w1, scale1 * h1 - w2, h2 = scale2 * w2, scale2 * h2 - K1, K2 = K1.copy(), K2.copy() - K1[:2] = K1[:2] * scale1 - K2[:2] = K2[:2] * scale2 - kpts1, kpts2 = matcher_model.to_pixel_coords( - matches_A, matches_B, h1, w1, h2, w2 - ) - for _ in range(1): - shuffling = np.random.permutation(np.arange(len(kpts1))) - kpts1 = 
kpts1[shuffling] - kpts2 = kpts2[shuffling] - try: - threshold = 0.5 - if calibrated: - norm_threshold = threshold / ( - np.mean(np.abs(K1[:2, :2])) - + np.mean(np.abs(K2[:2, :2])) - ) - R_est, t_est, mask = estimate_pose( - kpts1.cpu().numpy(), - kpts2.cpu().numpy(), - K1, - K2, - norm_threshold, - conf=0.99999, - ) - T1_to_2_est = np.concatenate((R_est, t_est), axis=-1) # - e_t, e_R = compute_pose_error(T1_to_2_est, R, t) - e_pose = max(e_t, e_R) - except Exception as e: - print(repr(e)) - e_t, e_R = 90, 90 - e_pose = max(e_t, e_R) - tot_e_t.append(e_t) - tot_e_R.append(e_R) - tot_e_pose.append(e_pose) - tot_e_pose = np.array(tot_e_pose) - auc = pose_auc(tot_e_pose, thresholds) - acc_5 = (tot_e_pose < 5).mean() - acc_10 = (tot_e_pose < 10).mean() - acc_15 = (tot_e_pose < 15).mean() - acc_20 = (tot_e_pose < 20).mean() - map_5 = acc_5 - map_10 = np.mean([acc_5, acc_10]) - map_20 = np.mean([acc_5, acc_10, acc_15, acc_20]) - print(f"{model_name} auc: {auc}") - return { - "auc_5": auc[0], - "auc_10": auc[1], - "auc_20": auc[2], - "map_5": map_5, - "map_10": map_10, - "map_20": map_20, - } diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py deleted file mode 100644 index e3226c71cf8457dce65652553132ad1ddbf214f7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py +++ /dev/null @@ -1,469 +0,0 @@ -from multiprocessing import Pool - -import annotator.uniformer.mmcv as mmcv -import numpy as np -from annotator.uniformer.mmcv.utils import print_log -from terminaltables import AsciiTable - -from .bbox_overlaps import bbox_overlaps -from .class_names import get_classes - - -def average_precision(recalls, precisions, mode='area'): - """Calculate average precision (for single or multiple scales). 
- - Args: - recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) - precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) - mode (str): 'area' or '11points', 'area' means calculating the area - under precision-recall curve, '11points' means calculating - the average precision of recalls at [0, 0.1, ..., 1] - - Returns: - float or ndarray: calculated average precision - """ - no_scale = False - if recalls.ndim == 1: - no_scale = True - recalls = recalls[np.newaxis, :] - precisions = precisions[np.newaxis, :] - assert recalls.shape == precisions.shape and recalls.ndim == 2 - num_scales = recalls.shape[0] - ap = np.zeros(num_scales, dtype=np.float32) - if mode == 'area': - zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) - ones = np.ones((num_scales, 1), dtype=recalls.dtype) - mrec = np.hstack((zeros, recalls, ones)) - mpre = np.hstack((zeros, precisions, zeros)) - for i in range(mpre.shape[1] - 1, 0, -1): - mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) - for i in range(num_scales): - ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] - ap[i] = np.sum( - (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) - elif mode == '11points': - for i in range(num_scales): - for thr in np.arange(0, 1 + 1e-3, 0.1): - precs = precisions[i, recalls[i, :] >= thr] - prec = precs.max() if precs.size > 0 else 0 - ap[i] += prec - ap /= 11 - else: - raise ValueError( - 'Unrecognized mode, only "area" and "11points" are supported') - if no_scale: - ap = ap[0] - return ap - - -def tpfp_imagenet(det_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - default_iou_thr=0.5, - area_ranges=None): - """Check if detected bboxes are true positive or false positive. - - Args: - det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). - gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). - gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, - of shape (k, 4). Default: None - default_iou_thr (float): IoU threshold to be considered as matched for - medium and large bboxes (small ones have special rules). - Default: 0.5. - area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, - in the format [(min1, max1), (min2, max2), ...]. Default: None. - - Returns: - tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of - each array is (num_scales, m). - """ - # an indicator of ignored gts - gt_ignore_inds = np.concatenate( - (np.zeros(gt_bboxes.shape[0], dtype=np.bool), - np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) - # stack gt_bboxes and gt_bboxes_ignore for convenience - gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) - - num_dets = det_bboxes.shape[0] - num_gts = gt_bboxes.shape[0] - if area_ranges is None: - area_ranges = [(None, None)] - num_scales = len(area_ranges) - # tp and fp are of shape (num_scales, num_gts), each row is tp or fp - # of a certain scale. - tp = np.zeros((num_scales, num_dets), dtype=np.float32) - fp = np.zeros((num_scales, num_dets), dtype=np.float32) - if gt_bboxes.shape[0] == 0: - if area_ranges == [(None, None)]: - fp[...] 
= 1 - else: - det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * ( - det_bboxes[:, 3] - det_bboxes[:, 1]) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - return tp, fp - ious = bbox_overlaps(det_bboxes, gt_bboxes - 1) - gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] - gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] - iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), - default_iou_thr) - # sort all detections by scores in descending order - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - gt_covered = np.zeros(num_gts, dtype=bool) - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = gt_w * gt_h - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - max_iou = -1 - matched_gt = -1 - # find best overlapped available gt - for j in range(num_gts): - # different from PASCAL VOC: allow finding other gts if the - # best overlapped ones are already matched by other det bboxes - if gt_covered[j]: - continue - elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: - max_iou = ious[i, j] - matched_gt = j - # there are 4 cases for a det bbox: - # 1. it matches a gt, tp = 1, fp = 0 - # 2. it matches an ignored gt, tp = 0, fp = 0 - # 3. it matches no gt and within area range, tp = 0, fp = 1 - # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 - if matched_gt >= 0: - gt_covered[matched_gt] = 1 - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - tp[k, i] = 1 - elif min_area is None: - fp[k, i] = 1 - else: - bbox = det_bboxes[i, :4] - area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - if area >= min_area and area < max_area: - fp[k, i] = 1 - return tp, fp - - -def tpfp_default(det_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - iou_thr=0.5, - area_ranges=None): - """Check if detected bboxes are true positive or false positive. - - Args: - det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). - gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). - gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, - of shape (k, 4). Default: None - iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, - in the format [(min1, max1), (min2, max2), ...]. Default: None. - - Returns: - tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of - each array is (num_scales, m). - """ - # an indicator of ignored gts - gt_ignore_inds = np.concatenate( - (np.zeros(gt_bboxes.shape[0], dtype=np.bool), - np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) - # stack gt_bboxes and gt_bboxes_ignore for convenience - gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) - - num_dets = det_bboxes.shape[0] - num_gts = gt_bboxes.shape[0] - if area_ranges is None: - area_ranges = [(None, None)] - num_scales = len(area_ranges) - # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of - # a certain scale - tp = np.zeros((num_scales, num_dets), dtype=np.float32) - fp = np.zeros((num_scales, num_dets), dtype=np.float32) - - # if there is no gt bboxes in this image, then all det bboxes - # within area range are false positives - if gt_bboxes.shape[0] == 0: - if area_ranges == [(None, None)]: - fp[...] 
= 1 - else: - det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * ( - det_bboxes[:, 3] - det_bboxes[:, 1]) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - return tp, fp - - ious = bbox_overlaps(det_bboxes, gt_bboxes) - # for each det, the max iou with all gts - ious_max = ious.max(axis=1) - # for each det, which gt overlaps most with it - ious_argmax = ious.argmax(axis=1) - # sort all dets in descending order by scores - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - gt_covered = np.zeros(num_gts, dtype=bool) - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1]) - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - if ious_max[i] >= iou_thr: - matched_gt = ious_argmax[i] - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - if not gt_covered[matched_gt]: - gt_covered[matched_gt] = True - tp[k, i] = 1 - else: - fp[k, i] = 1 - # otherwise ignore this detected bbox, tp = 0, fp = 0 - elif min_area is None: - fp[k, i] = 1 - else: - bbox = det_bboxes[i, :4] - area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - if area >= min_area and area < max_area: - fp[k, i] = 1 - return tp, fp - - -def get_cls_results(det_results, annotations, class_id): - """Get det results and gt information of a certain class. - - Args: - det_results (list[list]): Same as `eval_map()`. - annotations (list[dict]): Same as `eval_map()`. - class_id (int): ID of a specific class. - - Returns: - tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes - """ - cls_dets = [img_res[class_id] for img_res in det_results] - cls_gts = [] - cls_gts_ignore = [] - for ann in annotations: - gt_inds = ann['labels'] == class_id - cls_gts.append(ann['bboxes'][gt_inds, :]) - - if ann.get('labels_ignore', None) is not None: - ignore_inds = ann['labels_ignore'] == class_id - cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) - else: - cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) - - return cls_dets, cls_gts, cls_gts_ignore - - -def eval_map(det_results, - annotations, - scale_ranges=None, - iou_thr=0.5, - dataset=None, - logger=None, - tpfp_fn=None, - nproc=4): - """Evaluate mAP of a dataset. - - Args: - det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. - The outer list indicates images, and the inner list indicates - per-class detected bboxes. - annotations (list[dict]): Ground truth annotations where each item of - the list indicates an image. Keys of annotations are: - - - `bboxes`: numpy array of shape (n, 4) - - `labels`: numpy array of shape (n, ) - - `bboxes_ignore` (optional): numpy array of shape (k, 4) - - `labels_ignore` (optional): numpy array of shape (k, ) - scale_ranges (list[tuple] | None): Range of scales to be evaluated, - in the format [(min1, max1), (min2, max2), ...]. A range of - (32, 64) means the area range between (32**2, 64**2). - Default: None. - iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - dataset (list[str] | str | None): Dataset name or dataset classes, - there are minor differences in metrics for different datsets, e.g. - "voc07", "imagenet_det", etc. Default: None. - logger (logging.Logger | str | None): The way to print the mAP - summary. 
See `mmcv.utils.print_log()` for details. Default: None. - tpfp_fn (callable | None): The function used to determine true/ - false positives. If None, :func:`tpfp_default` is used as default - unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this - case). If it is given as a function, then this function is used - to evaluate tp & fp. Default None. - nproc (int): Processes used for computing TP and FP. - Default: 4. - - Returns: - tuple: (mAP, [dict, dict, ...]) - """ - assert len(det_results) == len(annotations) - - num_imgs = len(det_results) - num_scales = len(scale_ranges) if scale_ranges is not None else 1 - num_classes = len(det_results[0]) # positive class num - area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] - if scale_ranges is not None else None) - - pool = Pool(nproc) - eval_results = [] - for i in range(num_classes): - # get gt and det bboxes of this class - cls_dets, cls_gts, cls_gts_ignore = get_cls_results( - det_results, annotations, i) - # choose proper function according to datasets to compute tp and fp - if tpfp_fn is None: - if dataset in ['det', 'vid']: - tpfp_fn = tpfp_imagenet - else: - tpfp_fn = tpfp_default - if not callable(tpfp_fn): - raise ValueError( - f'tpfp_fn has to be a function or None, but got {tpfp_fn}') - - # compute tp and fp for each image with multiple processes - tpfp = pool.starmap( - tpfp_fn, - zip(cls_dets, cls_gts, cls_gts_ignore, - [iou_thr for _ in range(num_imgs)], - [area_ranges for _ in range(num_imgs)])) - tp, fp = tuple(zip(*tpfp)) - # calculate gt number of each scale - # ignored gts or gts beyond the specific scale are not counted - num_gts = np.zeros(num_scales, dtype=int) - for j, bbox in enumerate(cls_gts): - if area_ranges is None: - num_gts[0] += bbox.shape[0] - else: - gt_areas = (bbox[:, 2] - bbox[:, 0]) * ( - bbox[:, 3] - bbox[:, 1]) - for k, (min_area, max_area) in enumerate(area_ranges): - num_gts[k] += np.sum((gt_areas >= min_area) - & (gt_areas < max_area)) - # sort all det bboxes by score, also sort tp and fp - cls_dets = np.vstack(cls_dets) - num_dets = cls_dets.shape[0] - sort_inds = np.argsort(-cls_dets[:, -1]) - tp = np.hstack(tp)[:, sort_inds] - fp = np.hstack(fp)[:, sort_inds] - # calculate recall and precision with tp and fp - tp = np.cumsum(tp, axis=1) - fp = np.cumsum(fp, axis=1) - eps = np.finfo(np.float32).eps - recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) - precisions = tp / np.maximum((tp + fp), eps) - # calculate AP - if scale_ranges is None: - recalls = recalls[0, :] - precisions = precisions[0, :] - num_gts = num_gts.item() - mode = 'area' if dataset != 'voc07' else '11points' - ap = average_precision(recalls, precisions, mode) - eval_results.append({ - 'num_gts': num_gts, - 'num_dets': num_dets, - 'recall': recalls, - 'precision': precisions, - 'ap': ap - }) - pool.close() - if scale_ranges is not None: - # shape (num_classes, num_scales) - all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) - all_num_gts = np.vstack( - [cls_result['num_gts'] for cls_result in eval_results]) - mean_ap = [] - for i in range(num_scales): - if np.any(all_num_gts[:, i] > 0): - mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) - else: - mean_ap.append(0.0) - else: - aps = [] - for cls_result in eval_results: - if cls_result['num_gts'] > 0: - aps.append(cls_result['ap']) - mean_ap = np.array(aps).mean().item() if aps else 0.0 - - print_map_summary( - mean_ap, eval_results, dataset, area_ranges, logger=logger) - - return mean_ap, eval_results - - -def 
print_map_summary(mean_ap, - results, - dataset=None, - scale_ranges=None, - logger=None): - """Print mAP and results of each class. - - A table will be printed to show the gts/dets/recall/AP of each class and - the mAP. - - Args: - mean_ap (float): Calculated from `eval_map()`. - results (list[dict]): Calculated from `eval_map()`. - dataset (list[str] | str | None): Dataset name or dataset classes. - scale_ranges (list[tuple] | None): Range of scales to be evaluated. - logger (logging.Logger | str | None): The way to print the mAP - summary. See `mmcv.utils.print_log()` for details. Default: None. - """ - - if logger == 'silent': - return - - if isinstance(results[0]['ap'], np.ndarray): - num_scales = len(results[0]['ap']) - else: - num_scales = 1 - - if scale_ranges is not None: - assert len(scale_ranges) == num_scales - - num_classes = len(results) - - recalls = np.zeros((num_scales, num_classes), dtype=np.float32) - aps = np.zeros((num_scales, num_classes), dtype=np.float32) - num_gts = np.zeros((num_scales, num_classes), dtype=int) - for i, cls_result in enumerate(results): - if cls_result['recall'].size > 0: - recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] - aps[:, i] = cls_result['ap'] - num_gts[:, i] = cls_result['num_gts'] - - if dataset is None: - label_names = [str(i) for i in range(num_classes)] - elif mmcv.is_str(dataset): - label_names = get_classes(dataset) - else: - label_names = dataset - - if not isinstance(mean_ap, list): - mean_ap = [mean_ap] - - header = ['class', 'gts', 'dets', 'recall', 'ap'] - for i in range(num_scales): - if scale_ranges is not None: - print_log(f'Scale range {scale_ranges[i]}', logger=logger) - table_data = [header] - for j in range(num_classes): - row_data = [ - label_names[j], num_gts[i, j], results[j]['num_dets'], - f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' - ] - table_data.append(row_data) - table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) - table = AsciiTable(table_data) - table.inner_footing_row_border = True - print_log('\n' + table.table, logger=logger) diff --git a/spaces/Shivraj8615/Huggy/README.md b/spaces/Shivraj8615/Huggy/README.md deleted file mode 100644 index 8ed01098734fca978b02080c610af11075d900ed..0000000000000000000000000000000000000000 --- a/spaces/Shivraj8615/Huggy/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Huggy -emoji: 🐶 -colorFrom: red -colorTo: indigo -sdk: static -pinned: false -license: cc-by-nc-sa-4.0 -duplicated_from: ThomasSimonini/Huggy ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/cycler.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/cycler.py deleted file mode 100644 index f86b68de64b8066b98d8fa2d92bf5983ea582237..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/cycler.py +++ /dev/null @@ -1,501 +0,0 @@ -""" -Cycler -====== - -Cycling through combinations of values, producing dictionaries. 
- -You can add cyclers:: - - from cycler import cycler - cc = (cycler(color=list('rgb')) + - cycler(linestyle=['-', '--', '-.'])) - for d in cc: - print(d) - -Results in:: - - {'color': 'r', 'linestyle': '-'} - {'color': 'g', 'linestyle': '--'} - {'color': 'b', 'linestyle': '-.'} - - -You can multiply cyclers:: - - from cycler import cycler - cc = (cycler(color=list('rgb')) * - cycler(linestyle=['-', '--', '-.'])) - for d in cc: - print(d) - -Results in:: - - {'color': 'r', 'linestyle': '-'} - {'color': 'r', 'linestyle': '--'} - {'color': 'r', 'linestyle': '-.'} - {'color': 'g', 'linestyle': '-'} - {'color': 'g', 'linestyle': '--'} - {'color': 'g', 'linestyle': '-.'} - {'color': 'b', 'linestyle': '-'} - {'color': 'b', 'linestyle': '--'} - {'color': 'b', 'linestyle': '-.'} -""" - - -import copy -from functools import reduce -from itertools import product, cycle -from operator import mul, add - -__version__ = '0.10.0' - - -def _process_keys(left, right): - """ - Helper function to compose cycler keys. - - Parameters - ---------- - left, right : iterable of dictionaries or None - The cyclers to be composed. - - Returns - ------- - keys : set - The keys in the composition of the two cyclers. - """ - l_peek = next(iter(left)) if left is not None else {} - r_peek = next(iter(right)) if right is not None else {} - l_key = set(l_peek.keys()) - r_key = set(r_peek.keys()) - if l_key & r_key: - raise ValueError("Can not compose overlapping cycles") - return l_key | r_key - - -def concat(left, right): - r""" - Concatenate `Cycler`\s, as if chained using `itertools.chain`. - - The keys must match exactly. - - Examples - -------- - >>> num = cycler('a', range(3)) - >>> let = cycler('a', 'abc') - >>> num.concat(let) - cycler('a', [0, 1, 2, 'a', 'b', 'c']) - - Returns - ------- - `Cycler` - The concatenated cycler. - """ - if left.keys != right.keys: - raise ValueError("Keys do not match:\n" - "\tIntersection: {both!r}\n" - "\tDisjoint: {just_one!r}".format( - both=left.keys & right.keys, - just_one=left.keys ^ right.keys)) - _l = left.by_key() - _r = right.by_key() - return reduce(add, (_cycler(k, _l[k] + _r[k]) for k in left.keys)) - - -class Cycler: - """ - Composable cycles. - - This class has compositions methods: - - ``+`` - for 'inner' products (zip) - - ``+=`` - in-place ``+`` - - ``*`` - for outer products (`itertools.product`) and integer multiplication - - ``*=`` - in-place ``*`` - - and supports basic slicing via ``[]``. - - Parameters - ---------- - left, right : Cycler or None - The 'left' and 'right' cyclers. - op : func or None - Function which composes the 'left' and 'right' cyclers. - """ - - def __call__(self): - return cycle(self) - - def __init__(self, left, right=None, op=None): - """ - Semi-private init. - - Do not use this directly, use `cycler` function instead. 
- """ - if isinstance(left, Cycler): - self._left = Cycler(left._left, left._right, left._op) - elif left is not None: - # Need to copy the dictionary or else that will be a residual - # mutable that could lead to strange errors - self._left = [copy.copy(v) for v in left] - else: - self._left = None - - if isinstance(right, Cycler): - self._right = Cycler(right._left, right._right, right._op) - elif right is not None: - # Need to copy the dictionary or else that will be a residual - # mutable that could lead to strange errors - self._right = [copy.copy(v) for v in right] - else: - self._right = None - - self._keys = _process_keys(self._left, self._right) - self._op = op - - def __contains__(self, k): - return k in self._keys - - @property - def keys(self): - """The keys this Cycler knows about.""" - return set(self._keys) - - def change_key(self, old, new): - """ - Change a key in this cycler to a new name. - Modification is performed in-place. - - Does nothing if the old key is the same as the new key. - Raises a ValueError if the new key is already a key. - Raises a KeyError if the old key isn't a key. - """ - if old == new: - return - if new in self._keys: - raise ValueError( - "Can't replace {old} with {new}, {new} is already a key" - .format(old=old, new=new) - ) - if old not in self._keys: - raise KeyError("Can't replace {old} with {new}, {old} is not a key" - .format(old=old, new=new)) - - self._keys.remove(old) - self._keys.add(new) - - if self._right is not None and old in self._right.keys: - self._right.change_key(old, new) - - # self._left should always be non-None - # if self._keys is non-empty. - elif isinstance(self._left, Cycler): - self._left.change_key(old, new) - else: - # It should be completely safe at this point to - # assume that the old key can be found in each - # iteration. - self._left = [{new: entry[old]} for entry in self._left] - - @classmethod - def _from_iter(cls, label, itr): - """ - Class method to create 'base' Cycler objects - that do not have a 'right' or 'op' and for which - the 'left' object is not another Cycler. - - Parameters - ---------- - label : str - The property key. - - itr : iterable - Finite length iterable of the property values. - - Returns - ------- - `Cycler` - New 'base' cycler. - """ - ret = cls(None) - ret._left = list({label: v} for v in itr) - ret._keys = {label} - return ret - - def __getitem__(self, key): - # TODO : maybe add numpy style fancy slicing - if isinstance(key, slice): - trans = self.by_key() - return reduce(add, (_cycler(k, v[key]) for k, v in trans.items())) - else: - raise ValueError("Can only use slices with Cycler.__getitem__") - - def __iter__(self): - if self._right is None: - for left in self._left: - yield dict(left) - else: - for a, b in self._op(self._left, self._right): - out = {} - out.update(a) - out.update(b) - yield out - - def __add__(self, other): - """ - Pair-wise combine two equal length cyclers (zip). - - Parameters - ---------- - other : Cycler - """ - if len(self) != len(other): - raise ValueError("Can only add equal length cycles, " - f"not {len(self)} and {len(other)}") - return Cycler(self, other, zip) - - def __mul__(self, other): - """ - Outer product of two cyclers (`itertools.product`) or integer - multiplication. 
- - Parameters - ---------- - other : Cycler or int - """ - if isinstance(other, Cycler): - return Cycler(self, other, product) - elif isinstance(other, int): - trans = self.by_key() - return reduce(add, (_cycler(k, v*other) for k, v in trans.items())) - else: - return NotImplemented - - def __rmul__(self, other): - return self * other - - def __len__(self): - op_dict = {zip: min, product: mul} - if self._right is None: - return len(self._left) - l_len = len(self._left) - r_len = len(self._right) - return op_dict[self._op](l_len, r_len) - - def __iadd__(self, other): - """ - In-place pair-wise combine two equal length cyclers (zip). - - Parameters - ---------- - other : Cycler - """ - if not isinstance(other, Cycler): - raise TypeError("Cannot += with a non-Cycler object") - # True shallow copy of self is fine since this is in-place - old_self = copy.copy(self) - self._keys = _process_keys(old_self, other) - self._left = old_self - self._op = zip - self._right = Cycler(other._left, other._right, other._op) - return self - - def __imul__(self, other): - """ - In-place outer product of two cyclers (`itertools.product`). - - Parameters - ---------- - other : Cycler - """ - if not isinstance(other, Cycler): - raise TypeError("Cannot *= with a non-Cycler object") - # True shallow copy of self is fine since this is in-place - old_self = copy.copy(self) - self._keys = _process_keys(old_self, other) - self._left = old_self - self._op = product - self._right = Cycler(other._left, other._right, other._op) - return self - - def __eq__(self, other): - if len(self) != len(other): - return False - if self.keys ^ other.keys: - return False - return all(a == b for a, b in zip(self, other)) - - def __ne__(self, other): - return not (self == other) - - __hash__ = None - - def __repr__(self): - op_map = {zip: '+', product: '*'} - if self._right is None: - lab = self.keys.pop() - itr = list(v[lab] for v in self) - return f"cycler({lab!r}, {itr!r})" - else: - op = op_map.get(self._op, '?') - msg = "({left!r} {op} {right!r})" - return msg.format(left=self._left, op=op, right=self._right) - - def _repr_html_(self): - # an table showing the value of each key through a full cycle - output = "<table>" - sorted_keys = sorted(self.keys, key=repr) - for key in sorted_keys: - output += f"<th>{key!r}</th>" - for d in iter(self): - output += "<tr>" - for k in sorted_keys: - output += f"<td>{d[k]!r}</td>" - output += "</tr>" - output += "</table>" - return output
- - def by_key(self): - """ - Values by key. - - This returns the transposed values of the cycler. Iterating - over a `Cycler` yields dicts with a single value for each key, - this method returns a `dict` of `list` which are the values - for the given key. - - The returned value can be used to create an equivalent `Cycler` - using only `+`. - - Returns - ------- - transpose : dict - dict of lists of the values for each key. - """ - - # TODO : sort out if this is a bottle neck, if there is a better way - # and if we care. - - keys = self.keys - out = {k: list() for k in keys} - - for d in self: - for k in keys: - out[k].append(d[k]) - return out - - # for back compatibility - _transpose = by_key - - def simplify(self): - """ - Simplify the cycler into a sum (but no products) of cyclers. - - Returns - ------- - simple : Cycler - """ - # TODO: sort out if it is worth the effort to make sure this is - # balanced. Currently it is is - # (((a + b) + c) + d) vs - # ((a + b) + (c + d)) - # I would believe that there is some performance implications - trans = self.by_key() - return reduce(add, (_cycler(k, v) for k, v in trans.items())) - - concat = concat - - -def cycler(*args, **kwargs): - """ - Create a new `Cycler` object from a single positional argument, - a pair of positional arguments, or the combination of keyword arguments. - - cycler(arg) - cycler(label1=itr1[, label2=iter2[, ...]]) - cycler(label, itr) - - Form 1 simply copies a given `Cycler` object. - - Form 2 composes a `Cycler` as an inner product of the - pairs of keyword arguments. In other words, all of the - iterables are cycled simultaneously, as if through zip(). - - Form 3 creates a `Cycler` from a label and an iterable. - This is useful for when the label cannot be a keyword argument - (e.g., an integer or a name that has a space in it). - - Parameters - ---------- - arg : Cycler - Copy constructor for Cycler (does a shallow copy of iterables). - label : name - The property key. In the 2-arg form of the function, - the label can be any hashable object. In the keyword argument - form of the function, it must be a valid python identifier. - itr : iterable - Finite length iterable of the property values. - Can be a single-property `Cycler` that would - be like a key change, but as a shallow copy. - - Returns - ------- - cycler : Cycler - New `Cycler` for the given property - - """ - if args and kwargs: - raise TypeError("cyl() can only accept positional OR keyword " - "arguments -- not both.") - - if len(args) == 1: - if not isinstance(args[0], Cycler): - raise TypeError("If only one positional argument given, it must " - "be a Cycler instance.") - return Cycler(args[0]) - elif len(args) == 2: - return _cycler(*args) - elif len(args) > 2: - raise TypeError("Only a single Cycler can be accepted as the lone " - "positional argument. Use keyword arguments instead.") - - if kwargs: - return reduce(add, (_cycler(k, v) for k, v in kwargs.items())) - - raise TypeError("Must have at least a positional OR keyword arguments") - - -def _cycler(label, itr): - """ - Create a new `Cycler` object from a property name and iterable of values. - - Parameters - ---------- - label : hashable - The property key. - itr : iterable - Finite length iterable of the property values.
- - Returns - ------- - cycler : Cycler - New `Cycler` for the given property - """ - if isinstance(itr, Cycler): - keys = itr.keys - if len(keys) != 1: - msg = "Can not create Cycler from a multi-property Cycler" - raise ValueError(msg) - - lab = keys.pop() - # Doesn't need to be a new list because - # _from_iter() will be creating that new list anyway. - itr = (v[lab] for v in itr) - - return Cycler._from_iter(label, itr) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/advapi32.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/advapi32.py deleted file mode 100644 index 4e49889eedee425d3d32643d78f2bb48db392622..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/advapi32.py +++ /dev/null @@ -1,3209 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright (c) 2009-2014, Mario Vilas -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice,this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -Wrapper for advapi32.dll in ctypes. -""" - -__revision__ = "$Id$" - -from winappdbg.win32.defines import * -from winappdbg.win32.kernel32 import * - -# XXX TODO -# + add transacted registry operations - -#============================================================================== -# This is used later on to calculate the list of exported symbols. 
-_all = None -_all = set(vars().keys()) -#============================================================================== - -#--- Constants ---------------------------------------------------------------- - -# Privilege constants -SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege" -SE_AUDIT_NAME = "SeAuditPrivilege" -SE_BACKUP_NAME = "SeBackupPrivilege" -SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege" -SE_CREATE_GLOBAL_NAME = "SeCreateGlobalPrivilege" -SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege" -SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege" -SE_CREATE_SYMBOLIC_LINK_NAME = "SeCreateSymbolicLinkPrivilege" -SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege" -SE_DEBUG_NAME = "SeDebugPrivilege" -SE_ENABLE_DELEGATION_NAME = "SeEnableDelegationPrivilege" -SE_IMPERSONATE_NAME = "SeImpersonatePrivilege" -SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege" -SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege" -SE_INC_WORKING_SET_NAME = "SeIncreaseWorkingSetPrivilege" -SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege" -SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege" -SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege" -SE_MANAGE_VOLUME_NAME = "SeManageVolumePrivilege" -SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege" -SE_RELABEL_NAME = "SeRelabelPrivilege" -SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege" -SE_RESTORE_NAME = "SeRestorePrivilege" -SE_SECURITY_NAME = "SeSecurityPrivilege" -SE_SHUTDOWN_NAME = "SeShutdownPrivilege" -SE_SYNC_AGENT_NAME = "SeSyncAgentPrivilege" -SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege" -SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege" -SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege" -SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege" -SE_TCB_NAME = "SeTcbPrivilege" -SE_TIME_ZONE_NAME = "SeTimeZonePrivilege" -SE_TRUSTED_CREDMAN_ACCESS_NAME = "SeTrustedCredManAccessPrivilege" -SE_UNDOCK_NAME = "SeUndockPrivilege" -SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege" - -SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 -SE_PRIVILEGE_ENABLED = 0x00000002 -SE_PRIVILEGE_REMOVED = 0x00000004 -SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 - -TOKEN_ADJUST_PRIVILEGES = 0x00000020 - -LOGON_WITH_PROFILE = 0x00000001 -LOGON_NETCREDENTIALS_ONLY = 0x00000002 - -# Token access rights -TOKEN_ASSIGN_PRIMARY = 0x0001 -TOKEN_DUPLICATE = 0x0002 -TOKEN_IMPERSONATE = 0x0004 -TOKEN_QUERY = 0x0008 -TOKEN_QUERY_SOURCE = 0x0010 -TOKEN_ADJUST_PRIVILEGES = 0x0020 -TOKEN_ADJUST_GROUPS = 0x0040 -TOKEN_ADJUST_DEFAULT = 0x0080 -TOKEN_ADJUST_SESSIONID = 0x0100 -TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY) -TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY | - TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE | - TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT | - TOKEN_ADJUST_SESSIONID) - -# Predefined HKEY values -HKEY_CLASSES_ROOT = 0x80000000 -HKEY_CURRENT_USER = 0x80000001 -HKEY_LOCAL_MACHINE = 0x80000002 -HKEY_USERS = 0x80000003 -HKEY_PERFORMANCE_DATA = 0x80000004 -HKEY_CURRENT_CONFIG = 0x80000005 - -# Registry access rights -KEY_ALL_ACCESS = 0xF003F -KEY_CREATE_LINK = 0x0020 -KEY_CREATE_SUB_KEY = 0x0004 -KEY_ENUMERATE_SUB_KEYS = 0x0008 -KEY_EXECUTE = 0x20019 -KEY_NOTIFY = 0x0010 -KEY_QUERY_VALUE = 0x0001 -KEY_READ = 0x20019 -KEY_SET_VALUE = 0x0002 -KEY_WOW64_32KEY = 0x0200 -KEY_WOW64_64KEY = 0x0100 -KEY_WRITE = 0x20006 - -# Registry value types -REG_NONE = 0 -REG_SZ = 1 -REG_EXPAND_SZ = 2 -REG_BINARY = 3 -REG_DWORD = 4 -REG_DWORD_LITTLE_ENDIAN = REG_DWORD 
-REG_DWORD_BIG_ENDIAN = 5 -REG_LINK = 6 -REG_MULTI_SZ = 7 -REG_RESOURCE_LIST = 8 -REG_FULL_RESOURCE_DESCRIPTOR = 9 -REG_RESOURCE_REQUIREMENTS_LIST = 10 -REG_QWORD = 11 -REG_QWORD_LITTLE_ENDIAN = REG_QWORD - -#--- TOKEN_PRIVILEGE structure ------------------------------------------------ - -# typedef struct _LUID { -# DWORD LowPart; -# LONG HighPart; -# } LUID, -# *PLUID; -class LUID(Structure): - _fields_ = [ - ("LowPart", DWORD), - ("HighPart", LONG), - ] - -PLUID = POINTER(LUID) - -# typedef struct _LUID_AND_ATTRIBUTES { -# LUID Luid; -# DWORD Attributes; -# } LUID_AND_ATTRIBUTES, -# *PLUID_AND_ATTRIBUTES; -class LUID_AND_ATTRIBUTES(Structure): - _fields_ = [ - ("Luid", LUID), - ("Attributes", DWORD), - ] - -# typedef struct _TOKEN_PRIVILEGES { -# DWORD PrivilegeCount; -# LUID_AND_ATTRIBUTES Privileges[ANYSIZE_ARRAY]; -# } TOKEN_PRIVILEGES, -# *PTOKEN_PRIVILEGES; -class TOKEN_PRIVILEGES(Structure): - _fields_ = [ - ("PrivilegeCount", DWORD), -## ("Privileges", LUID_AND_ATTRIBUTES * ANYSIZE_ARRAY), - ("Privileges", LUID_AND_ATTRIBUTES), - ] - # See comments on AdjustTokenPrivileges about this structure - -PTOKEN_PRIVILEGES = POINTER(TOKEN_PRIVILEGES) - -#--- GetTokenInformation enums and structures --------------------------------- - -# typedef enum _TOKEN_INFORMATION_CLASS { -# TokenUser = 1, -# TokenGroups, -# TokenPrivileges, -# TokenOwner, -# TokenPrimaryGroup, -# TokenDefaultDacl, -# TokenSource, -# TokenType, -# TokenImpersonationLevel, -# TokenStatistics, -# TokenRestrictedSids, -# TokenSessionId, -# TokenGroupsAndPrivileges, -# TokenSessionReference, -# TokenSandBoxInert, -# TokenAuditPolicy, -# TokenOrigin, -# TokenElevationType, -# TokenLinkedToken, -# TokenElevation, -# TokenHasRestrictions, -# TokenAccessInformation, -# TokenVirtualizationAllowed, -# TokenVirtualizationEnabled, -# TokenIntegrityLevel, -# TokenUIAccess, -# TokenMandatoryPolicy, -# TokenLogonSid, -# TokenIsAppContainer, -# TokenCapabilities, -# TokenAppContainerSid, -# TokenAppContainerNumber, -# TokenUserClaimAttributes, -# TokenDeviceClaimAttributes, -# TokenRestrictedUserClaimAttributes, -# TokenRestrictedDeviceClaimAttributes, -# TokenDeviceGroups, -# TokenRestrictedDeviceGroups, -# TokenSecurityAttributes, -# TokenIsRestricted, -# MaxTokenInfoClass -# } TOKEN_INFORMATION_CLASS, *PTOKEN_INFORMATION_CLASS; - -TOKEN_INFORMATION_CLASS = ctypes.c_int - -TokenUser = 1 -TokenGroups = 2 -TokenPrivileges = 3 -TokenOwner = 4 -TokenPrimaryGroup = 5 -TokenDefaultDacl = 6 -TokenSource = 7 -TokenType = 8 -TokenImpersonationLevel = 9 -TokenStatistics = 10 -TokenRestrictedSids = 11 -TokenSessionId = 12 -TokenGroupsAndPrivileges = 13 -TokenSessionReference = 14 -TokenSandBoxInert = 15 -TokenAuditPolicy = 16 -TokenOrigin = 17 -TokenElevationType = 18 -TokenLinkedToken = 19 -TokenElevation = 20 -TokenHasRestrictions = 21 -TokenAccessInformation = 22 -TokenVirtualizationAllowed = 23 -TokenVirtualizationEnabled = 24 -TokenIntegrityLevel = 25 -TokenUIAccess = 26 -TokenMandatoryPolicy = 27 -TokenLogonSid = 28 -TokenIsAppContainer = 29 -TokenCapabilities = 30 -TokenAppContainerSid = 31 -TokenAppContainerNumber = 32 -TokenUserClaimAttributes = 33 -TokenDeviceClaimAttributes = 34 -TokenRestrictedUserClaimAttributes = 35 -TokenRestrictedDeviceClaimAttributes = 36 -TokenDeviceGroups = 37 -TokenRestrictedDeviceGroups = 38 -TokenSecurityAttributes = 39 -TokenIsRestricted = 40 -MaxTokenInfoClass = 41 - -# typedef enum tagTOKEN_TYPE { -# TokenPrimary = 1, -# TokenImpersonation -# } TOKEN_TYPE, *PTOKEN_TYPE; - -TOKEN_TYPE = ctypes.c_int 
-PTOKEN_TYPE = POINTER(TOKEN_TYPE) - -TokenPrimary = 1 -TokenImpersonation = 2 - -# typedef enum { -# TokenElevationTypeDefault = 1, -# TokenElevationTypeFull, -# TokenElevationTypeLimited -# } TOKEN_ELEVATION_TYPE , *PTOKEN_ELEVATION_TYPE; - -TokenElevationTypeDefault = 1 -TokenElevationTypeFull = 2 -TokenElevationTypeLimited = 3 - -TOKEN_ELEVATION_TYPE = ctypes.c_int -PTOKEN_ELEVATION_TYPE = POINTER(TOKEN_ELEVATION_TYPE) - -# typedef enum _SECURITY_IMPERSONATION_LEVEL { -# SecurityAnonymous, -# SecurityIdentification, -# SecurityImpersonation, -# SecurityDelegation -# } SECURITY_IMPERSONATION_LEVEL, *PSECURITY_IMPERSONATION_LEVEL; - -SecurityAnonymous = 0 -SecurityIdentification = 1 -SecurityImpersonation = 2 -SecurityDelegation = 3 - -SECURITY_IMPERSONATION_LEVEL = ctypes.c_int -PSECURITY_IMPERSONATION_LEVEL = POINTER(SECURITY_IMPERSONATION_LEVEL) - -# typedef struct _SID_AND_ATTRIBUTES { -# PSID Sid; -# DWORD Attributes; -# } SID_AND_ATTRIBUTES, *PSID_AND_ATTRIBUTES; -class SID_AND_ATTRIBUTES(Structure): - _fields_ = [ - ("Sid", PSID), - ("Attributes", DWORD), - ] -PSID_AND_ATTRIBUTES = POINTER(SID_AND_ATTRIBUTES) - -# typedef struct _TOKEN_USER { -# SID_AND_ATTRIBUTES User; -# } TOKEN_USER, *PTOKEN_USER; -class TOKEN_USER(Structure): - _fields_ = [ - ("User", SID_AND_ATTRIBUTES), - ] -PTOKEN_USER = POINTER(TOKEN_USER) - -# typedef struct _TOKEN_MANDATORY_LABEL { -# SID_AND_ATTRIBUTES Label; -# } TOKEN_MANDATORY_LABEL, *PTOKEN_MANDATORY_LABEL; -class TOKEN_MANDATORY_LABEL(Structure): - _fields_ = [ - ("Label", SID_AND_ATTRIBUTES), - ] -PTOKEN_MANDATORY_LABEL = POINTER(TOKEN_MANDATORY_LABEL) - -# typedef struct _TOKEN_OWNER { -# PSID Owner; -# } TOKEN_OWNER, *PTOKEN_OWNER; -class TOKEN_OWNER(Structure): - _fields_ = [ - ("Owner", PSID), - ] -PTOKEN_OWNER = POINTER(TOKEN_OWNER) - -# typedef struct _TOKEN_PRIMARY_GROUP { -# PSID PrimaryGroup; -# } TOKEN_PRIMARY_GROUP, *PTOKEN_PRIMARY_GROUP; -class TOKEN_PRIMARY_GROUP(Structure): - _fields_ = [ - ("PrimaryGroup", PSID), - ] -PTOKEN_PRIMARY_GROUP = POINTER(TOKEN_PRIMARY_GROUP) - -# typedef struct _TOKEN_APPCONTAINER_INFORMATION { -# PSID TokenAppContainer; -# } TOKEN_APPCONTAINER_INFORMATION, *PTOKEN_APPCONTAINER_INFORMATION; -class TOKEN_APPCONTAINER_INFORMATION(Structure): - _fields_ = [ - ("TokenAppContainer", PSID), - ] -PTOKEN_APPCONTAINER_INFORMATION = POINTER(TOKEN_APPCONTAINER_INFORMATION) - -# typedef struct _TOKEN_ORIGIN { -# LUID OriginatingLogonSession; -# } TOKEN_ORIGIN, *PTOKEN_ORIGIN; -class TOKEN_ORIGIN(Structure): - _fields_ = [ - ("OriginatingLogonSession", LUID), - ] -PTOKEN_ORIGIN = POINTER(TOKEN_ORIGIN) - -# typedef struct _TOKEN_LINKED_TOKEN { -# HANDLE LinkedToken; -# } TOKEN_LINKED_TOKEN, *PTOKEN_LINKED_TOKEN; -class TOKEN_LINKED_TOKEN(Structure): - _fields_ = [ - ("LinkedToken", HANDLE), - ] -PTOKEN_LINKED_TOKEN = POINTER(TOKEN_LINKED_TOKEN) - -# typedef struct _TOKEN_STATISTICS { -# LUID TokenId; -# LUID AuthenticationId; -# LARGE_INTEGER ExpirationTime; -# TOKEN_TYPE TokenType; -# SECURITY_IMPERSONATION_LEVEL ImpersonationLevel; -# DWORD DynamicCharged; -# DWORD DynamicAvailable; -# DWORD GroupCount; -# DWORD PrivilegeCount; -# LUID ModifiedId; -# } TOKEN_STATISTICS, *PTOKEN_STATISTICS; -class TOKEN_STATISTICS(Structure): - _fields_ = [ - ("TokenId", LUID), - ("AuthenticationId", LUID), - ("ExpirationTime", LONGLONG), # LARGE_INTEGER - ("TokenType", TOKEN_TYPE), - ("ImpersonationLevel", SECURITY_IMPERSONATION_LEVEL), - ("DynamicCharged", DWORD), - ("DynamicAvailable", DWORD), - ("GroupCount", DWORD), - 
("PrivilegeCount", DWORD), - ("ModifiedId", LUID), - ] -PTOKEN_STATISTICS = POINTER(TOKEN_STATISTICS) - -#--- SID_NAME_USE enum -------------------------------------------------------- - -# typedef enum _SID_NAME_USE { -# SidTypeUser = 1, -# SidTypeGroup, -# SidTypeDomain, -# SidTypeAlias, -# SidTypeWellKnownGroup, -# SidTypeDeletedAccount, -# SidTypeInvalid, -# SidTypeUnknown, -# SidTypeComputer, -# SidTypeLabel -# } SID_NAME_USE, *PSID_NAME_USE; - -SidTypeUser = 1 -SidTypeGroup = 2 -SidTypeDomain = 3 -SidTypeAlias = 4 -SidTypeWellKnownGroup = 5 -SidTypeDeletedAccount = 6 -SidTypeInvalid = 7 -SidTypeUnknown = 8 -SidTypeComputer = 9 -SidTypeLabel = 10 - -#--- WAITCHAIN_NODE_INFO structure and types ---------------------------------- - -WCT_MAX_NODE_COUNT = 16 -WCT_OBJNAME_LENGTH = 128 -WCT_ASYNC_OPEN_FLAG = 1 -WCTP_OPEN_ALL_FLAGS = WCT_ASYNC_OPEN_FLAG -WCT_OUT_OF_PROC_FLAG = 1 -WCT_OUT_OF_PROC_COM_FLAG = 2 -WCT_OUT_OF_PROC_CS_FLAG = 4 -WCTP_GETINFO_ALL_FLAGS = WCT_OUT_OF_PROC_FLAG | WCT_OUT_OF_PROC_COM_FLAG | WCT_OUT_OF_PROC_CS_FLAG - -HWCT = LPVOID - -# typedef enum _WCT_OBJECT_TYPE -# { -# WctCriticalSectionType = 1, -# WctSendMessageType, -# WctMutexType, -# WctAlpcType, -# WctComType, -# WctThreadWaitType, -# WctProcessWaitType, -# WctThreadType, -# WctComActivationType, -# WctUnknownType, -# WctMaxType -# } WCT_OBJECT_TYPE; - -WCT_OBJECT_TYPE = DWORD - -WctCriticalSectionType = 1 -WctSendMessageType = 2 -WctMutexType = 3 -WctAlpcType = 4 -WctComType = 5 -WctThreadWaitType = 6 -WctProcessWaitType = 7 -WctThreadType = 8 -WctComActivationType = 9 -WctUnknownType = 10 -WctMaxType = 11 - -# typedef enum _WCT_OBJECT_STATUS -# { -# WctStatusNoAccess = 1, // ACCESS_DENIED for this object -# WctStatusRunning, // Thread status -# WctStatusBlocked, // Thread status -# WctStatusPidOnly, // Thread status -# WctStatusPidOnlyRpcss, // Thread status -# WctStatusOwned, // Dispatcher object status -# WctStatusNotOwned, // Dispatcher object status -# WctStatusAbandoned, // Dispatcher object status -# WctStatusUnknown, // All objects -# WctStatusError, // All objects -# WctStatusMax -# } WCT_OBJECT_STATUS; - -WCT_OBJECT_STATUS = DWORD - -WctStatusNoAccess = 1 # ACCESS_DENIED for this object -WctStatusRunning = 2 # Thread status -WctStatusBlocked = 3 # Thread status -WctStatusPidOnly = 4 # Thread status -WctStatusPidOnlyRpcss = 5 # Thread status -WctStatusOwned = 6 # Dispatcher object status -WctStatusNotOwned = 7 # Dispatcher object status -WctStatusAbandoned = 8 # Dispatcher object status -WctStatusUnknown = 9 # All objects -WctStatusError = 10 # All objects -WctStatusMax = 11 - -# typedef struct _WAITCHAIN_NODE_INFO { -# WCT_OBJECT_TYPE ObjectType; -# WCT_OBJECT_STATUS ObjectStatus; -# union { -# struct { -# WCHAR ObjectName[WCT_OBJNAME_LENGTH]; -# LARGE_INTEGER Timeout; -# BOOL Alertable; -# } LockObject; -# struct { -# DWORD ProcessId; -# DWORD ThreadId; -# DWORD WaitTime; -# DWORD ContextSwitches; -# } ThreadObject; -# } ; -# }WAITCHAIN_NODE_INFO, *PWAITCHAIN_NODE_INFO; - -class _WAITCHAIN_NODE_INFO_STRUCT_1(Structure): - _fields_ = [ - ("ObjectName", WCHAR * WCT_OBJNAME_LENGTH), - ("Timeout", LONGLONG), # LARGE_INTEGER - ("Alertable", BOOL), - ] - -class _WAITCHAIN_NODE_INFO_STRUCT_2(Structure): - _fields_ = [ - ("ProcessId", DWORD), - ("ThreadId", DWORD), - ("WaitTime", DWORD), - ("ContextSwitches", DWORD), - ] - -class _WAITCHAIN_NODE_INFO_UNION(Union): - _fields_ = [ - ("LockObject", _WAITCHAIN_NODE_INFO_STRUCT_1), - ("ThreadObject", _WAITCHAIN_NODE_INFO_STRUCT_2), - ] - -class 
WAITCHAIN_NODE_INFO(Structure): - _fields_ = [ - ("ObjectType", WCT_OBJECT_TYPE), - ("ObjectStatus", WCT_OBJECT_STATUS), - ("u", _WAITCHAIN_NODE_INFO_UNION), - ] - -PWAITCHAIN_NODE_INFO = POINTER(WAITCHAIN_NODE_INFO) - -class WaitChainNodeInfo (object): - """ - Represents a node in the wait chain. - - It's a wrapper on the L{WAITCHAIN_NODE_INFO} structure. - - The following members are defined only - if the node is of L{WctThreadType} type: - - C{ProcessId} - - C{ThreadId} - - C{WaitTime} - - C{ContextSwitches} - - @see: L{GetThreadWaitChain} - - @type ObjectName: unicode - @ivar ObjectName: Object name. May be an empty string. - - @type ObjectType: int - @ivar ObjectType: Object type. - Should be one of the following values: - - L{WctCriticalSectionType} - - L{WctSendMessageType} - - L{WctMutexType} - - L{WctAlpcType} - - L{WctComType} - - L{WctThreadWaitType} - - L{WctProcessWaitType} - - L{WctThreadType} - - L{WctComActivationType} - - L{WctUnknownType} - - @type ObjectStatus: int - @ivar ObjectStatus: Wait status. - Should be one of the following values: - - L{WctStatusNoAccess} I{(ACCESS_DENIED for this object)} - - L{WctStatusRunning} I{(Thread status)} - - L{WctStatusBlocked} I{(Thread status)} - - L{WctStatusPidOnly} I{(Thread status)} - - L{WctStatusPidOnlyRpcss} I{(Thread status)} - - L{WctStatusOwned} I{(Dispatcher object status)} - - L{WctStatusNotOwned} I{(Dispatcher object status)} - - L{WctStatusAbandoned} I{(Dispatcher object status)} - - L{WctStatusUnknown} I{(All objects)} - - L{WctStatusError} I{(All objects)} - - @type ProcessId: int - @ivar ProcessId: Process global ID. - - @type ThreadId: int - @ivar ThreadId: Thread global ID. - - @type WaitTime: int - @ivar WaitTime: Wait time. - - @type ContextSwitches: int - @ivar ContextSwitches: Number of context switches. - """ - - #@type Timeout: int - #@ivar Timeout: Currently not documented in MSDN. - # - #@type Alertable: bool - #@ivar Alertable: Currently not documented in MSDN. - - # TODO: __repr__ - - def __init__(self, aStructure): - self.ObjectType = aStructure.ObjectType - self.ObjectStatus = aStructure.ObjectStatus - if self.ObjectType == WctThreadType: - self.ProcessId = aStructure.u.ThreadObject.ProcessId - self.ThreadId = aStructure.u.ThreadObject.ThreadId - self.WaitTime = aStructure.u.ThreadObject.WaitTime - self.ContextSwitches = aStructure.u.ThreadObject.ContextSwitches - self.ObjectName = u'' - else: - self.ObjectName = aStructure.u.LockObject.ObjectName.value - #self.Timeout = aStructure.u.LockObject.Timeout - #self.Alertable = bool(aStructure.u.LockObject.Alertable) - -class ThreadWaitChainSessionHandle (Handle): - """ - Thread wait chain session handle. - - Returned by L{OpenThreadWaitChainSession}. - - @see: L{Handle} - """ - - def __init__(self, aHandle = None): - """ - @type aHandle: int - @param aHandle: Win32 handle value. 
- """ - super(ThreadWaitChainSessionHandle, self).__init__(aHandle, - bOwnership = True) - - def _close(self): - if self.value is None: - raise ValueError("Handle was already closed!") - CloseThreadWaitChainSession(self.value) - - def dup(self): - raise NotImplementedError() - - def wait(self, dwMilliseconds = None): - raise NotImplementedError() - - @property - def inherit(self): - return False - - @property - def protectFromClose(self): - return False - -#--- Privilege dropping ------------------------------------------------------- - -SAFER_LEVEL_HANDLE = HANDLE - -SAFER_SCOPEID_MACHINE = 1 -SAFER_SCOPEID_USER = 2 - -SAFER_LEVEL_OPEN = 1 - -SAFER_LEVELID_DISALLOWED = 0x00000 -SAFER_LEVELID_UNTRUSTED = 0x01000 -SAFER_LEVELID_CONSTRAINED = 0x10000 -SAFER_LEVELID_NORMALUSER = 0x20000 -SAFER_LEVELID_FULLYTRUSTED = 0x40000 - -SAFER_POLICY_INFO_CLASS = DWORD -SaferPolicyLevelList = 1 -SaferPolicyEnableTransparentEnforcement = 2 -SaferPolicyDefaultLevel = 3 -SaferPolicyEvaluateUserScope = 4 -SaferPolicyScopeFlags = 5 - -SAFER_TOKEN_NULL_IF_EQUAL = 1 -SAFER_TOKEN_COMPARE_ONLY = 2 -SAFER_TOKEN_MAKE_INERT = 4 -SAFER_TOKEN_WANT_FLAGS = 8 -SAFER_TOKEN_MASK = 15 - -#--- Service Control Manager types, constants and structures ------------------ - -SC_HANDLE = HANDLE - -SERVICES_ACTIVE_DATABASEW = u"ServicesActive" -SERVICES_FAILED_DATABASEW = u"ServicesFailed" - -SERVICES_ACTIVE_DATABASEA = "ServicesActive" -SERVICES_FAILED_DATABASEA = "ServicesFailed" - -SC_GROUP_IDENTIFIERW = u'+' -SC_GROUP_IDENTIFIERA = '+' - -SERVICE_NO_CHANGE = 0xffffffff - -# enum SC_STATUS_TYPE -SC_STATUS_TYPE = ctypes.c_int -SC_STATUS_PROCESS_INFO = 0 - -# enum SC_ENUM_TYPE -SC_ENUM_TYPE = ctypes.c_int -SC_ENUM_PROCESS_INFO = 0 - -# Access rights -# http://msdn.microsoft.com/en-us/library/windows/desktop/ms685981(v=vs.85).aspx - -SERVICE_ALL_ACCESS = 0xF01FF -SERVICE_QUERY_CONFIG = 0x0001 -SERVICE_CHANGE_CONFIG = 0x0002 -SERVICE_QUERY_STATUS = 0x0004 -SERVICE_ENUMERATE_DEPENDENTS = 0x0008 -SERVICE_START = 0x0010 -SERVICE_STOP = 0x0020 -SERVICE_PAUSE_CONTINUE = 0x0040 -SERVICE_INTERROGATE = 0x0080 -SERVICE_USER_DEFINED_CONTROL = 0x0100 - -SC_MANAGER_ALL_ACCESS = 0xF003F -SC_MANAGER_CONNECT = 0x0001 -SC_MANAGER_CREATE_SERVICE = 0x0002 -SC_MANAGER_ENUMERATE_SERVICE = 0x0004 -SC_MANAGER_LOCK = 0x0008 -SC_MANAGER_QUERY_LOCK_STATUS = 0x0010 -SC_MANAGER_MODIFY_BOOT_CONFIG = 0x0020 - -# CreateService() service start type -SERVICE_BOOT_START = 0x00000000 -SERVICE_SYSTEM_START = 0x00000001 -SERVICE_AUTO_START = 0x00000002 -SERVICE_DEMAND_START = 0x00000003 -SERVICE_DISABLED = 0x00000004 - -# CreateService() error control flags -SERVICE_ERROR_IGNORE = 0x00000000 -SERVICE_ERROR_NORMAL = 0x00000001 -SERVICE_ERROR_SEVERE = 0x00000002 -SERVICE_ERROR_CRITICAL = 0x00000003 - -# EnumServicesStatusEx() service state filters -SERVICE_ACTIVE = 1 -SERVICE_INACTIVE = 2 -SERVICE_STATE_ALL = 3 - -# SERVICE_STATUS_PROCESS.dwServiceType -SERVICE_KERNEL_DRIVER = 0x00000001 -SERVICE_FILE_SYSTEM_DRIVER = 0x00000002 -SERVICE_ADAPTER = 0x00000004 -SERVICE_RECOGNIZER_DRIVER = 0x00000008 -SERVICE_WIN32_OWN_PROCESS = 0x00000010 -SERVICE_WIN32_SHARE_PROCESS = 0x00000020 -SERVICE_INTERACTIVE_PROCESS = 0x00000100 - -# EnumServicesStatusEx() service type filters (in addition to actual types) -SERVICE_DRIVER = 0x0000000B # SERVICE_KERNEL_DRIVER and SERVICE_FILE_SYSTEM_DRIVER -SERVICE_WIN32 = 0x00000030 # SERVICE_WIN32_OWN_PROCESS and SERVICE_WIN32_SHARE_PROCESS - -# SERVICE_STATUS_PROCESS.dwCurrentState -SERVICE_STOPPED = 0x00000001 -SERVICE_START_PENDING = 
0x00000002 -SERVICE_STOP_PENDING = 0x00000003 -SERVICE_RUNNING = 0x00000004 -SERVICE_CONTINUE_PENDING = 0x00000005 -SERVICE_PAUSE_PENDING = 0x00000006 -SERVICE_PAUSED = 0x00000007 - -# SERVICE_STATUS_PROCESS.dwControlsAccepted -SERVICE_ACCEPT_STOP = 0x00000001 -SERVICE_ACCEPT_PAUSE_CONTINUE = 0x00000002 -SERVICE_ACCEPT_SHUTDOWN = 0x00000004 -SERVICE_ACCEPT_PARAMCHANGE = 0x00000008 -SERVICE_ACCEPT_NETBINDCHANGE = 0x00000010 -SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020 -SERVICE_ACCEPT_POWEREVENT = 0x00000040 -SERVICE_ACCEPT_SESSIONCHANGE = 0x00000080 -SERVICE_ACCEPT_PRESHUTDOWN = 0x00000100 - -# SERVICE_STATUS_PROCESS.dwServiceFlags -SERVICE_RUNS_IN_SYSTEM_PROCESS = 0x00000001 - -# Service control flags -SERVICE_CONTROL_STOP = 0x00000001 -SERVICE_CONTROL_PAUSE = 0x00000002 -SERVICE_CONTROL_CONTINUE = 0x00000003 -SERVICE_CONTROL_INTERROGATE = 0x00000004 -SERVICE_CONTROL_SHUTDOWN = 0x00000005 -SERVICE_CONTROL_PARAMCHANGE = 0x00000006 -SERVICE_CONTROL_NETBINDADD = 0x00000007 -SERVICE_CONTROL_NETBINDREMOVE = 0x00000008 -SERVICE_CONTROL_NETBINDENABLE = 0x00000009 -SERVICE_CONTROL_NETBINDDISABLE = 0x0000000A -SERVICE_CONTROL_DEVICEEVENT = 0x0000000B -SERVICE_CONTROL_HARDWAREPROFILECHANGE = 0x0000000C -SERVICE_CONTROL_POWEREVENT = 0x0000000D -SERVICE_CONTROL_SESSIONCHANGE = 0x0000000E - -# Service control accepted bitmasks -SERVICE_ACCEPT_STOP = 0x00000001 -SERVICE_ACCEPT_PAUSE_CONTINUE = 0x00000002 -SERVICE_ACCEPT_SHUTDOWN = 0x00000004 -SERVICE_ACCEPT_PARAMCHANGE = 0x00000008 -SERVICE_ACCEPT_NETBINDCHANGE = 0x00000010 -SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020 -SERVICE_ACCEPT_POWEREVENT = 0x00000040 -SERVICE_ACCEPT_SESSIONCHANGE = 0x00000080 -SERVICE_ACCEPT_PRESHUTDOWN = 0x00000100 -SERVICE_ACCEPT_TIMECHANGE = 0x00000200 -SERVICE_ACCEPT_TRIGGEREVENT = 0x00000400 -SERVICE_ACCEPT_USERMODEREBOOT = 0x00000800 - -# enum SC_ACTION_TYPE -SC_ACTION_NONE = 0 -SC_ACTION_RESTART = 1 -SC_ACTION_REBOOT = 2 -SC_ACTION_RUN_COMMAND = 3 - -# QueryServiceConfig2 -SERVICE_CONFIG_DESCRIPTION = 1 -SERVICE_CONFIG_FAILURE_ACTIONS = 2 - -# typedef struct _SERVICE_STATUS { -# DWORD dwServiceType; -# DWORD dwCurrentState; -# DWORD dwControlsAccepted; -# DWORD dwWin32ExitCode; -# DWORD dwServiceSpecificExitCode; -# DWORD dwCheckPoint; -# DWORD dwWaitHint; -# } SERVICE_STATUS, *LPSERVICE_STATUS; -class SERVICE_STATUS(Structure): - _fields_ = [ - ("dwServiceType", DWORD), - ("dwCurrentState", DWORD), - ("dwControlsAccepted", DWORD), - ("dwWin32ExitCode", DWORD), - ("dwServiceSpecificExitCode", DWORD), - ("dwCheckPoint", DWORD), - ("dwWaitHint", DWORD), - ] -LPSERVICE_STATUS = POINTER(SERVICE_STATUS) - -# typedef struct _SERVICE_STATUS_PROCESS { -# DWORD dwServiceType; -# DWORD dwCurrentState; -# DWORD dwControlsAccepted; -# DWORD dwWin32ExitCode; -# DWORD dwServiceSpecificExitCode; -# DWORD dwCheckPoint; -# DWORD dwWaitHint; -# DWORD dwProcessId; -# DWORD dwServiceFlags; -# } SERVICE_STATUS_PROCESS, *LPSERVICE_STATUS_PROCESS; -class SERVICE_STATUS_PROCESS(Structure): - _fields_ = SERVICE_STATUS._fields_ + [ - ("dwProcessId", DWORD), - ("dwServiceFlags", DWORD), - ] -LPSERVICE_STATUS_PROCESS = POINTER(SERVICE_STATUS_PROCESS) - -# typedef struct _ENUM_SERVICE_STATUS { -# LPTSTR lpServiceName; -# LPTSTR lpDisplayName; -# SERVICE_STATUS ServiceStatus; -# } ENUM_SERVICE_STATUS, *LPENUM_SERVICE_STATUS; -class ENUM_SERVICE_STATUSA(Structure): - _fields_ = [ - ("lpServiceName", LPSTR), - ("lpDisplayName", LPSTR), - ("ServiceStatus", SERVICE_STATUS), - ] -class ENUM_SERVICE_STATUSW(Structure): - _fields_ = [ - 
("lpServiceName", LPWSTR), - ("lpDisplayName", LPWSTR), - ("ServiceStatus", SERVICE_STATUS), - ] -LPENUM_SERVICE_STATUSA = POINTER(ENUM_SERVICE_STATUSA) -LPENUM_SERVICE_STATUSW = POINTER(ENUM_SERVICE_STATUSW) - -# typedef struct _ENUM_SERVICE_STATUS_PROCESS { -# LPTSTR lpServiceName; -# LPTSTR lpDisplayName; -# SERVICE_STATUS_PROCESS ServiceStatusProcess; -# } ENUM_SERVICE_STATUS_PROCESS, *LPENUM_SERVICE_STATUS_PROCESS; -class ENUM_SERVICE_STATUS_PROCESSA(Structure): - _fields_ = [ - ("lpServiceName", LPSTR), - ("lpDisplayName", LPSTR), - ("ServiceStatusProcess", SERVICE_STATUS_PROCESS), - ] -class ENUM_SERVICE_STATUS_PROCESSW(Structure): - _fields_ = [ - ("lpServiceName", LPWSTR), - ("lpDisplayName", LPWSTR), - ("ServiceStatusProcess", SERVICE_STATUS_PROCESS), - ] -LPENUM_SERVICE_STATUS_PROCESSA = POINTER(ENUM_SERVICE_STATUS_PROCESSA) -LPENUM_SERVICE_STATUS_PROCESSW = POINTER(ENUM_SERVICE_STATUS_PROCESSW) - -class ServiceStatus(object): - """ - Wrapper for the L{SERVICE_STATUS} structure. - """ - - def __init__(self, raw): - """ - @type raw: L{SERVICE_STATUS} - @param raw: Raw structure for this service status data. - """ - self.ServiceType = raw.dwServiceType - self.CurrentState = raw.dwCurrentState - self.ControlsAccepted = raw.dwControlsAccepted - self.Win32ExitCode = raw.dwWin32ExitCode - self.ServiceSpecificExitCode = raw.dwServiceSpecificExitCode - self.CheckPoint = raw.dwCheckPoint - self.WaitHint = raw.dwWaitHint - -class ServiceStatusProcess(object): - """ - Wrapper for the L{SERVICE_STATUS_PROCESS} structure. - """ - - def __init__(self, raw): - """ - @type raw: L{SERVICE_STATUS_PROCESS} - @param raw: Raw structure for this service status data. - """ - self.ServiceType = raw.dwServiceType - self.CurrentState = raw.dwCurrentState - self.ControlsAccepted = raw.dwControlsAccepted - self.Win32ExitCode = raw.dwWin32ExitCode - self.ServiceSpecificExitCode = raw.dwServiceSpecificExitCode - self.CheckPoint = raw.dwCheckPoint - self.WaitHint = raw.dwWaitHint - self.ProcessId = raw.dwProcessId - self.ServiceFlags = raw.dwServiceFlags - -class ServiceStatusEntry(object): - """ - Service status entry returned by L{EnumServicesStatus}. - """ - - def __init__(self, raw): - """ - @type raw: L{ENUM_SERVICE_STATUSA} or L{ENUM_SERVICE_STATUSW} - @param raw: Raw structure for this service status entry. 
- """ - self.ServiceName = raw.lpServiceName - self.DisplayName = raw.lpDisplayName - self.ServiceType = raw.ServiceStatus.dwServiceType - self.CurrentState = raw.ServiceStatus.dwCurrentState - self.ControlsAccepted = raw.ServiceStatus.dwControlsAccepted - self.Win32ExitCode = raw.ServiceStatus.dwWin32ExitCode - self.ServiceSpecificExitCode = raw.ServiceStatus.dwServiceSpecificExitCode - self.CheckPoint = raw.ServiceStatus.dwCheckPoint - self.WaitHint = raw.ServiceStatus.dwWaitHint - - def __str__(self): - output = [] - if self.ServiceType & SERVICE_INTERACTIVE_PROCESS: - output.append("Interactive service") - else: - output.append("Service") - if self.DisplayName: - output.append("\"%s\" (%s)" % (self.DisplayName, self.ServiceName)) - else: - output.append("\"%s\"" % self.ServiceName) - if self.CurrentState == SERVICE_CONTINUE_PENDING: - output.append("is about to continue.") - elif self.CurrentState == SERVICE_PAUSE_PENDING: - output.append("is pausing.") - elif self.CurrentState == SERVICE_PAUSED: - output.append("is paused.") - elif self.CurrentState == SERVICE_RUNNING: - output.append("is running.") - elif self.CurrentState == SERVICE_START_PENDING: - output.append("is starting.") - elif self.CurrentState == SERVICE_STOP_PENDING: - output.append("is stopping.") - elif self.CurrentState == SERVICE_STOPPED: - output.append("is stopped.") - return " ".join(output) - -class ServiceStatusProcessEntry(object): - """ - Service status entry returned by L{EnumServicesStatusEx}. - """ - - def __init__(self, raw): - """ - @type raw: L{ENUM_SERVICE_STATUS_PROCESSA} or L{ENUM_SERVICE_STATUS_PROCESSW} - @param raw: Raw structure for this service status entry. - """ - self.ServiceName = raw.lpServiceName - self.DisplayName = raw.lpDisplayName - self.ServiceType = raw.ServiceStatusProcess.dwServiceType - self.CurrentState = raw.ServiceStatusProcess.dwCurrentState - self.ControlsAccepted = raw.ServiceStatusProcess.dwControlsAccepted - self.Win32ExitCode = raw.ServiceStatusProcess.dwWin32ExitCode - self.ServiceSpecificExitCode = raw.ServiceStatusProcess.dwServiceSpecificExitCode - self.CheckPoint = raw.ServiceStatusProcess.dwCheckPoint - self.WaitHint = raw.ServiceStatusProcess.dwWaitHint - self.ProcessId = raw.ServiceStatusProcess.dwProcessId - self.ServiceFlags = raw.ServiceStatusProcess.dwServiceFlags - - def __str__(self): - output = [] - if self.ServiceType & SERVICE_INTERACTIVE_PROCESS: - output.append("Interactive service ") - else: - output.append("Service ") - if self.DisplayName: - output.append("\"%s\" (%s)" % (self.DisplayName, self.ServiceName)) - else: - output.append("\"%s\"" % self.ServiceName) - if self.CurrentState == SERVICE_CONTINUE_PENDING: - output.append(" is about to continue") - elif self.CurrentState == SERVICE_PAUSE_PENDING: - output.append(" is pausing") - elif self.CurrentState == SERVICE_PAUSED: - output.append(" is paused") - elif self.CurrentState == SERVICE_RUNNING: - output.append(" is running") - elif self.CurrentState == SERVICE_START_PENDING: - output.append(" is starting") - elif self.CurrentState == SERVICE_STOP_PENDING: - output.append(" is stopping") - elif self.CurrentState == SERVICE_STOPPED: - output.append(" is stopped") - if self.ProcessId: - output.append(" at process %d" % self.ProcessId) - output.append(".") - return "".join(output) - -#--- Handle wrappers ---------------------------------------------------------- - -# XXX maybe add functions related to the tokens here? -class TokenHandle (Handle): - """ - Access token handle. 
- - @see: L{Handle} - """ - pass - -class RegistryKeyHandle (UserModeHandle): - """ - Registry key handle. - """ - - _TYPE = HKEY - - def _close(self): - RegCloseKey(self.value) - -class SaferLevelHandle (UserModeHandle): - """ - Safer level handle. - - @see: U{http://msdn.microsoft.com/en-us/library/ms722425(VS.85).aspx} - """ - - _TYPE = SAFER_LEVEL_HANDLE - - def _close(self): - SaferCloseLevel(self.value) - -class ServiceHandle (UserModeHandle): - """ - Service handle. - - @see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684330(v=vs.85).aspx} - """ - - _TYPE = SC_HANDLE - - def _close(self): - CloseServiceHandle(self.value) - -class ServiceControlManagerHandle (UserModeHandle): - """ - Service Control Manager (SCM) handle. - - @see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684323(v=vs.85).aspx} - """ - - _TYPE = SC_HANDLE - - def _close(self): - CloseServiceHandle(self.value) - -#--- advapi32.dll ------------------------------------------------------------- - -# BOOL WINAPI GetUserName( -# __out LPTSTR lpBuffer, -# __inout LPDWORD lpnSize -# ); -def GetUserNameA(): - _GetUserNameA = windll.advapi32.GetUserNameA - _GetUserNameA.argtypes = [LPSTR, LPDWORD] - _GetUserNameA.restype = bool - - nSize = DWORD(0) - _GetUserNameA(None, byref(nSize)) - error = GetLastError() - if error != ERROR_INSUFFICIENT_BUFFER: - raise ctypes.WinError(error) - lpBuffer = ctypes.create_string_buffer('', nSize.value + 1) - success = _GetUserNameA(lpBuffer, byref(nSize)) - if not success: - raise ctypes.WinError() - return lpBuffer.value - -def GetUserNameW(): - _GetUserNameW = windll.advapi32.GetUserNameW - _GetUserNameW.argtypes = [LPWSTR, LPDWORD] - _GetUserNameW.restype = bool - - nSize = DWORD(0) - _GetUserNameW(None, byref(nSize)) - error = GetLastError() - if error != ERROR_INSUFFICIENT_BUFFER: - raise ctypes.WinError(error) - lpBuffer = ctypes.create_unicode_buffer(u'', nSize.value + 1) - success = _GetUserNameW(lpBuffer, byref(nSize)) - if not success: - raise ctypes.WinError() - return lpBuffer.value - -GetUserName = DefaultStringType(GetUserNameA, GetUserNameW) - -# BOOL WINAPI LookupAccountName( -# __in_opt LPCTSTR lpSystemName, -# __in LPCTSTR lpAccountName, -# __out_opt PSID Sid, -# __inout LPDWORD cbSid, -# __out_opt LPTSTR ReferencedDomainName, -# __inout LPDWORD cchReferencedDomainName, -# __out PSID_NAME_USE peUse -# ); - -# XXX TO DO - -# BOOL WINAPI LookupAccountSid( -# __in_opt LPCTSTR lpSystemName, -# __in PSID lpSid, -# __out_opt LPTSTR lpName, -# __inout LPDWORD cchName, -# __out_opt LPTSTR lpReferencedDomainName, -# __inout LPDWORD cchReferencedDomainName, -# __out PSID_NAME_USE peUse -# ); -def LookupAccountSidA(lpSystemName, lpSid): - _LookupAccountSidA = windll.advapi32.LookupAccountSidA - _LookupAccountSidA.argtypes = [LPSTR, PSID, LPSTR, LPDWORD, LPSTR, LPDWORD, LPDWORD] - _LookupAccountSidA.restype = bool - - cchName = DWORD(0) - cchReferencedDomainName = DWORD(0) - peUse = DWORD(0) - _LookupAccountSidA(lpSystemName, lpSid, None, byref(cchName), None, byref(cchReferencedDomainName), byref(peUse)) - error = GetLastError() - if error != ERROR_INSUFFICIENT_BUFFER: - raise ctypes.WinError(error) - lpName = ctypes.create_string_buffer('', cchName + 1) - lpReferencedDomainName = ctypes.create_string_buffer('', cchReferencedDomainName + 1) - success = _LookupAccountSidA(lpSystemName, lpSid, lpName, byref(cchName), lpReferencedDomainName, byref(cchReferencedDomainName), byref(peUse)) - if not success: - raise ctypes.WinError() - return lpName.value, 
lpReferencedDomainName.value, peUse.value - -def LookupAccountSidW(lpSystemName, lpSid): - _LookupAccountSidW = windll.advapi32.LookupAccountSidA - _LookupAccountSidW.argtypes = [LPSTR, PSID, LPWSTR, LPDWORD, LPWSTR, LPDWORD, LPDWORD] - _LookupAccountSidW.restype = bool - - cchName = DWORD(0) - cchReferencedDomainName = DWORD(0) - peUse = DWORD(0) - _LookupAccountSidW(lpSystemName, lpSid, None, byref(cchName), None, byref(cchReferencedDomainName), byref(peUse)) - error = GetLastError() - if error != ERROR_INSUFFICIENT_BUFFER: - raise ctypes.WinError(error) - lpName = ctypes.create_unicode_buffer(u'', cchName + 1) - lpReferencedDomainName = ctypes.create_unicode_buffer(u'', cchReferencedDomainName + 1) - success = _LookupAccountSidW(lpSystemName, lpSid, lpName, byref(cchName), lpReferencedDomainName, byref(cchReferencedDomainName), byref(peUse)) - if not success: - raise ctypes.WinError() - return lpName.value, lpReferencedDomainName.value, peUse.value - -LookupAccountSid = GuessStringType(LookupAccountSidA, LookupAccountSidW) - -# BOOL ConvertSidToStringSid( -# __in PSID Sid, -# __out LPTSTR *StringSid -# ); -def ConvertSidToStringSidA(Sid): - _ConvertSidToStringSidA = windll.advapi32.ConvertSidToStringSidA - _ConvertSidToStringSidA.argtypes = [PSID, LPSTR] - _ConvertSidToStringSidA.restype = bool - _ConvertSidToStringSidA.errcheck = RaiseIfZero - - pStringSid = LPSTR() - _ConvertSidToStringSidA(Sid, byref(pStringSid)) - try: - StringSid = pStringSid.value - finally: - LocalFree(pStringSid) - return StringSid - -def ConvertSidToStringSidW(Sid): - _ConvertSidToStringSidW = windll.advapi32.ConvertSidToStringSidW - _ConvertSidToStringSidW.argtypes = [PSID, LPWSTR] - _ConvertSidToStringSidW.restype = bool - _ConvertSidToStringSidW.errcheck = RaiseIfZero - - pStringSid = LPWSTR() - _ConvertSidToStringSidW(Sid, byref(pStringSid)) - try: - StringSid = pStringSid.value - finally: - LocalFree(pStringSid) - return StringSid - -ConvertSidToStringSid = DefaultStringType(ConvertSidToStringSidA, ConvertSidToStringSidW) - -# BOOL WINAPI ConvertStringSidToSid( -# __in LPCTSTR StringSid, -# __out PSID *Sid -# ); -def ConvertStringSidToSidA(StringSid): - _ConvertStringSidToSidA = windll.advapi32.ConvertStringSidToSidA - _ConvertStringSidToSidA.argtypes = [LPSTR, PVOID] - _ConvertStringSidToSidA.restype = bool - _ConvertStringSidToSidA.errcheck = RaiseIfZero - - Sid = PVOID() - _ConvertStringSidToSidA(StringSid, ctypes.pointer(Sid)) - return Sid.value - -def ConvertStringSidToSidW(StringSid): - _ConvertStringSidToSidW = windll.advapi32.ConvertStringSidToSidW - _ConvertStringSidToSidW.argtypes = [LPWSTR, PVOID] - _ConvertStringSidToSidW.restype = bool - _ConvertStringSidToSidW.errcheck = RaiseIfZero - - Sid = PVOID() - _ConvertStringSidToSidW(StringSid, ctypes.pointer(Sid)) - return Sid.value - -ConvertStringSidToSid = GuessStringType(ConvertStringSidToSidA, ConvertStringSidToSidW) - -# BOOL WINAPI IsValidSid( -# __in PSID pSid -# ); -def IsValidSid(pSid): - _IsValidSid = windll.advapi32.IsValidSid - _IsValidSid.argtypes = [PSID] - _IsValidSid.restype = bool - return _IsValidSid(pSid) - -# BOOL WINAPI EqualSid( -# __in PSID pSid1, -# __in PSID pSid2 -# ); -def EqualSid(pSid1, pSid2): - _EqualSid = windll.advapi32.EqualSid - _EqualSid.argtypes = [PSID, PSID] - _EqualSid.restype = bool - return _EqualSid(pSid1, pSid2) - -# DWORD WINAPI GetLengthSid( -# __in PSID pSid -# ); -def GetLengthSid(pSid): - _GetLengthSid = windll.advapi32.GetLengthSid - _GetLengthSid.argtypes = [PSID] - _GetLengthSid.restype = DWORD - 
return _GetLengthSid(pSid) - -# BOOL WINAPI CopySid( -# __in DWORD nDestinationSidLength, -# __out PSID pDestinationSid, -# __in PSID pSourceSid -# ); -def CopySid(pSourceSid): - _CopySid = windll.advapi32.CopySid - _CopySid.argtypes = [DWORD, PVOID, PSID] - _CopySid.restype = bool - _CopySid.errcheck = RaiseIfZero - - nDestinationSidLength = GetLengthSid(pSourceSid) - DestinationSid = ctypes.create_string_buffer('', nDestinationSidLength) - pDestinationSid = ctypes.cast(ctypes.pointer(DestinationSid), PVOID) - _CopySid(nDestinationSidLength, pDestinationSid, pSourceSid) - return ctypes.cast(pDestinationSid, PSID) - -# PVOID WINAPI FreeSid( -# __in PSID pSid -# ); -def FreeSid(pSid): - _FreeSid = windll.advapi32.FreeSid - _FreeSid.argtypes = [PSID] - _FreeSid.restype = PSID - _FreeSid.errcheck = RaiseIfNotZero - _FreeSid(pSid) - -# BOOL WINAPI OpenProcessToken( -# __in HANDLE ProcessHandle, -# __in DWORD DesiredAccess, -# __out PHANDLE TokenHandle -# ); -def OpenProcessToken(ProcessHandle, DesiredAccess = TOKEN_ALL_ACCESS): - _OpenProcessToken = windll.advapi32.OpenProcessToken - _OpenProcessToken.argtypes = [HANDLE, DWORD, PHANDLE] - _OpenProcessToken.restype = bool - _OpenProcessToken.errcheck = RaiseIfZero - - NewTokenHandle = HANDLE(INVALID_HANDLE_VALUE) - _OpenProcessToken(ProcessHandle, DesiredAccess, byref(NewTokenHandle)) - return TokenHandle(NewTokenHandle.value) - -# BOOL WINAPI OpenThreadToken( -# __in HANDLE ThreadHandle, -# __in DWORD DesiredAccess, -# __in BOOL OpenAsSelf, -# __out PHANDLE TokenHandle -# ); -def OpenThreadToken(ThreadHandle, DesiredAccess, OpenAsSelf = True): - _OpenThreadToken = windll.advapi32.OpenThreadToken - _OpenThreadToken.argtypes = [HANDLE, DWORD, BOOL, PHANDLE] - _OpenThreadToken.restype = bool - _OpenThreadToken.errcheck = RaiseIfZero - - NewTokenHandle = HANDLE(INVALID_HANDLE_VALUE) - _OpenThreadToken(ThreadHandle, DesiredAccess, OpenAsSelf, byref(NewTokenHandle)) - return TokenHandle(NewTokenHandle.value) - -# BOOL WINAPI DuplicateToken( -# _In_ HANDLE ExistingTokenHandle, -# _In_ SECURITY_IMPERSONATION_LEVEL ImpersonationLevel, -# _Out_ PHANDLE DuplicateTokenHandle -# ); -def DuplicateToken(ExistingTokenHandle, ImpersonationLevel = SecurityImpersonation): - _DuplicateToken = windll.advapi32.DuplicateToken - _DuplicateToken.argtypes = [HANDLE, SECURITY_IMPERSONATION_LEVEL, PHANDLE] - _DuplicateToken.restype = bool - _DuplicateToken.errcheck = RaiseIfZero - - DuplicateTokenHandle = HANDLE(INVALID_HANDLE_VALUE) - _DuplicateToken(ExistingTokenHandle, ImpersonationLevel, byref(DuplicateTokenHandle)) - return TokenHandle(DuplicateTokenHandle.value) - -# BOOL WINAPI DuplicateTokenEx( -# _In_ HANDLE hExistingToken, -# _In_ DWORD dwDesiredAccess, -# _In_opt_ LPSECURITY_ATTRIBUTES lpTokenAttributes, -# _In_ SECURITY_IMPERSONATION_LEVEL ImpersonationLevel, -# _In_ TOKEN_TYPE TokenType, -# _Out_ PHANDLE phNewToken -# ); -def DuplicateTokenEx(hExistingToken, dwDesiredAccess = TOKEN_ALL_ACCESS, lpTokenAttributes = None, ImpersonationLevel = SecurityImpersonation, TokenType = TokenPrimary): - _DuplicateTokenEx = windll.advapi32.DuplicateTokenEx - _DuplicateTokenEx.argtypes = [HANDLE, DWORD, LPSECURITY_ATTRIBUTES, SECURITY_IMPERSONATION_LEVEL, TOKEN_TYPE, PHANDLE] - _DuplicateTokenEx.restype = bool - _DuplicateTokenEx.errcheck = RaiseIfZero - - DuplicateTokenHandle = HANDLE(INVALID_HANDLE_VALUE) - _DuplicateTokenEx(hExistingToken, dwDesiredAccess, lpTokenAttributes, ImpersonationLevel, TokenType, byref(DuplicateTokenHandle)) - return 
TokenHandle(DuplicateTokenHandle.value) - -# BOOL WINAPI IsTokenRestricted( -# __in HANDLE TokenHandle -# ); -def IsTokenRestricted(hTokenHandle): - _IsTokenRestricted = windll.advapi32.IsTokenRestricted - _IsTokenRestricted.argtypes = [HANDLE] - _IsTokenRestricted.restype = bool - _IsTokenRestricted.errcheck = RaiseIfNotErrorSuccess - - SetLastError(ERROR_SUCCESS) - return _IsTokenRestricted(hTokenHandle) - -# BOOL WINAPI LookupPrivilegeValue( -# __in_opt LPCTSTR lpSystemName, -# __in LPCTSTR lpName, -# __out PLUID lpLuid -# ); -def LookupPrivilegeValueA(lpSystemName, lpName): - _LookupPrivilegeValueA = windll.advapi32.LookupPrivilegeValueA - _LookupPrivilegeValueA.argtypes = [LPSTR, LPSTR, PLUID] - _LookupPrivilegeValueA.restype = bool - _LookupPrivilegeValueA.errcheck = RaiseIfZero - - lpLuid = LUID() - if not lpSystemName: - lpSystemName = None - _LookupPrivilegeValueA(lpSystemName, lpName, byref(lpLuid)) - return lpLuid - -def LookupPrivilegeValueW(lpSystemName, lpName): - _LookupPrivilegeValueW = windll.advapi32.LookupPrivilegeValueW - _LookupPrivilegeValueW.argtypes = [LPWSTR, LPWSTR, PLUID] - _LookupPrivilegeValueW.restype = bool - _LookupPrivilegeValueW.errcheck = RaiseIfZero - - lpLuid = LUID() - if not lpSystemName: - lpSystemName = None - _LookupPrivilegeValueW(lpSystemName, lpName, byref(lpLuid)) - return lpLuid - -LookupPrivilegeValue = GuessStringType(LookupPrivilegeValueA, LookupPrivilegeValueW) - -# BOOL WINAPI LookupPrivilegeName( -# __in_opt LPCTSTR lpSystemName, -# __in PLUID lpLuid, -# __out_opt LPTSTR lpName, -# __inout LPDWORD cchName -# ); - -def LookupPrivilegeNameA(lpSystemName, lpLuid): - _LookupPrivilegeNameA = windll.advapi32.LookupPrivilegeNameA - _LookupPrivilegeNameA.argtypes = [LPSTR, PLUID, LPSTR, LPDWORD] - _LookupPrivilegeNameA.restype = bool - _LookupPrivilegeNameA.errcheck = RaiseIfZero - - cchName = DWORD(0) - _LookupPrivilegeNameA(lpSystemName, byref(lpLuid), NULL, byref(cchName)) - lpName = ctypes.create_string_buffer("", cchName.value) - _LookupPrivilegeNameA(lpSystemName, byref(lpLuid), byref(lpName), byref(cchName)) - return lpName.value - -def LookupPrivilegeNameW(lpSystemName, lpLuid): - _LookupPrivilegeNameW = windll.advapi32.LookupPrivilegeNameW - _LookupPrivilegeNameW.argtypes = [LPWSTR, PLUID, LPWSTR, LPDWORD] - _LookupPrivilegeNameW.restype = bool - _LookupPrivilegeNameW.errcheck = RaiseIfZero - - cchName = DWORD(0) - _LookupPrivilegeNameW(lpSystemName, byref(lpLuid), NULL, byref(cchName)) - lpName = ctypes.create_unicode_buffer(u"", cchName.value) - _LookupPrivilegeNameW(lpSystemName, byref(lpLuid), byref(lpName), byref(cchName)) - return lpName.value - -LookupPrivilegeName = GuessStringType(LookupPrivilegeNameA, LookupPrivilegeNameW) - -# BOOL WINAPI AdjustTokenPrivileges( -# __in HANDLE TokenHandle, -# __in BOOL DisableAllPrivileges, -# __in_opt PTOKEN_PRIVILEGES NewState, -# __in DWORD BufferLength, -# __out_opt PTOKEN_PRIVILEGES PreviousState, -# __out_opt PDWORD ReturnLength -# ); -def AdjustTokenPrivileges(TokenHandle, NewState = ()): - _AdjustTokenPrivileges = windll.advapi32.AdjustTokenPrivileges - _AdjustTokenPrivileges.argtypes = [HANDLE, BOOL, LPVOID, DWORD, LPVOID, LPVOID] - _AdjustTokenPrivileges.restype = bool - _AdjustTokenPrivileges.errcheck = RaiseIfZero - # - # I don't know how to allocate variable sized structures in ctypes :( - # so this hack will work by using always TOKEN_PRIVILEGES of one element - # and calling the API many times. 
This also means the PreviousState - # parameter won't be supported yet as it's too much hassle. In a future - # version I look forward to implementing this function correctly. - # - if not NewState: - _AdjustTokenPrivileges(TokenHandle, TRUE, NULL, 0, NULL, NULL) - else: - success = True - for (privilege, enabled) in NewState: - if not isinstance(privilege, LUID): - privilege = LookupPrivilegeValue(NULL, privilege) - if enabled == True: - flags = SE_PRIVILEGE_ENABLED - elif enabled == False: - flags = SE_PRIVILEGE_REMOVED - elif enabled == None: - flags = 0 - else: - flags = enabled - laa = LUID_AND_ATTRIBUTES(privilege, flags) - tp = TOKEN_PRIVILEGES(1, laa) - _AdjustTokenPrivileges(TokenHandle, FALSE, byref(tp), sizeof(tp), NULL, NULL) - -# BOOL WINAPI GetTokenInformation( -# __in HANDLE TokenHandle, -# __in TOKEN_INFORMATION_CLASS TokenInformationClass, -# __out_opt LPVOID TokenInformation, -# __in DWORD TokenInformationLength, -# __out PDWORD ReturnLength -# ); -def GetTokenInformation(hTokenHandle, TokenInformationClass): - if TokenInformationClass <= 0 or TokenInformationClass > MaxTokenInfoClass: - raise ValueError("Invalid value for TokenInformationClass (%i)" % TokenInformationClass) - - # User SID. - if TokenInformationClass == TokenUser: - TokenInformation = TOKEN_USER() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.User.Sid.value - - # Owner SID. - if TokenInformationClass == TokenOwner: - TokenInformation = TOKEN_OWNER() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.Owner.value - - # Primary group SID. - if TokenInformationClass == TokenOwner: - TokenInformation = TOKEN_PRIMARY_GROUP() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.PrimaryGroup.value - - # App container SID. - if TokenInformationClass == TokenAppContainerSid: - TokenInformation = TOKEN_APPCONTAINER_INFORMATION() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.TokenAppContainer.value - - # Integrity level SID. - if TokenInformationClass == TokenIntegrityLevel: - TokenInformation = TOKEN_MANDATORY_LABEL() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.Label.Sid.value, TokenInformation.Label.Attributes - - # Logon session LUID. - if TokenInformationClass == TokenOrigin: - TokenInformation = TOKEN_ORIGIN() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.OriginatingLogonSession - - # Primary or impersonation token. - if TokenInformationClass == TokenType: - TokenInformation = TOKEN_TYPE(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.value - - # Elevated token. - if TokenInformationClass == TokenElevation: - TokenInformation = TOKEN_ELEVATION(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.value - - # Security impersonation level. - if TokenInformationClass == TokenElevation: - TokenInformation = SECURITY_IMPERSONATION_LEVEL(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.value - - # Session ID and other DWORD values. 
- if TokenInformationClass in (TokenSessionId, TokenAppContainerNumber): - TokenInformation = DWORD(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation.value - - # Various boolean flags. - if TokenInformationClass in (TokenSandBoxInert, TokenHasRestrictions, TokenUIAccess, - TokenVirtualizationAllowed, TokenVirtualizationEnabled): - TokenInformation = DWORD(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return bool(TokenInformation.value) - - # Linked token. - if TokenInformationClass == TokenLinkedToken: - TokenInformation = TOKEN_LINKED_TOKEN(0) - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenHandle(TokenInformation.LinkedToken.value, bOwnership = True) - - # Token statistics. - if TokenInformationClass == TokenStatistics: - TokenInformation = TOKEN_STATISTICS() - _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation) - return TokenInformation # TODO add a class wrapper? - - # Currently unsupported flags. - raise NotImplementedError("TokenInformationClass(%i) not yet supported!" % TokenInformationClass) - -def _internal_GetTokenInformation(hTokenHandle, TokenInformationClass, TokenInformation): - _GetTokenInformation = windll.advapi32.GetTokenInformation - _GetTokenInformation.argtypes = [HANDLE, TOKEN_INFORMATION_CLASS, LPVOID, DWORD, PDWORD] - _GetTokenInformation.restype = bool - _GetTokenInformation.errcheck = RaiseIfZero - - ReturnLength = DWORD(0) - TokenInformationLength = SIZEOF(TokenInformation) - _GetTokenInformation(hTokenHandle, TokenInformationClass, byref(TokenInformation), TokenInformationLength, byref(ReturnLength)) - if ReturnLength.value != TokenInformationLength: - raise ctypes.WinError(ERROR_INSUFFICIENT_BUFFER) - return TokenInformation - -# BOOL WINAPI SetTokenInformation( -# __in HANDLE TokenHandle, -# __in TOKEN_INFORMATION_CLASS TokenInformationClass, -# __in LPVOID TokenInformation, -# __in DWORD TokenInformationLength -# ); - -# XXX TODO - -# BOOL WINAPI CreateProcessWithLogonW( -# __in LPCWSTR lpUsername, -# __in_opt LPCWSTR lpDomain, -# __in LPCWSTR lpPassword, -# __in DWORD dwLogonFlags, -# __in_opt LPCWSTR lpApplicationName, -# __inout_opt LPWSTR lpCommandLine, -# __in DWORD dwCreationFlags, -# __in_opt LPVOID lpEnvironment, -# __in_opt LPCWSTR lpCurrentDirectory, -# __in LPSTARTUPINFOW lpStartupInfo, -# __out LPPROCESS_INFORMATION lpProcessInfo -# ); -def CreateProcessWithLogonW(lpUsername = None, lpDomain = None, lpPassword = None, dwLogonFlags = 0, lpApplicationName = None, lpCommandLine = None, dwCreationFlags = 0, lpEnvironment = None, lpCurrentDirectory = None, lpStartupInfo = None): - _CreateProcessWithLogonW = windll.advapi32.CreateProcessWithLogonW - _CreateProcessWithLogonW.argtypes = [LPWSTR, LPWSTR, LPWSTR, DWORD, LPWSTR, LPWSTR, DWORD, LPVOID, LPWSTR, LPVOID, LPPROCESS_INFORMATION] - _CreateProcessWithLogonW.restype = bool - _CreateProcessWithLogonW.errcheck = RaiseIfZero - - if not lpUsername: - lpUsername = None - if not lpDomain: - lpDomain = None - if not lpPassword: - lpPassword = None - if not lpApplicationName: - lpApplicationName = None - if not lpCommandLine: - lpCommandLine = None - else: - lpCommandLine = ctypes.create_unicode_buffer(lpCommandLine, max(MAX_PATH, len(lpCommandLine))) - if not lpEnvironment: - lpEnvironment = None - else: - lpEnvironment = ctypes.create_unicode_buffer(lpEnvironment) - if not lpCurrentDirectory: - 
lpCurrentDirectory = None - if not lpStartupInfo: - lpStartupInfo = STARTUPINFOW() - lpStartupInfo.cb = sizeof(STARTUPINFOW) - lpStartupInfo.lpReserved = 0 - lpStartupInfo.lpDesktop = 0 - lpStartupInfo.lpTitle = 0 - lpStartupInfo.dwFlags = 0 - lpStartupInfo.cbReserved2 = 0 - lpStartupInfo.lpReserved2 = 0 - lpProcessInformation = PROCESS_INFORMATION() - lpProcessInformation.hProcess = INVALID_HANDLE_VALUE - lpProcessInformation.hThread = INVALID_HANDLE_VALUE - lpProcessInformation.dwProcessId = 0 - lpProcessInformation.dwThreadId = 0 - _CreateProcessWithLogonW(lpUsername, lpDomain, lpPassword, dwLogonFlags, lpApplicationName, lpCommandLine, dwCreationFlags, lpEnvironment, lpCurrentDirectory, byref(lpStartupInfo), byref(lpProcessInformation)) - return ProcessInformation(lpProcessInformation) - -CreateProcessWithLogonA = MakeANSIVersion(CreateProcessWithLogonW) -CreateProcessWithLogon = DefaultStringType(CreateProcessWithLogonA, CreateProcessWithLogonW) - -# BOOL WINAPI CreateProcessWithTokenW( -# __in HANDLE hToken, -# __in DWORD dwLogonFlags, -# __in_opt LPCWSTR lpApplicationName, -# __inout_opt LPWSTR lpCommandLine, -# __in DWORD dwCreationFlags, -# __in_opt LPVOID lpEnvironment, -# __in_opt LPCWSTR lpCurrentDirectory, -# __in LPSTARTUPINFOW lpStartupInfo, -# __out LPPROCESS_INFORMATION lpProcessInfo -# ); -def CreateProcessWithTokenW(hToken = None, dwLogonFlags = 0, lpApplicationName = None, lpCommandLine = None, dwCreationFlags = 0, lpEnvironment = None, lpCurrentDirectory = None, lpStartupInfo = None): - _CreateProcessWithTokenW = windll.advapi32.CreateProcessWithTokenW - _CreateProcessWithTokenW.argtypes = [HANDLE, DWORD, LPWSTR, LPWSTR, DWORD, LPVOID, LPWSTR, LPVOID, LPPROCESS_INFORMATION] - _CreateProcessWithTokenW.restype = bool - _CreateProcessWithTokenW.errcheck = RaiseIfZero - - if not hToken: - hToken = None - if not lpApplicationName: - lpApplicationName = None - if not lpCommandLine: - lpCommandLine = None - else: - lpCommandLine = ctypes.create_unicode_buffer(lpCommandLine, max(MAX_PATH, len(lpCommandLine))) - if not lpEnvironment: - lpEnvironment = None - else: - lpEnvironment = ctypes.create_unicode_buffer(lpEnvironment) - if not lpCurrentDirectory: - lpCurrentDirectory = None - if not lpStartupInfo: - lpStartupInfo = STARTUPINFOW() - lpStartupInfo.cb = sizeof(STARTUPINFOW) - lpStartupInfo.lpReserved = 0 - lpStartupInfo.lpDesktop = 0 - lpStartupInfo.lpTitle = 0 - lpStartupInfo.dwFlags = 0 - lpStartupInfo.cbReserved2 = 0 - lpStartupInfo.lpReserved2 = 0 - lpProcessInformation = PROCESS_INFORMATION() - lpProcessInformation.hProcess = INVALID_HANDLE_VALUE - lpProcessInformation.hThread = INVALID_HANDLE_VALUE - lpProcessInformation.dwProcessId = 0 - lpProcessInformation.dwThreadId = 0 - _CreateProcessWithTokenW(hToken, dwLogonFlags, lpApplicationName, lpCommandLine, dwCreationFlags, lpEnvironment, lpCurrentDirectory, byref(lpStartupInfo), byref(lpProcessInformation)) - return ProcessInformation(lpProcessInformation) - -CreateProcessWithTokenA = MakeANSIVersion(CreateProcessWithTokenW) -CreateProcessWithToken = DefaultStringType(CreateProcessWithTokenA, CreateProcessWithTokenW) - -# BOOL WINAPI CreateProcessAsUser( -# __in_opt HANDLE hToken, -# __in_opt LPCTSTR lpApplicationName, -# __inout_opt LPTSTR lpCommandLine, -# __in_opt LPSECURITY_ATTRIBUTES lpProcessAttributes, -# __in_opt LPSECURITY_ATTRIBUTES lpThreadAttributes, -# __in BOOL bInheritHandles, -# __in DWORD dwCreationFlags, -# __in_opt LPVOID lpEnvironment, -# __in_opt LPCTSTR lpCurrentDirectory, -# __in LPSTARTUPINFO 
lpStartupInfo, -# __out LPPROCESS_INFORMATION lpProcessInformation -# ); -def CreateProcessAsUserA(hToken = None, lpApplicationName = None, lpCommandLine=None, lpProcessAttributes=None, lpThreadAttributes=None, bInheritHandles=False, dwCreationFlags=0, lpEnvironment=None, lpCurrentDirectory=None, lpStartupInfo=None): - _CreateProcessAsUserA = windll.advapi32.CreateProcessAsUserA - _CreateProcessAsUserA.argtypes = [HANDLE, LPSTR, LPSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPSTR, LPVOID, LPPROCESS_INFORMATION] - _CreateProcessAsUserA.restype = bool - _CreateProcessAsUserA.errcheck = RaiseIfZero - - if not lpApplicationName: - lpApplicationName = None - if not lpCommandLine: - lpCommandLine = None - else: - lpCommandLine = ctypes.create_string_buffer(lpCommandLine, max(MAX_PATH, len(lpCommandLine))) - if not lpEnvironment: - lpEnvironment = None - else: - lpEnvironment = ctypes.create_string_buffer(lpEnvironment) - if not lpCurrentDirectory: - lpCurrentDirectory = None - if not lpProcessAttributes: - lpProcessAttributes = None - else: - lpProcessAttributes = byref(lpProcessAttributes) - if not lpThreadAttributes: - lpThreadAttributes = None - else: - lpThreadAttributes = byref(lpThreadAttributes) - if not lpStartupInfo: - lpStartupInfo = STARTUPINFO() - lpStartupInfo.cb = sizeof(STARTUPINFO) - lpStartupInfo.lpReserved = 0 - lpStartupInfo.lpDesktop = 0 - lpStartupInfo.lpTitle = 0 - lpStartupInfo.dwFlags = 0 - lpStartupInfo.cbReserved2 = 0 - lpStartupInfo.lpReserved2 = 0 - lpProcessInformation = PROCESS_INFORMATION() - lpProcessInformation.hProcess = INVALID_HANDLE_VALUE - lpProcessInformation.hThread = INVALID_HANDLE_VALUE - lpProcessInformation.dwProcessId = 0 - lpProcessInformation.dwThreadId = 0 - _CreateProcessAsUserA(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bool(bInheritHandles), dwCreationFlags, lpEnvironment, lpCurrentDirectory, byref(lpStartupInfo), byref(lpProcessInformation)) - return ProcessInformation(lpProcessInformation) - -def CreateProcessAsUserW(hToken = None, lpApplicationName = None, lpCommandLine=None, lpProcessAttributes=None, lpThreadAttributes=None, bInheritHandles=False, dwCreationFlags=0, lpEnvironment=None, lpCurrentDirectory=None, lpStartupInfo=None): - _CreateProcessAsUserW = windll.advapi32.CreateProcessAsUserW - _CreateProcessAsUserW.argtypes = [HANDLE, LPWSTR, LPWSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPWSTR, LPVOID, LPPROCESS_INFORMATION] - _CreateProcessAsUserW.restype = bool - _CreateProcessAsUserW.errcheck = RaiseIfZero - - if not lpApplicationName: - lpApplicationName = None - if not lpCommandLine: - lpCommandLine = None - else: - lpCommandLine = ctypes.create_unicode_buffer(lpCommandLine, max(MAX_PATH, len(lpCommandLine))) - if not lpEnvironment: - lpEnvironment = None - else: - lpEnvironment = ctypes.create_unicode_buffer(lpEnvironment) - if not lpCurrentDirectory: - lpCurrentDirectory = None - if not lpProcessAttributes: - lpProcessAttributes = None - else: - lpProcessAttributes = byref(lpProcessAttributes) - if not lpThreadAttributes: - lpThreadAttributes = None - else: - lpThreadAttributes = byref(lpThreadAttributes) - if not lpStartupInfo: - lpStartupInfo = STARTUPINFO() - lpStartupInfo.cb = sizeof(STARTUPINFO) - lpStartupInfo.lpReserved = 0 - lpStartupInfo.lpDesktop = 0 - lpStartupInfo.lpTitle = 0 - lpStartupInfo.dwFlags = 0 - lpStartupInfo.cbReserved2 = 0 - lpStartupInfo.lpReserved2 = 0 - lpProcessInformation = PROCESS_INFORMATION() - 
lpProcessInformation.hProcess = INVALID_HANDLE_VALUE - lpProcessInformation.hThread = INVALID_HANDLE_VALUE - lpProcessInformation.dwProcessId = 0 - lpProcessInformation.dwThreadId = 0 - _CreateProcessAsUserW(hToken, lpApplicationName, lpCommandLine, lpProcessAttributes, lpThreadAttributes, bool(bInheritHandles), dwCreationFlags, lpEnvironment, lpCurrentDirectory, byref(lpStartupInfo), byref(lpProcessInformation)) - return ProcessInformation(lpProcessInformation) - -CreateProcessAsUser = GuessStringType(CreateProcessAsUserA, CreateProcessAsUserW) - -# VOID CALLBACK WaitChainCallback( -# HWCT WctHandle, -# DWORD_PTR Context, -# DWORD CallbackStatus, -# LPDWORD NodeCount, -# PWAITCHAIN_NODE_INFO NodeInfoArray, -# LPBOOL IsCycle -# ); -PWAITCHAINCALLBACK = WINFUNCTYPE(HWCT, DWORD_PTR, DWORD, LPDWORD, PWAITCHAIN_NODE_INFO, LPBOOL) - -# HWCT WINAPI OpenThreadWaitChainSession( -# __in DWORD Flags, -# __in_opt PWAITCHAINCALLBACK callback -# ); -def OpenThreadWaitChainSession(Flags = 0, callback = None): - _OpenThreadWaitChainSession = windll.advapi32.OpenThreadWaitChainSession - _OpenThreadWaitChainSession.argtypes = [DWORD, PVOID] - _OpenThreadWaitChainSession.restype = HWCT - _OpenThreadWaitChainSession.errcheck = RaiseIfZero - - if callback is not None: - callback = PWAITCHAINCALLBACK(callback) - aHandle = _OpenThreadWaitChainSession(Flags, callback) - return ThreadWaitChainSessionHandle(aHandle) - -# BOOL WINAPI GetThreadWaitChain( -# _In_ HWCT WctHandle, -# _In_opt_ DWORD_PTR Context, -# _In_ DWORD Flags, -# _In_ DWORD ThreadId, -# _Inout_ LPDWORD NodeCount, -# _Out_ PWAITCHAIN_NODE_INFO NodeInfoArray, -# _Out_ LPBOOL IsCycle -# ); -def GetThreadWaitChain(WctHandle, Context = None, Flags = WCTP_GETINFO_ALL_FLAGS, ThreadId = -1, NodeCount = WCT_MAX_NODE_COUNT): - _GetThreadWaitChain = windll.advapi32.GetThreadWaitChain - _GetThreadWaitChain.argtypes = [HWCT, LPDWORD, DWORD, DWORD, LPDWORD, PWAITCHAIN_NODE_INFO, LPBOOL] - _GetThreadWaitChain.restype = bool - _GetThreadWaitChain.errcheck = RaiseIfZero - - dwNodeCount = DWORD(NodeCount) - NodeInfoArray = (WAITCHAIN_NODE_INFO * NodeCount)() - IsCycle = BOOL(0) - _GetThreadWaitChain(WctHandle, Context, Flags, ThreadId, byref(dwNodeCount), ctypes.cast(ctypes.pointer(NodeInfoArray), PWAITCHAIN_NODE_INFO), byref(IsCycle)) - while dwNodeCount.value > NodeCount: - NodeCount = dwNodeCount.value - NodeInfoArray = (WAITCHAIN_NODE_INFO * NodeCount)() - _GetThreadWaitChain(WctHandle, Context, Flags, ThreadId, byref(dwNodeCount), ctypes.cast(ctypes.pointer(NodeInfoArray), PWAITCHAIN_NODE_INFO), byref(IsCycle)) - return ( - [ WaitChainNodeInfo(NodeInfoArray[index]) for index in compat.xrange(dwNodeCount.value) ], - bool(IsCycle.value) - ) - -# VOID WINAPI CloseThreadWaitChainSession( -# __in HWCT WctHandle -# ); -def CloseThreadWaitChainSession(WctHandle): - _CloseThreadWaitChainSession = windll.advapi32.CloseThreadWaitChainSession - _CloseThreadWaitChainSession.argtypes = [HWCT] - _CloseThreadWaitChainSession(WctHandle) - -# BOOL WINAPI SaferCreateLevel( -# __in DWORD dwScopeId, -# __in DWORD dwLevelId, -# __in DWORD OpenFlags, -# __out SAFER_LEVEL_HANDLE *pLevelHandle, -# __reserved LPVOID lpReserved -# ); -def SaferCreateLevel(dwScopeId=SAFER_SCOPEID_USER, dwLevelId=SAFER_LEVELID_NORMALUSER, OpenFlags=0): - _SaferCreateLevel = windll.advapi32.SaferCreateLevel - _SaferCreateLevel.argtypes = [DWORD, DWORD, DWORD, POINTER(SAFER_LEVEL_HANDLE), LPVOID] - _SaferCreateLevel.restype = BOOL - _SaferCreateLevel.errcheck = RaiseIfZero - - hLevelHandle = 
SAFER_LEVEL_HANDLE(INVALID_HANDLE_VALUE) - _SaferCreateLevel(dwScopeId, dwLevelId, OpenFlags, byref(hLevelHandle), None) - return SaferLevelHandle(hLevelHandle.value) - -# BOOL WINAPI SaferIdentifyLevel( -# __in DWORD dwNumProperties, -# __in_opt PSAFER_CODE_PROPERTIES pCodeProperties, -# __out SAFER_LEVEL_HANDLE *pLevelHandle, -# __reserved LPVOID lpReserved -# ); - -# XXX TODO - -# BOOL WINAPI SaferComputeTokenFromLevel( -# __in SAFER_LEVEL_HANDLE LevelHandle, -# __in_opt HANDLE InAccessToken, -# __out PHANDLE OutAccessToken, -# __in DWORD dwFlags, -# __inout_opt LPVOID lpReserved -# ); -def SaferComputeTokenFromLevel(LevelHandle, InAccessToken=None, dwFlags=0): - _SaferComputeTokenFromLevel = windll.advapi32.SaferComputeTokenFromLevel - _SaferComputeTokenFromLevel.argtypes = [SAFER_LEVEL_HANDLE, HANDLE, PHANDLE, DWORD, LPDWORD] - _SaferComputeTokenFromLevel.restype = BOOL - _SaferComputeTokenFromLevel.errcheck = RaiseIfZero - - OutAccessToken = HANDLE(INVALID_HANDLE_VALUE) - lpReserved = DWORD(0) - _SaferComputeTokenFromLevel(LevelHandle, InAccessToken, byref(OutAccessToken), dwFlags, byref(lpReserved)) - return TokenHandle(OutAccessToken.value), lpReserved.value - -# BOOL WINAPI SaferCloseLevel( -# __in SAFER_LEVEL_HANDLE hLevelHandle -# ); -def SaferCloseLevel(hLevelHandle): - _SaferCloseLevel = windll.advapi32.SaferCloseLevel - _SaferCloseLevel.argtypes = [SAFER_LEVEL_HANDLE] - _SaferCloseLevel.restype = BOOL - _SaferCloseLevel.errcheck = RaiseIfZero - - if hasattr(hLevelHandle, 'value'): - _SaferCloseLevel(hLevelHandle.value) - else: - _SaferCloseLevel(hLevelHandle) - -# BOOL SaferiIsExecutableFileType( -# __in LPCWSTR szFullPath, -# __in BOOLEAN bFromShellExecute -# ); -def SaferiIsExecutableFileType(szFullPath, bFromShellExecute = False): - _SaferiIsExecutableFileType = windll.advapi32.SaferiIsExecutableFileType - _SaferiIsExecutableFileType.argtypes = [LPWSTR, BOOLEAN] - _SaferiIsExecutableFileType.restype = BOOL - _SaferiIsExecutableFileType.errcheck = RaiseIfLastError - - SetLastError(ERROR_SUCCESS) - return bool(_SaferiIsExecutableFileType(compat.unicode(szFullPath), bFromShellExecute)) - -# useful alias since I'm likely to misspell it :P -SaferIsExecutableFileType = SaferiIsExecutableFileType - -#------------------------------------------------------------------------------ - -# LONG WINAPI RegCloseKey( -# __in HKEY hKey -# ); -def RegCloseKey(hKey): - if hasattr(hKey, 'value'): - value = hKey.value - else: - value = hKey - - if value in ( - HKEY_CLASSES_ROOT, - HKEY_CURRENT_USER, - HKEY_LOCAL_MACHINE, - HKEY_USERS, - HKEY_PERFORMANCE_DATA, - HKEY_CURRENT_CONFIG - ): - return - - _RegCloseKey = windll.advapi32.RegCloseKey - _RegCloseKey.argtypes = [HKEY] - _RegCloseKey.restype = LONG - _RegCloseKey.errcheck = RaiseIfNotErrorSuccess - _RegCloseKey(hKey) - -# LONG WINAPI RegConnectRegistry( -# __in_opt LPCTSTR lpMachineName, -# __in HKEY hKey, -# __out PHKEY phkResult -# ); -def RegConnectRegistryA(lpMachineName = None, hKey = HKEY_LOCAL_MACHINE): - _RegConnectRegistryA = windll.advapi32.RegConnectRegistryA - _RegConnectRegistryA.argtypes = [LPSTR, HKEY, PHKEY] - _RegConnectRegistryA.restype = LONG - _RegConnectRegistryA.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegConnectRegistryA(lpMachineName, hKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -def RegConnectRegistryW(lpMachineName = None, hKey = HKEY_LOCAL_MACHINE): - _RegConnectRegistryW = windll.advapi32.RegConnectRegistryW - _RegConnectRegistryW.argtypes = [LPWSTR, 
HKEY, PHKEY] - _RegConnectRegistryW.restype = LONG - _RegConnectRegistryW.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegConnectRegistryW(lpMachineName, hKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -RegConnectRegistry = GuessStringType(RegConnectRegistryA, RegConnectRegistryW) - -# LONG WINAPI RegCreateKey( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __out PHKEY phkResult -# ); -def RegCreateKeyA(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None): - _RegCreateKeyA = windll.advapi32.RegCreateKeyA - _RegCreateKeyA.argtypes = [HKEY, LPSTR, PHKEY] - _RegCreateKeyA.restype = LONG - _RegCreateKeyA.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegCreateKeyA(hKey, lpSubKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -def RegCreateKeyW(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None): - _RegCreateKeyW = windll.advapi32.RegCreateKeyW - _RegCreateKeyW.argtypes = [HKEY, LPWSTR, PHKEY] - _RegCreateKeyW.restype = LONG - _RegCreateKeyW.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegCreateKeyW(hKey, lpSubKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -RegCreateKey = GuessStringType(RegCreateKeyA, RegCreateKeyW) - -# LONG WINAPI RegCreateKeyEx( -# __in HKEY hKey, -# __in LPCTSTR lpSubKey, -# __reserved DWORD Reserved, -# __in_opt LPTSTR lpClass, -# __in DWORD dwOptions, -# __in REGSAM samDesired, -# __in_opt LPSECURITY_ATTRIBUTES lpSecurityAttributes, -# __out PHKEY phkResult, -# __out_opt LPDWORD lpdwDisposition -# ); - -# XXX TODO - -# LONG WINAPI RegOpenKey( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __out PHKEY phkResult -# ); -def RegOpenKeyA(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None): - _RegOpenKeyA = windll.advapi32.RegOpenKeyA - _RegOpenKeyA.argtypes = [HKEY, LPSTR, PHKEY] - _RegOpenKeyA.restype = LONG - _RegOpenKeyA.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenKeyA(hKey, lpSubKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -def RegOpenKeyW(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None): - _RegOpenKeyW = windll.advapi32.RegOpenKeyW - _RegOpenKeyW.argtypes = [HKEY, LPWSTR, PHKEY] - _RegOpenKeyW.restype = LONG - _RegOpenKeyW.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenKeyW(hKey, lpSubKey, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -RegOpenKey = GuessStringType(RegOpenKeyA, RegOpenKeyW) - -# LONG WINAPI RegOpenKeyEx( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __reserved DWORD ulOptions, -# __in REGSAM samDesired, -# __out PHKEY phkResult -# ); -def RegOpenKeyExA(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None, samDesired = KEY_ALL_ACCESS): - _RegOpenKeyExA = windll.advapi32.RegOpenKeyExA - _RegOpenKeyExA.argtypes = [HKEY, LPSTR, DWORD, REGSAM, PHKEY] - _RegOpenKeyExA.restype = LONG - _RegOpenKeyExA.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenKeyExA(hKey, lpSubKey, 0, samDesired, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -def RegOpenKeyExW(hKey = HKEY_LOCAL_MACHINE, lpSubKey = None, samDesired = KEY_ALL_ACCESS): - _RegOpenKeyExW = windll.advapi32.RegOpenKeyExW - _RegOpenKeyExW.argtypes = [HKEY, LPWSTR, DWORD, REGSAM, PHKEY] - _RegOpenKeyExW.restype = LONG - _RegOpenKeyExW.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenKeyExW(hKey, lpSubKey, 0, samDesired, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - 
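For orientation, the registry helpers in this deleted module follow one pattern: open a key and get a `RegistryKeyHandle`, query a value together with its type, then close the key. Below is a minimal usage sketch, not part of the original diff; it assumes a Windows host, that the wrapper module is importable under the hypothetical name `advapi32`, and that `KEY_READ` is defined among the module's access-mask constants (the key path and value name are illustrative only).

```python
# Minimal usage sketch of the registry wrappers shown above (not part of the
# original diff). Assumptions: a Windows host, the wrapper module is importable
# as `advapi32` (hypothetical name), and KEY_READ is defined among the module's
# access-mask constants; the key path and value name are illustrative only.
import advapi32

# Open an existing key read-only instead of the KEY_ALL_ACCESS default.
hKey = advapi32.RegOpenKeyEx(
    advapi32.HKEY_LOCAL_MACHINE,
    r"SOFTWARE\Microsoft\Windows NT\CurrentVersion",
    samDesired=advapi32.KEY_READ,
)

# With bGetData left at its default of True, RegQueryValueEx returns (data, type).
data, reg_type = advapi32.RegQueryValueEx(hKey, "ProductName")
print(reg_type, data)

# Explicit cleanup, mirroring the raw Win32 flow wrapped by this module.
advapi32.RegCloseKey(hKey)
```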
-RegOpenKeyEx = GuessStringType(RegOpenKeyExA, RegOpenKeyExW) - -# LONG WINAPI RegOpenCurrentUser( -# __in REGSAM samDesired, -# __out PHKEY phkResult -# ); -def RegOpenCurrentUser(samDesired = KEY_ALL_ACCESS): - _RegOpenCurrentUser = windll.advapi32.RegOpenCurrentUser - _RegOpenCurrentUser.argtypes = [REGSAM, PHKEY] - _RegOpenCurrentUser.restype = LONG - _RegOpenCurrentUser.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenCurrentUser(samDesired, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -# LONG WINAPI RegOpenUserClassesRoot( -# __in HANDLE hToken, -# __reserved DWORD dwOptions, -# __in REGSAM samDesired, -# __out PHKEY phkResult -# ); -def RegOpenUserClassesRoot(hToken, samDesired = KEY_ALL_ACCESS): - _RegOpenUserClassesRoot = windll.advapi32.RegOpenUserClassesRoot - _RegOpenUserClassesRoot.argtypes = [HANDLE, DWORD, REGSAM, PHKEY] - _RegOpenUserClassesRoot.restype = LONG - _RegOpenUserClassesRoot.errcheck = RaiseIfNotErrorSuccess - - hkResult = HKEY(INVALID_HANDLE_VALUE) - _RegOpenUserClassesRoot(hToken, 0, samDesired, byref(hkResult)) - return RegistryKeyHandle(hkResult.value) - -# LONG WINAPI RegQueryValue( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __out_opt LPTSTR lpValue, -# __inout_opt PLONG lpcbValue -# ); -def RegQueryValueA(hKey, lpSubKey = None): - _RegQueryValueA = windll.advapi32.RegQueryValueA - _RegQueryValueA.argtypes = [HKEY, LPSTR, LPVOID, PLONG] - _RegQueryValueA.restype = LONG - _RegQueryValueA.errcheck = RaiseIfNotErrorSuccess - - cbValue = LONG(0) - _RegQueryValueA(hKey, lpSubKey, None, byref(cbValue)) - lpValue = ctypes.create_string_buffer(cbValue.value) - _RegQueryValueA(hKey, lpSubKey, lpValue, byref(cbValue)) - return lpValue.value - -def RegQueryValueW(hKey, lpSubKey = None): - _RegQueryValueW = windll.advapi32.RegQueryValueW - _RegQueryValueW.argtypes = [HKEY, LPWSTR, LPVOID, PLONG] - _RegQueryValueW.restype = LONG - _RegQueryValueW.errcheck = RaiseIfNotErrorSuccess - - cbValue = LONG(0) - _RegQueryValueW(hKey, lpSubKey, None, byref(cbValue)) - lpValue = ctypes.create_unicode_buffer(cbValue.value * sizeof(WCHAR)) - _RegQueryValueW(hKey, lpSubKey, lpValue, byref(cbValue)) - return lpValue.value - -RegQueryValue = GuessStringType(RegQueryValueA, RegQueryValueW) - -# LONG WINAPI RegQueryValueEx( -# __in HKEY hKey, -# __in_opt LPCTSTR lpValueName, -# __reserved LPDWORD lpReserved, -# __out_opt LPDWORD lpType, -# __out_opt LPBYTE lpData, -# __inout_opt LPDWORD lpcbData -# ); -def _internal_RegQueryValueEx(ansi, hKey, lpValueName = None, bGetData = True): - _RegQueryValueEx = _caller_RegQueryValueEx(ansi) - - cbData = DWORD(0) - dwType = DWORD(-1) - _RegQueryValueEx(hKey, lpValueName, None, byref(dwType), None, byref(cbData)) - Type = dwType.value - - if not bGetData: - return cbData.value, Type - - if Type in (REG_DWORD, REG_DWORD_BIG_ENDIAN): # REG_DWORD_LITTLE_ENDIAN - if cbData.value != 4: - raise ValueError("REG_DWORD value of size %d" % cbData.value) - dwData = DWORD(0) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(dwData), byref(cbData)) - return dwData.value, Type - - if Type == REG_QWORD: # REG_QWORD_LITTLE_ENDIAN - if cbData.value != 8: - raise ValueError("REG_QWORD value of size %d" % cbData.value) - qwData = QWORD(long(0)) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(qwData), byref(cbData)) - return qwData.value, Type - - if Type in (REG_SZ, REG_EXPAND_SZ): - if ansi: - szData = ctypes.create_string_buffer(cbData.value) - else: - szData = 
ctypes.create_unicode_buffer(cbData.value) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(szData), byref(cbData)) - return szData.value, Type - - if Type == REG_MULTI_SZ: - if ansi: - szData = ctypes.create_string_buffer(cbData.value) - else: - szData = ctypes.create_unicode_buffer(cbData.value) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(szData), byref(cbData)) - Data = szData[:] - if ansi: - aData = Data.split('\0') - else: - aData = Data.split(u'\0') - aData = [token for token in aData if token] - return aData, Type - - if Type == REG_LINK: - szData = ctypes.create_unicode_buffer(cbData.value) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(szData), byref(cbData)) - return szData.value, Type - - # REG_BINARY, REG_NONE, and any future types - szData = ctypes.create_string_buffer(cbData.value) - _RegQueryValueEx(hKey, lpValueName, None, None, byref(szData), byref(cbData)) - return szData.raw, Type - -def _caller_RegQueryValueEx(ansi): - if ansi: - _RegQueryValueEx = windll.advapi32.RegQueryValueExA - _RegQueryValueEx.argtypes = [HKEY, LPSTR, LPVOID, PDWORD, LPVOID, PDWORD] - else: - _RegQueryValueEx = windll.advapi32.RegQueryValueExW - _RegQueryValueEx.argtypes = [HKEY, LPWSTR, LPVOID, PDWORD, LPVOID, PDWORD] - _RegQueryValueEx.restype = LONG - _RegQueryValueEx.errcheck = RaiseIfNotErrorSuccess - return _RegQueryValueEx - -# see _internal_RegQueryValueEx -def RegQueryValueExA(hKey, lpValueName = None, bGetData = True): - return _internal_RegQueryValueEx(True, hKey, lpValueName, bGetData) - -# see _internal_RegQueryValueEx -def RegQueryValueExW(hKey, lpValueName = None, bGetData = True): - return _internal_RegQueryValueEx(False, hKey, lpValueName, bGetData) - -RegQueryValueEx = GuessStringType(RegQueryValueExA, RegQueryValueExW) - -# LONG WINAPI RegSetValueEx( -# __in HKEY hKey, -# __in_opt LPCTSTR lpValueName, -# __reserved DWORD Reserved, -# __in DWORD dwType, -# __in_opt const BYTE *lpData, -# __in DWORD cbData -# ); -def RegSetValueEx(hKey, lpValueName = None, lpData = None, dwType = None): - - # Determine which version of the API to use, ANSI or Widechar. - if lpValueName is None: - if isinstance(lpData, GuessStringType.t_ansi): - ansi = True - elif isinstance(lpData, GuessStringType.t_unicode): - ansi = False - else: - ansi = (GuessStringType.t_ansi == GuessStringType.t_default) - elif isinstance(lpValueName, GuessStringType.t_ansi): - ansi = True - elif isinstance(lpValueName, GuessStringType.t_unicode): - ansi = False - else: - raise TypeError("String expected, got %s instead" % type(lpValueName)) - - # Autodetect the type when not given. - # TODO: improve detection of DWORD and QWORD by seeing if the value "fits". - if dwType is None: - if lpValueName is None: - dwType = REG_SZ - elif lpData is None: - dwType = REG_NONE - elif isinstance(lpData, GuessStringType.t_ansi): - dwType = REG_SZ - elif isinstance(lpData, GuessStringType.t_unicode): - dwType = REG_SZ - elif isinstance(lpData, int): - dwType = REG_DWORD - elif isinstance(lpData, long): - dwType = REG_QWORD - else: - dwType = REG_BINARY - - # Load the ctypes caller. - if ansi: - _RegSetValueEx = windll.advapi32.RegSetValueExA - _RegSetValueEx.argtypes = [HKEY, LPSTR, DWORD, DWORD, LPVOID, DWORD] - else: - _RegSetValueEx = windll.advapi32.RegSetValueExW - _RegSetValueEx.argtypes = [HKEY, LPWSTR, DWORD, DWORD, LPVOID, DWORD] - _RegSetValueEx.restype = LONG - _RegSetValueEx.errcheck = RaiseIfNotErrorSuccess - - # Convert the arguments so ctypes can understand them. 
- if lpData is None: - DataRef = None - DataSize = 0 - else: - if dwType in (REG_DWORD, REG_DWORD_BIG_ENDIAN): # REG_DWORD_LITTLE_ENDIAN - Data = DWORD(lpData) - elif dwType == REG_QWORD: # REG_QWORD_LITTLE_ENDIAN - Data = QWORD(lpData) - elif dwType in (REG_SZ, REG_EXPAND_SZ): - if ansi: - Data = ctypes.create_string_buffer(lpData) - else: - Data = ctypes.create_unicode_buffer(lpData) - elif dwType == REG_MULTI_SZ: - if ansi: - Data = ctypes.create_string_buffer('\0'.join(lpData) + '\0\0') - else: - Data = ctypes.create_unicode_buffer(u'\0'.join(lpData) + u'\0\0') - elif dwType == REG_LINK: - Data = ctypes.create_unicode_buffer(lpData) - else: - Data = ctypes.create_string_buffer(lpData) - DataRef = byref(Data) - DataSize = sizeof(Data) - - # Call the API with the converted arguments. - _RegSetValueEx(hKey, lpValueName, 0, dwType, DataRef, DataSize) - -# No "GuessStringType" here since detection is done inside. -RegSetValueExA = RegSetValueExW = RegSetValueEx - -# LONG WINAPI RegEnumKey( -# __in HKEY hKey, -# __in DWORD dwIndex, -# __out LPTSTR lpName, -# __in DWORD cchName -# ); -def RegEnumKeyA(hKey, dwIndex): - _RegEnumKeyA = windll.advapi32.RegEnumKeyA - _RegEnumKeyA.argtypes = [HKEY, DWORD, LPSTR, DWORD] - _RegEnumKeyA.restype = LONG - - cchName = 1024 - while True: - lpName = ctypes.create_string_buffer(cchName) - errcode = _RegEnumKeyA(hKey, dwIndex, lpName, cchName) - if errcode != ERROR_MORE_DATA: - break - cchName = cchName + 1024 - if cchName > 65536: - raise ctypes.WinError(errcode) - if errcode == ERROR_NO_MORE_ITEMS: - return None - if errcode != ERROR_SUCCESS: - raise ctypes.WinError(errcode) - return lpName.value - -def RegEnumKeyW(hKey, dwIndex): - _RegEnumKeyW = windll.advapi32.RegEnumKeyW - _RegEnumKeyW.argtypes = [HKEY, DWORD, LPWSTR, DWORD] - _RegEnumKeyW.restype = LONG - - cchName = 512 - while True: - lpName = ctypes.create_unicode_buffer(cchName) - errcode = _RegEnumKeyW(hKey, dwIndex, lpName, cchName * 2) - if errcode != ERROR_MORE_DATA: - break - cchName = cchName + 512 - if cchName > 32768: - raise ctypes.WinError(errcode) - if errcode == ERROR_NO_MORE_ITEMS: - return None - if errcode != ERROR_SUCCESS: - raise ctypes.WinError(errcode) - return lpName.value - -RegEnumKey = DefaultStringType(RegEnumKeyA, RegEnumKeyW) - -# LONG WINAPI RegEnumKeyEx( -# __in HKEY hKey, -# __in DWORD dwIndex, -# __out LPTSTR lpName, -# __inout LPDWORD lpcName, -# __reserved LPDWORD lpReserved, -# __inout LPTSTR lpClass, -# __inout_opt LPDWORD lpcClass, -# __out_opt PFILETIME lpftLastWriteTime -# ); - -# XXX TODO - -# LONG WINAPI RegEnumValue( -# __in HKEY hKey, -# __in DWORD dwIndex, -# __out LPTSTR lpValueName, -# __inout LPDWORD lpcchValueName, -# __reserved LPDWORD lpReserved, -# __out_opt LPDWORD lpType, -# __out_opt LPBYTE lpData, -# __inout_opt LPDWORD lpcbData -# ); -def _internal_RegEnumValue(ansi, hKey, dwIndex, bGetData = True): - if ansi: - _RegEnumValue = windll.advapi32.RegEnumValueA - _RegEnumValue.argtypes = [HKEY, DWORD, LPSTR, LPDWORD, LPVOID, LPDWORD, LPVOID, LPDWORD] - else: - _RegEnumValue = windll.advapi32.RegEnumValueW - _RegEnumValue.argtypes = [HKEY, DWORD, LPWSTR, LPDWORD, LPVOID, LPDWORD, LPVOID, LPDWORD] - _RegEnumValue.restype = LONG - - cchValueName = DWORD(1024) - dwType = DWORD(-1) - lpcchValueName = byref(cchValueName) - lpType = byref(dwType) - if ansi: - lpValueName = ctypes.create_string_buffer(cchValueName.value) - else: - lpValueName = ctypes.create_unicode_buffer(cchValueName.value) - if bGetData: - cbData = DWORD(0) - lpcbData = byref(cbData) - 
else: - lpcbData = None - lpData = None - errcode = _RegEnumValue(hKey, dwIndex, lpValueName, lpcchValueName, None, lpType, lpData, lpcbData) - - if errcode == ERROR_MORE_DATA or (bGetData and errcode == ERROR_SUCCESS): - if ansi: - cchValueName.value = cchValueName.value + sizeof(CHAR) - lpValueName = ctypes.create_string_buffer(cchValueName.value) - else: - cchValueName.value = cchValueName.value + sizeof(WCHAR) - lpValueName = ctypes.create_unicode_buffer(cchValueName.value) - - if bGetData: - Type = dwType.value - - if Type in (REG_DWORD, REG_DWORD_BIG_ENDIAN): # REG_DWORD_LITTLE_ENDIAN - if cbData.value != sizeof(DWORD): - raise ValueError("REG_DWORD value of size %d" % cbData.value) - Data = DWORD(0) - - elif Type == REG_QWORD: # REG_QWORD_LITTLE_ENDIAN - if cbData.value != sizeof(QWORD): - raise ValueError("REG_QWORD value of size %d" % cbData.value) - Data = QWORD(long(0)) - - elif Type in (REG_SZ, REG_EXPAND_SZ, REG_MULTI_SZ): - if ansi: - Data = ctypes.create_string_buffer(cbData.value) - else: - Data = ctypes.create_unicode_buffer(cbData.value) - - elif Type == REG_LINK: - Data = ctypes.create_unicode_buffer(cbData.value) - - else: # REG_BINARY, REG_NONE, and any future types - Data = ctypes.create_string_buffer(cbData.value) - - lpData = byref(Data) - - errcode = _RegEnumValue(hKey, dwIndex, lpValueName, lpcchValueName, None, lpType, lpData, lpcbData) - - if errcode == ERROR_NO_MORE_ITEMS: - return None - #if errcode != ERROR_SUCCESS: - # raise ctypes.WinError(errcode) - - if not bGetData: - return lpValueName.value, dwType.value - - if Type in (REG_DWORD, REG_DWORD_BIG_ENDIAN, REG_QWORD, REG_SZ, REG_EXPAND_SZ, REG_LINK): # REG_DWORD_LITTLE_ENDIAN, REG_QWORD_LITTLE_ENDIAN - return lpValueName.value, dwType.value, Data.value - - if Type == REG_MULTI_SZ: - sData = Data[:] - del Data - if ansi: - aData = sData.split('\0') - else: - aData = sData.split(u'\0') - aData = [token for token in aData if token] - return lpValueName.value, dwType.value, aData - - # REG_BINARY, REG_NONE, and any future types - return lpValueName.value, dwType.value, Data.raw - -def RegEnumValueA(hKey, dwIndex, bGetData = True): - return _internal_RegEnumValue(True, hKey, dwIndex, bGetData) - -def RegEnumValueW(hKey, dwIndex, bGetData = True): - return _internal_RegEnumValue(False, hKey, dwIndex, bGetData) - -RegEnumValue = DefaultStringType(RegEnumValueA, RegEnumValueW) - -# XXX TODO - -# LONG WINAPI RegSetKeyValue( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __in_opt LPCTSTR lpValueName, -# __in DWORD dwType, -# __in_opt LPCVOID lpData, -# __in DWORD cbData -# ); - -# XXX TODO - -# LONG WINAPI RegQueryMultipleValues( -# __in HKEY hKey, -# __out PVALENT val_list, -# __in DWORD num_vals, -# __out_opt LPTSTR lpValueBuf, -# __inout_opt LPDWORD ldwTotsize -# ); - -# XXX TODO - -# LONG WINAPI RegDeleteValue( -# __in HKEY hKey, -# __in_opt LPCTSTR lpValueName -# ); -def RegDeleteValueA(hKeySrc, lpValueName = None): - _RegDeleteValueA = windll.advapi32.RegDeleteValueA - _RegDeleteValueA.argtypes = [HKEY, LPSTR] - _RegDeleteValueA.restype = LONG - _RegDeleteValueA.errcheck = RaiseIfNotErrorSuccess - _RegDeleteValueA(hKeySrc, lpValueName) -def RegDeleteValueW(hKeySrc, lpValueName = None): - _RegDeleteValueW = windll.advapi32.RegDeleteValueW - _RegDeleteValueW.argtypes = [HKEY, LPWSTR] - _RegDeleteValueW.restype = LONG - _RegDeleteValueW.errcheck = RaiseIfNotErrorSuccess - _RegDeleteValueW(hKeySrc, lpValueName) -RegDeleteValue = GuessStringType(RegDeleteValueA, RegDeleteValueW) - -# LONG WINAPI 
RegDeleteKeyValue( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey, -# __in_opt LPCTSTR lpValueName -# ); -def RegDeleteKeyValueA(hKeySrc, lpSubKey = None, lpValueName = None): - _RegDeleteKeyValueA = windll.advapi32.RegDeleteKeyValueA - _RegDeleteKeyValueA.argtypes = [HKEY, LPSTR, LPSTR] - _RegDeleteKeyValueA.restype = LONG - _RegDeleteKeyValueA.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyValueA(hKeySrc, lpSubKey, lpValueName) -def RegDeleteKeyValueW(hKeySrc, lpSubKey = None, lpValueName = None): - _RegDeleteKeyValueW = windll.advapi32.RegDeleteKeyValueW - _RegDeleteKeyValueW.argtypes = [HKEY, LPWSTR, LPWSTR] - _RegDeleteKeyValueW.restype = LONG - _RegDeleteKeyValueW.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyValueW(hKeySrc, lpSubKey, lpValueName) -RegDeleteKeyValue = GuessStringType(RegDeleteKeyValueA, RegDeleteKeyValueW) - -# LONG WINAPI RegDeleteKey( -# __in HKEY hKey, -# __in LPCTSTR lpSubKey -# ); -def RegDeleteKeyA(hKeySrc, lpSubKey = None): - _RegDeleteKeyA = windll.advapi32.RegDeleteKeyA - _RegDeleteKeyA.argtypes = [HKEY, LPSTR] - _RegDeleteKeyA.restype = LONG - _RegDeleteKeyA.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyA(hKeySrc, lpSubKey) -def RegDeleteKeyW(hKeySrc, lpSubKey = None): - _RegDeleteKeyW = windll.advapi32.RegDeleteKeyW - _RegDeleteKeyW.argtypes = [HKEY, LPWSTR] - _RegDeleteKeyW.restype = LONG - _RegDeleteKeyW.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyW(hKeySrc, lpSubKey) -RegDeleteKey = GuessStringType(RegDeleteKeyA, RegDeleteKeyW) - -# LONG WINAPI RegDeleteKeyEx( -# __in HKEY hKey, -# __in LPCTSTR lpSubKey, -# __in REGSAM samDesired, -# __reserved DWORD Reserved -# ); - -def RegDeleteKeyExA(hKeySrc, lpSubKey = None, samDesired = KEY_WOW64_32KEY): - _RegDeleteKeyExA = windll.advapi32.RegDeleteKeyExA - _RegDeleteKeyExA.argtypes = [HKEY, LPSTR, REGSAM, DWORD] - _RegDeleteKeyExA.restype = LONG - _RegDeleteKeyExA.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyExA(hKeySrc, lpSubKey, samDesired, 0) -def RegDeleteKeyExW(hKeySrc, lpSubKey = None, samDesired = KEY_WOW64_32KEY): - _RegDeleteKeyExW = windll.advapi32.RegDeleteKeyExW - _RegDeleteKeyExW.argtypes = [HKEY, LPWSTR, REGSAM, DWORD] - _RegDeleteKeyExW.restype = LONG - _RegDeleteKeyExW.errcheck = RaiseIfNotErrorSuccess - _RegDeleteKeyExW(hKeySrc, lpSubKey, samDesired, 0) -RegDeleteKeyEx = GuessStringType(RegDeleteKeyExA, RegDeleteKeyExW) - -# LONG WINAPI RegCopyTree( -# __in HKEY hKeySrc, -# __in_opt LPCTSTR lpSubKey, -# __in HKEY hKeyDest -# ); -def RegCopyTreeA(hKeySrc, lpSubKey, hKeyDest): - _RegCopyTreeA = windll.advapi32.RegCopyTreeA - _RegCopyTreeA.argtypes = [HKEY, LPSTR, HKEY] - _RegCopyTreeA.restype = LONG - _RegCopyTreeA.errcheck = RaiseIfNotErrorSuccess - _RegCopyTreeA(hKeySrc, lpSubKey, hKeyDest) -def RegCopyTreeW(hKeySrc, lpSubKey, hKeyDest): - _RegCopyTreeW = windll.advapi32.RegCopyTreeW - _RegCopyTreeW.argtypes = [HKEY, LPWSTR, HKEY] - _RegCopyTreeW.restype = LONG - _RegCopyTreeW.errcheck = RaiseIfNotErrorSuccess - _RegCopyTreeW(hKeySrc, lpSubKey, hKeyDest) -RegCopyTree = GuessStringType(RegCopyTreeA, RegCopyTreeW) - -# LONG WINAPI RegDeleteTree( -# __in HKEY hKey, -# __in_opt LPCTSTR lpSubKey -# ); -def RegDeleteTreeA(hKey, lpSubKey = None): - _RegDeleteTreeA = windll.advapi32.RegDeleteTreeA - _RegDeleteTreeA.argtypes = [HKEY, LPWSTR] - _RegDeleteTreeA.restype = LONG - _RegDeleteTreeA.errcheck = RaiseIfNotErrorSuccess - _RegDeleteTreeA(hKey, lpSubKey) -def RegDeleteTreeW(hKey, lpSubKey = None): - _RegDeleteTreeW = windll.advapi32.RegDeleteTreeW - _RegDeleteTreeW.argtypes = [HKEY, 
LPWSTR] - _RegDeleteTreeW.restype = LONG - _RegDeleteTreeW.errcheck = RaiseIfNotErrorSuccess - _RegDeleteTreeW(hKey, lpSubKey) -RegDeleteTree = GuessStringType(RegDeleteTreeA, RegDeleteTreeW) - -# LONG WINAPI RegFlushKey( -# __in HKEY hKey -# ); -def RegFlushKey(hKey): - _RegFlushKey = windll.advapi32.RegFlushKey - _RegFlushKey.argtypes = [HKEY] - _RegFlushKey.restype = LONG - _RegFlushKey.errcheck = RaiseIfNotErrorSuccess - _RegFlushKey(hKey) - -# LONG WINAPI RegLoadMUIString( -# _In_ HKEY hKey, -# _In_opt_ LPCTSTR pszValue, -# _Out_opt_ LPTSTR pszOutBuf, -# _In_ DWORD cbOutBuf, -# _Out_opt_ LPDWORD pcbData, -# _In_ DWORD Flags, -# _In_opt_ LPCTSTR pszDirectory -# ); - -# TO DO - -#------------------------------------------------------------------------------ - -# BOOL WINAPI CloseServiceHandle( -# _In_ SC_HANDLE hSCObject -# ); -def CloseServiceHandle(hSCObject): - _CloseServiceHandle = windll.advapi32.CloseServiceHandle - _CloseServiceHandle.argtypes = [SC_HANDLE] - _CloseServiceHandle.restype = bool - _CloseServiceHandle.errcheck = RaiseIfZero - - if isinstance(hSCObject, Handle): - # Prevents the handle from being closed without notifying the Handle object. - hSCObject.close() - else: - _CloseServiceHandle(hSCObject) - -# SC_HANDLE WINAPI OpenSCManager( -# _In_opt_ LPCTSTR lpMachineName, -# _In_opt_ LPCTSTR lpDatabaseName, -# _In_ DWORD dwDesiredAccess -# ); -def OpenSCManagerA(lpMachineName = None, lpDatabaseName = None, dwDesiredAccess = SC_MANAGER_ALL_ACCESS): - _OpenSCManagerA = windll.advapi32.OpenSCManagerA - _OpenSCManagerA.argtypes = [LPSTR, LPSTR, DWORD] - _OpenSCManagerA.restype = SC_HANDLE - _OpenSCManagerA.errcheck = RaiseIfZero - - hSCObject = _OpenSCManagerA(lpMachineName, lpDatabaseName, dwDesiredAccess) - return ServiceControlManagerHandle(hSCObject) - -def OpenSCManagerW(lpMachineName = None, lpDatabaseName = None, dwDesiredAccess = SC_MANAGER_ALL_ACCESS): - _OpenSCManagerW = windll.advapi32.OpenSCManagerW - _OpenSCManagerW.argtypes = [LPWSTR, LPWSTR, DWORD] - _OpenSCManagerW.restype = SC_HANDLE - _OpenSCManagerW.errcheck = RaiseIfZero - - hSCObject = _OpenSCManagerA(lpMachineName, lpDatabaseName, dwDesiredAccess) - return ServiceControlManagerHandle(hSCObject) - -OpenSCManager = GuessStringType(OpenSCManagerA, OpenSCManagerW) - -# SC_HANDLE WINAPI OpenService( -# _In_ SC_HANDLE hSCManager, -# _In_ LPCTSTR lpServiceName, -# _In_ DWORD dwDesiredAccess -# ); -def OpenServiceA(hSCManager, lpServiceName, dwDesiredAccess = SERVICE_ALL_ACCESS): - _OpenServiceA = windll.advapi32.OpenServiceA - _OpenServiceA.argtypes = [SC_HANDLE, LPSTR, DWORD] - _OpenServiceA.restype = SC_HANDLE - _OpenServiceA.errcheck = RaiseIfZero - return ServiceHandle( _OpenServiceA(hSCManager, lpServiceName, dwDesiredAccess) ) - -def OpenServiceW(hSCManager, lpServiceName, dwDesiredAccess = SERVICE_ALL_ACCESS): - _OpenServiceW = windll.advapi32.OpenServiceW - _OpenServiceW.argtypes = [SC_HANDLE, LPWSTR, DWORD] - _OpenServiceW.restype = SC_HANDLE - _OpenServiceW.errcheck = RaiseIfZero - return ServiceHandle( _OpenServiceW(hSCManager, lpServiceName, dwDesiredAccess) ) - -OpenService = GuessStringType(OpenServiceA, OpenServiceW) - -# SC_HANDLE WINAPI CreateService( -# _In_ SC_HANDLE hSCManager, -# _In_ LPCTSTR lpServiceName, -# _In_opt_ LPCTSTR lpDisplayName, -# _In_ DWORD dwDesiredAccess, -# _In_ DWORD dwServiceType, -# _In_ DWORD dwStartType, -# _In_ DWORD dwErrorControl, -# _In_opt_ LPCTSTR lpBinaryPathName, -# _In_opt_ LPCTSTR lpLoadOrderGroup, -# _Out_opt_ LPDWORD lpdwTagId, -# _In_opt_ LPCTSTR 
lpDependencies, -# _In_opt_ LPCTSTR lpServiceStartName, -# _In_opt_ LPCTSTR lpPassword -# ); -def CreateServiceA(hSCManager, lpServiceName, - lpDisplayName = None, - dwDesiredAccess = SERVICE_ALL_ACCESS, - dwServiceType = SERVICE_WIN32_OWN_PROCESS, - dwStartType = SERVICE_DEMAND_START, - dwErrorControl = SERVICE_ERROR_NORMAL, - lpBinaryPathName = None, - lpLoadOrderGroup = None, - lpDependencies = None, - lpServiceStartName = None, - lpPassword = None): - - _CreateServiceA = windll.advapi32.CreateServiceA - _CreateServiceA.argtypes = [SC_HANDLE, LPSTR, LPSTR, DWORD, DWORD, DWORD, DWORD, LPSTR, LPSTR, LPDWORD, LPSTR, LPSTR, LPSTR] - _CreateServiceA.restype = SC_HANDLE - _CreateServiceA.errcheck = RaiseIfZero - - dwTagId = DWORD(0) - hService = _CreateServiceA(hSCManager, lpServiceName, dwDesiredAccess, dwServiceType, dwStartType, dwErrorControl, lpBinaryPathName, lpLoadOrderGroup, byref(dwTagId), lpDependencies, lpServiceStartName, lpPassword) - return ServiceHandle(hService), dwTagId.value - -def CreateServiceW(hSCManager, lpServiceName, - lpDisplayName = None, - dwDesiredAccess = SERVICE_ALL_ACCESS, - dwServiceType = SERVICE_WIN32_OWN_PROCESS, - dwStartType = SERVICE_DEMAND_START, - dwErrorControl = SERVICE_ERROR_NORMAL, - lpBinaryPathName = None, - lpLoadOrderGroup = None, - lpDependencies = None, - lpServiceStartName = None, - lpPassword = None): - - _CreateServiceW = windll.advapi32.CreateServiceW - _CreateServiceW.argtypes = [SC_HANDLE, LPWSTR, LPWSTR, DWORD, DWORD, DWORD, DWORD, LPWSTR, LPWSTR, LPDWORD, LPWSTR, LPWSTR, LPWSTR] - _CreateServiceW.restype = SC_HANDLE - _CreateServiceW.errcheck = RaiseIfZero - - dwTagId = DWORD(0) - hService = _CreateServiceW(hSCManager, lpServiceName, dwDesiredAccess, dwServiceType, dwStartType, dwErrorControl, lpBinaryPathName, lpLoadOrderGroup, byref(dwTagId), lpDependencies, lpServiceStartName, lpPassword) - return ServiceHandle(hService), dwTagId.value - -CreateService = GuessStringType(CreateServiceA, CreateServiceW) - -# BOOL WINAPI DeleteService( -# _In_ SC_HANDLE hService -# ); -def DeleteService(hService): - _DeleteService = windll.advapi32.DeleteService - _DeleteService.argtypes = [SC_HANDLE] - _DeleteService.restype = bool - _DeleteService.errcheck = RaiseIfZero - _DeleteService(hService) - -# BOOL WINAPI GetServiceKeyName( -# _In_ SC_HANDLE hSCManager, -# _In_ LPCTSTR lpDisplayName, -# _Out_opt_ LPTSTR lpServiceName, -# _Inout_ LPDWORD lpcchBuffer -# ); -def GetServiceKeyNameA(hSCManager, lpDisplayName): - _GetServiceKeyNameA = windll.advapi32.GetServiceKeyNameA - _GetServiceKeyNameA.argtypes = [SC_HANDLE, LPSTR, LPSTR, LPDWORD] - _GetServiceKeyNameA.restype = bool - - cchBuffer = DWORD(0) - _GetServiceKeyNameA(hSCManager, lpDisplayName, None, byref(cchBuffer)) - if cchBuffer.value == 0: - raise ctypes.WinError() - lpServiceName = ctypes.create_string_buffer(cchBuffer.value + 1) - cchBuffer.value = sizeof(lpServiceName) - success = _GetServiceKeyNameA(hSCManager, lpDisplayName, lpServiceName, byref(cchBuffer)) - if not success: - raise ctypes.WinError() - return lpServiceName.value - -def GetServiceKeyNameW(hSCManager, lpDisplayName): - _GetServiceKeyNameW = windll.advapi32.GetServiceKeyNameW - _GetServiceKeyNameW.argtypes = [SC_HANDLE, LPWSTR, LPWSTR, LPDWORD] - _GetServiceKeyNameW.restype = bool - - cchBuffer = DWORD(0) - _GetServiceKeyNameW(hSCManager, lpDisplayName, None, byref(cchBuffer)) - if cchBuffer.value == 0: - raise ctypes.WinError() - lpServiceName = ctypes.create_unicode_buffer(cchBuffer.value + 2) - cchBuffer.value = 
sizeof(lpServiceName) - success = _GetServiceKeyNameW(hSCManager, lpDisplayName, lpServiceName, byref(cchBuffer)) - if not success: - raise ctypes.WinError() - return lpServiceName.value - -GetServiceKeyName = GuessStringType(GetServiceKeyNameA, GetServiceKeyNameW) - -# BOOL WINAPI GetServiceDisplayName( -# _In_ SC_HANDLE hSCManager, -# _In_ LPCTSTR lpServiceName, -# _Out_opt_ LPTSTR lpDisplayName, -# _Inout_ LPDWORD lpcchBuffer -# ); -def GetServiceDisplayNameA(hSCManager, lpServiceName): - _GetServiceDisplayNameA = windll.advapi32.GetServiceDisplayNameA - _GetServiceDisplayNameA.argtypes = [SC_HANDLE, LPSTR, LPSTR, LPDWORD] - _GetServiceDisplayNameA.restype = bool - - cchBuffer = DWORD(0) - _GetServiceDisplayNameA(hSCManager, lpServiceName, None, byref(cchBuffer)) - if cchBuffer.value == 0: - raise ctypes.WinError() - lpDisplayName = ctypes.create_string_buffer(cchBuffer.value + 1) - cchBuffer.value = sizeof(lpDisplayName) - success = _GetServiceDisplayNameA(hSCManager, lpServiceName, lpDisplayName, byref(cchBuffer)) - if not success: - raise ctypes.WinError() - return lpDisplayName.value - -def GetServiceDisplayNameW(hSCManager, lpServiceName): - _GetServiceDisplayNameW = windll.advapi32.GetServiceDisplayNameW - _GetServiceDisplayNameW.argtypes = [SC_HANDLE, LPWSTR, LPWSTR, LPDWORD] - _GetServiceDisplayNameW.restype = bool - - cchBuffer = DWORD(0) - _GetServiceDisplayNameW(hSCManager, lpServiceName, None, byref(cchBuffer)) - if cchBuffer.value == 0: - raise ctypes.WinError() - lpDisplayName = ctypes.create_unicode_buffer(cchBuffer.value + 2) - cchBuffer.value = sizeof(lpDisplayName) - success = _GetServiceDisplayNameW(hSCManager, lpServiceName, lpDisplayName, byref(cchBuffer)) - if not success: - raise ctypes.WinError() - return lpDisplayName.value - -GetServiceDisplayName = GuessStringType(GetServiceDisplayNameA, GetServiceDisplayNameW) - -# BOOL WINAPI QueryServiceConfig( -# _In_ SC_HANDLE hService, -# _Out_opt_ LPQUERY_SERVICE_CONFIG lpServiceConfig, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded -# ); - -# TO DO - -# BOOL WINAPI QueryServiceConfig2( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwInfoLevel, -# _Out_opt_ LPBYTE lpBuffer, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded -# ); - -# TO DO - -# BOOL WINAPI ChangeServiceConfig( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwServiceType, -# _In_ DWORD dwStartType, -# _In_ DWORD dwErrorControl, -# _In_opt_ LPCTSTR lpBinaryPathName, -# _In_opt_ LPCTSTR lpLoadOrderGroup, -# _Out_opt_ LPDWORD lpdwTagId, -# _In_opt_ LPCTSTR lpDependencies, -# _In_opt_ LPCTSTR lpServiceStartName, -# _In_opt_ LPCTSTR lpPassword, -# _In_opt_ LPCTSTR lpDisplayName -# ); - -# TO DO - -# BOOL WINAPI ChangeServiceConfig2( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwInfoLevel, -# _In_opt_ LPVOID lpInfo -# ); - -# TO DO - -# BOOL WINAPI StartService( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwNumServiceArgs, -# _In_opt_ LPCTSTR *lpServiceArgVectors -# ); -def StartServiceA(hService, ServiceArgVectors = None): - _StartServiceA = windll.advapi32.StartServiceA - _StartServiceA.argtypes = [SC_HANDLE, DWORD, LPVOID] - _StartServiceA.restype = bool - _StartServiceA.errcheck = RaiseIfZero - - if ServiceArgVectors: - dwNumServiceArgs = len(ServiceArgVectors) - CServiceArgVectors = (LPSTR * dwNumServiceArgs)(*ServiceArgVectors) - lpServiceArgVectors = ctypes.pointer(CServiceArgVectors) - else: - dwNumServiceArgs = 0 - lpServiceArgVectors = None - _StartServiceA(hService, dwNumServiceArgs, lpServiceArgVectors) - -def StartServiceW(hService, 
ServiceArgVectors = None): - _StartServiceW = windll.advapi32.StartServiceW - _StartServiceW.argtypes = [SC_HANDLE, DWORD, LPVOID] - _StartServiceW.restype = bool - _StartServiceW.errcheck = RaiseIfZero - - if ServiceArgVectors: - dwNumServiceArgs = len(ServiceArgVectors) - CServiceArgVectors = (LPWSTR * dwNumServiceArgs)(*ServiceArgVectors) - lpServiceArgVectors = ctypes.pointer(CServiceArgVectors) - else: - dwNumServiceArgs = 0 - lpServiceArgVectors = None - _StartServiceW(hService, dwNumServiceArgs, lpServiceArgVectors) - -StartService = GuessStringType(StartServiceA, StartServiceW) - -# BOOL WINAPI ControlService( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwControl, -# _Out_ LPSERVICE_STATUS lpServiceStatus -# ); -def ControlService(hService, dwControl): - _ControlService = windll.advapi32.ControlService - _ControlService.argtypes = [SC_HANDLE, DWORD, LPSERVICE_STATUS] - _ControlService.restype = bool - _ControlService.errcheck = RaiseIfZero - - rawServiceStatus = SERVICE_STATUS() - _ControlService(hService, dwControl, byref(rawServiceStatus)) - return ServiceStatus(rawServiceStatus) - -# BOOL WINAPI ControlServiceEx( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwControl, -# _In_ DWORD dwInfoLevel, -# _Inout_ PVOID pControlParams -# ); - -# TO DO - -# DWORD WINAPI NotifyServiceStatusChange( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwNotifyMask, -# _In_ PSERVICE_NOTIFY pNotifyBuffer -# ); - -# TO DO - -# BOOL WINAPI QueryServiceStatus( -# _In_ SC_HANDLE hService, -# _Out_ LPSERVICE_STATUS lpServiceStatus -# ); -def QueryServiceStatus(hService): - _QueryServiceStatus = windll.advapi32.QueryServiceStatus - _QueryServiceStatus.argtypes = [SC_HANDLE, LPSERVICE_STATUS] - _QueryServiceStatus.restype = bool - _QueryServiceStatus.errcheck = RaiseIfZero - - rawServiceStatus = SERVICE_STATUS() - _QueryServiceStatus(hService, byref(rawServiceStatus)) - return ServiceStatus(rawServiceStatus) - -# BOOL WINAPI QueryServiceStatusEx( -# _In_ SC_HANDLE hService, -# _In_ SC_STATUS_TYPE InfoLevel, -# _Out_opt_ LPBYTE lpBuffer, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded -# ); -def QueryServiceStatusEx(hService, InfoLevel = SC_STATUS_PROCESS_INFO): - - if InfoLevel != SC_STATUS_PROCESS_INFO: - raise NotImplementedError() - - _QueryServiceStatusEx = windll.advapi32.QueryServiceStatusEx - _QueryServiceStatusEx.argtypes = [SC_HANDLE, SC_STATUS_TYPE, LPVOID, DWORD, LPDWORD] - _QueryServiceStatusEx.restype = bool - _QueryServiceStatusEx.errcheck = RaiseIfZero - - lpBuffer = SERVICE_STATUS_PROCESS() - cbBytesNeeded = DWORD(sizeof(lpBuffer)) - _QueryServiceStatusEx(hService, InfoLevel, byref(lpBuffer), sizeof(lpBuffer), byref(cbBytesNeeded)) - return ServiceStatusProcess(lpBuffer) - -# BOOL WINAPI EnumServicesStatus( -# _In_ SC_HANDLE hSCManager, -# _In_ DWORD dwServiceType, -# _In_ DWORD dwServiceState, -# _Out_opt_ LPENUM_SERVICE_STATUS lpServices, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded, -# _Out_ LPDWORD lpServicesReturned, -# _Inout_opt_ LPDWORD lpResumeHandle -# ); -def EnumServicesStatusA(hSCManager, dwServiceType = SERVICE_DRIVER | SERVICE_WIN32, dwServiceState = SERVICE_STATE_ALL): - _EnumServicesStatusA = windll.advapi32.EnumServicesStatusA - _EnumServicesStatusA.argtypes = [SC_HANDLE, DWORD, DWORD, LPVOID, DWORD, LPDWORD, LPDWORD, LPDWORD] - _EnumServicesStatusA.restype = bool - - cbBytesNeeded = DWORD(0) - ServicesReturned = DWORD(0) - ResumeHandle = DWORD(0) - - _EnumServicesStatusA(hSCManager, dwServiceType, dwServiceState, None, 0, byref(cbBytesNeeded), 
byref(ServicesReturned), byref(ResumeHandle)) - - Services = [] - success = False - while GetLastError() == ERROR_MORE_DATA: - if cbBytesNeeded.value < sizeof(ENUM_SERVICE_STATUSA): - break - ServicesBuffer = ctypes.create_string_buffer("", cbBytesNeeded.value) - success = _EnumServicesStatusA(hSCManager, dwServiceType, dwServiceState, byref(ServicesBuffer), sizeof(ServicesBuffer), byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle)) - if sizeof(ServicesBuffer) < (sizeof(ENUM_SERVICE_STATUSA) * ServicesReturned.value): - raise ctypes.WinError() - lpServicesArray = ctypes.cast(ctypes.cast(ctypes.pointer(ServicesBuffer), ctypes.c_void_p), LPENUM_SERVICE_STATUSA) - for index in compat.xrange(0, ServicesReturned.value): - Services.append( ServiceStatusEntry(lpServicesArray[index]) ) - if success: break - if not success: - raise ctypes.WinError() - - return Services - -def EnumServicesStatusW(hSCManager, dwServiceType = SERVICE_DRIVER | SERVICE_WIN32, dwServiceState = SERVICE_STATE_ALL): - _EnumServicesStatusW = windll.advapi32.EnumServicesStatusW - _EnumServicesStatusW.argtypes = [SC_HANDLE, DWORD, DWORD, LPVOID, DWORD, LPDWORD, LPDWORD, LPDWORD] - _EnumServicesStatusW.restype = bool - - cbBytesNeeded = DWORD(0) - ServicesReturned = DWORD(0) - ResumeHandle = DWORD(0) - - _EnumServicesStatusW(hSCManager, dwServiceType, dwServiceState, None, 0, byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle)) - - Services = [] - success = False - while GetLastError() == ERROR_MORE_DATA: - if cbBytesNeeded.value < sizeof(ENUM_SERVICE_STATUSW): - break - ServicesBuffer = ctypes.create_string_buffer("", cbBytesNeeded.value) - success = _EnumServicesStatusW(hSCManager, dwServiceType, dwServiceState, byref(ServicesBuffer), sizeof(ServicesBuffer), byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle)) - if sizeof(ServicesBuffer) < (sizeof(ENUM_SERVICE_STATUSW) * ServicesReturned.value): - raise ctypes.WinError() - lpServicesArray = ctypes.cast(ctypes.cast(ctypes.pointer(ServicesBuffer), ctypes.c_void_p), LPENUM_SERVICE_STATUSW) - for index in compat.xrange(0, ServicesReturned.value): - Services.append( ServiceStatusEntry(lpServicesArray[index]) ) - if success: break - if not success: - raise ctypes.WinError() - - return Services - -EnumServicesStatus = DefaultStringType(EnumServicesStatusA, EnumServicesStatusW) - -# BOOL WINAPI EnumServicesStatusEx( -# _In_ SC_HANDLE hSCManager, -# _In_ SC_ENUM_TYPE InfoLevel, -# _In_ DWORD dwServiceType, -# _In_ DWORD dwServiceState, -# _Out_opt_ LPBYTE lpServices, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded, -# _Out_ LPDWORD lpServicesReturned, -# _Inout_opt_ LPDWORD lpResumeHandle, -# _In_opt_ LPCTSTR pszGroupName -# ); -def EnumServicesStatusExA(hSCManager, InfoLevel = SC_ENUM_PROCESS_INFO, dwServiceType = SERVICE_DRIVER | SERVICE_WIN32, dwServiceState = SERVICE_STATE_ALL, pszGroupName = None): - - if InfoLevel != SC_ENUM_PROCESS_INFO: - raise NotImplementedError() - - _EnumServicesStatusExA = windll.advapi32.EnumServicesStatusExA - _EnumServicesStatusExA.argtypes = [SC_HANDLE, SC_ENUM_TYPE, DWORD, DWORD, LPVOID, DWORD, LPDWORD, LPDWORD, LPDWORD, LPSTR] - _EnumServicesStatusExA.restype = bool - - cbBytesNeeded = DWORD(0) - ServicesReturned = DWORD(0) - ResumeHandle = DWORD(0) - - _EnumServicesStatusExA(hSCManager, InfoLevel, dwServiceType, dwServiceState, None, 0, byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle), pszGroupName) - - Services = [] - success = False - while GetLastError() == ERROR_MORE_DATA: 
- if cbBytesNeeded.value < sizeof(ENUM_SERVICE_STATUS_PROCESSA): - break - ServicesBuffer = ctypes.create_string_buffer("", cbBytesNeeded.value) - success = _EnumServicesStatusExA(hSCManager, InfoLevel, dwServiceType, dwServiceState, byref(ServicesBuffer), sizeof(ServicesBuffer), byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle), pszGroupName) - if sizeof(ServicesBuffer) < (sizeof(ENUM_SERVICE_STATUS_PROCESSA) * ServicesReturned.value): - raise ctypes.WinError() - lpServicesArray = ctypes.cast(ctypes.cast(ctypes.pointer(ServicesBuffer), ctypes.c_void_p), LPENUM_SERVICE_STATUS_PROCESSA) - for index in compat.xrange(0, ServicesReturned.value): - Services.append( ServiceStatusProcessEntry(lpServicesArray[index]) ) - if success: break - if not success: - raise ctypes.WinError() - - return Services - -def EnumServicesStatusExW(hSCManager, InfoLevel = SC_ENUM_PROCESS_INFO, dwServiceType = SERVICE_DRIVER | SERVICE_WIN32, dwServiceState = SERVICE_STATE_ALL, pszGroupName = None): - _EnumServicesStatusExW = windll.advapi32.EnumServicesStatusExW - _EnumServicesStatusExW.argtypes = [SC_HANDLE, SC_ENUM_TYPE, DWORD, DWORD, LPVOID, DWORD, LPDWORD, LPDWORD, LPDWORD, LPWSTR] - _EnumServicesStatusExW.restype = bool - - if InfoLevel != SC_ENUM_PROCESS_INFO: - raise NotImplementedError() - - cbBytesNeeded = DWORD(0) - ServicesReturned = DWORD(0) - ResumeHandle = DWORD(0) - - _EnumServicesStatusExW(hSCManager, InfoLevel, dwServiceType, dwServiceState, None, 0, byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle), pszGroupName) - - Services = [] - success = False - while GetLastError() == ERROR_MORE_DATA: - if cbBytesNeeded.value < sizeof(ENUM_SERVICE_STATUS_PROCESSW): - break - ServicesBuffer = ctypes.create_string_buffer("", cbBytesNeeded.value) - success = _EnumServicesStatusExW(hSCManager, InfoLevel, dwServiceType, dwServiceState, byref(ServicesBuffer), sizeof(ServicesBuffer), byref(cbBytesNeeded), byref(ServicesReturned), byref(ResumeHandle), pszGroupName) - if sizeof(ServicesBuffer) < (sizeof(ENUM_SERVICE_STATUS_PROCESSW) * ServicesReturned.value): - raise ctypes.WinError() - lpServicesArray = ctypes.cast(ctypes.cast(ctypes.pointer(ServicesBuffer), ctypes.c_void_p), LPENUM_SERVICE_STATUS_PROCESSW) - for index in compat.xrange(0, ServicesReturned.value): - Services.append( ServiceStatusProcessEntry(lpServicesArray[index]) ) - if success: break - if not success: - raise ctypes.WinError() - - return Services - -EnumServicesStatusEx = DefaultStringType(EnumServicesStatusExA, EnumServicesStatusExW) - -# BOOL WINAPI EnumDependentServices( -# _In_ SC_HANDLE hService, -# _In_ DWORD dwServiceState, -# _Out_opt_ LPENUM_SERVICE_STATUS lpServices, -# _In_ DWORD cbBufSize, -# _Out_ LPDWORD pcbBytesNeeded, -# _Out_ LPDWORD lpServicesReturned -# ); - -# TO DO - -#============================================================================== -# This calculates the list of exported symbols. 
-_all = set(vars().keys()).difference(_all) -__all__ = [_x for _x in _all if not _x.startswith('_')] -__all__.sort() -#============================================================================== diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py deleted file mode 100644 index f451e08ad2eb0732dcb806b1851eb978d4acf136..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSPHead', - in_channels=2048, - in_index=3, - channels=512, - pool_scales=(1, 2, 3, 6), - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/waymo_config.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/waymo_config.py deleted file mode 100644 index b273dc98050f13b72075d49ef2e0f59df997e1cd..0000000000000000000000000000000000000000 --- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/config/waymo_config.py +++ /dev/null @@ -1,104 +0,0 @@ -from risk_biased.config.paths import ( - data_dir, - sample_dataset_path, - val_dataset_path, - train_dataset_path, - test_dataset_path, - log_path, -) - -# Data augmentation: -normalize_angle = True -random_rotation = False -angle_std = 3.14 / 4 -random_translation = False -translation_distance_std = 0.1 -p_exchange_two_first = 0.5 - -# Data diminution: -min_num_observation = 2 -max_size_lane = 50 -train_dataset_size_limit = None -val_dataset_size_limit = None -max_num_agents = 50 -max_num_objects = 50 - -# Data caracterization: -time_scene = 9.1 -dt = 0.1 -num_steps = 11 -num_steps_future = 80 - -# TODO: avoid conditioning on the name of the directory in the path -if data_dir == "interactive_veh_type": - map_state_dim = 2 + num_steps * 8 - state_dim = 11 - dynamic_state_dim = 5 -elif data_dir == "interactive_full": - map_state_dim = 2 - state_dim = 5 - dynamic_state_dim = 5 -else: - map_state_dim = 2 - state_dim = 2 - dynamic_state_dim = 2 - -# Variational Loss Hyperparameters -kl_weight = 1.0 -kl_threshold = 0.01 - -# Training Parameters -learning_rate = 3e-4 -batch_size = 64 -accumulate_grad_batches = 2 -num_epochs_cvae = 0 -num_epochs_bias = 100 -gpus = [1] -seed = 0 # Give an integer value to seed will set seed for pseudo-random number generators in: pytorch, numpy, python.random -num_workers = 8 - -# Model hyperparameter -model_type = "interaction_biased" -condition_on_ego_future = False -latent_dim = 16 -hidden_dim = 128 -feature_dim = 16 -num_vq = 
512 -latent_distribution = "gaussian" # "gaussian" or "quantized" -is_mlp_residual = True -num_hidden_layers = 3 -num_blocks = 3 -interaction_type = "Attention" # one of "ContextGating", "Attention", "Hybrid" -## MCG parameters -mcg_dim_expansion = 2 -mcg_num_layers = 0 -## Attention parameters -num_attention_heads = 4 -sequence_encoder_type = "MLP" # one of "MLP", "LSTM", "maskedLSTM" -sequence_decoder_type = "MLP" # one of "MLP", "LSTM" - - -# Risk Loss Hyperparameters -cost_reduce = "discounted_mean" # choose in "discounted_mean", "mean", "min", "max", "now", "final" -discount_factor = 0.95 # only used if cost_reduce == "discounted_mean", discounts the cost by this factor at each time step -min_velocity_diff = 0.1 -n_mc_samples_risk = 32 -n_mc_samples_biased = 16 -risk_weight = 1 -risk_assymetry_factor = 30 -use_risk_constraint = True # For encoder_biased only -risk_constraint_update_every_n_epoch = ( - 1 # For encoder_biased only, not used if use_risk_constraint == False -) -risk_constraint_weight_update_factor = ( - 1.5 # For encoder_biased only, not used if use_risk_constraint == False -) -risk_constraint_weight_maximum = ( - 1000 # For encoder_biased only, not used if use_risk_constraint == False -) - -# List files that should be saved as log -files_to_log = [ - "./risk_biased/models/biased_cvae_model.py", - "./risk_biased/models/latent_distributions.py", -] diff --git a/spaces/TabooAM/What-game-you-should-play/app.py b/spaces/TabooAM/What-game-you-should-play/app.py deleted file mode 100644 index b8e324b9c29780cc194b84219d4782bd519931d7..0000000000000000000000000000000000000000 --- a/spaces/TabooAM/What-game-you-should-play/app.py +++ /dev/null @@ -1,172 +0,0 @@ -### ----------------------------- ### -### libraries ### -### ----------------------------- ### - -import gradio as gr -import pandas as pd -import numpy as np -from sklearn.model_selection import train_test_split -from sklearn.linear_model import LogisticRegression -from sklearn import metrics - - -### ------------------------------ ### -### data transformation ### -### ------------------------------ ### - -# load dataset -uncleaned_data = pd.read_csv('data.csv') - -# remove timestamp from dataset (always first column) -uncleaned_data = uncleaned_data.iloc[: , 1:] -data = pd.DataFrame() - -# keep track of which columns are categorical and what -# those columns' value mappings are -# structure: {colname1: {...}, colname2: {...} } -cat_value_dicts = {} -final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1] - -# for each column... -for (colname, colval) in uncleaned_data.iteritems(): - - # check if col is already a number; if so, add col directly - # to new dataframe and skip to next column - if isinstance(colval.values[0], (np.integer, float)): - data[colname] = uncleaned_data[colname].copy() - continue - - # structure: {0: "lilac", 1: "blue", ...} - new_dict = {} - val = 0 # first index per column - transformed_col_vals = [] # new numeric datapoints - - # if not, for each item in that column... - for (row, item) in enumerate(colval.values): - - # if item is not in this col's dict... 
- if item not in new_dict: - new_dict[item] = val - val += 1 - - # then add numerical value to transformed dataframe - transformed_col_vals.append(new_dict[item]) - - # reverse dictionary only for final col (0, 1) => (vals) - if colname == final_colname: - new_dict = {value : key for (key, value) in new_dict.items()} - - cat_value_dicts[colname] = new_dict - data[colname] = transformed_col_vals - - -### -------------------------------- ### -### model training ### -### -------------------------------- ### - -# select features and predicton; automatically selects last column as prediction -cols = len(data.columns) -num_features = cols - 1 -x = data.iloc[: , :num_features] -y = data.iloc[: , num_features:] - -# split data into training and testing sets -x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25) - -# instantiate the model (using default parameters) -model = LogisticRegression() -model.fit(x_train, y_train.values.ravel()) -y_pred = model.predict(x_test) - - -### -------------------------------- ### -### article generation ### -### -------------------------------- ### -# borrow file reading function from reader.py - -def get_feat(): - feats = [abs(x) for x in model.coef_[0]] - max_val = max(feats) - idx = feats.index(max_val) - return data.columns[idx] - -acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%" -most_imp_feat = get_feat() -# info = get_article(acc, most_imp_feat) - - - -### ------------------------------- ### -### interface creation ### -### ------------------------------- ### - - -# predictor for generic number of features -def general_predictor(*args): - features = [] - - # transform categorical input - for colname, arg in zip(data.columns, args): - if (colname in cat_value_dicts): - features.append(cat_value_dicts[colname][arg]) - else: - features.append(arg) - - # predict single datapoint - new_input = [features] - result = model.predict(new_input) - return cat_value_dicts[final_colname][result[0]] - -# add data labels to replace those lost via star-args - - -block = gr.Blocks() - -with open('info.md') as f: - with block: - gr.Markdown(f.readline()) - gr.Markdown('Take the quiz to get a personalized recommendation using AI.') - - with gr.Row(): - with gr.Box(): - inputls = [] - for colname in data.columns: - # skip last column - if colname == final_colname: - continue - - # access categories dict if data is categorical - # otherwise, just use a number input - if colname in cat_value_dicts: - radio_options = list(cat_value_dicts[colname].keys()) - inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname)) - else: - # add numerical input - inputls.append(gr.inputs.Number(label=colname)) - gr.Markdown("
    ") - - submit = gr.Button("Click to see your personalized result!", variant="primary") - gr.Markdown("
    ") - output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here") - - submit.click(fn=general_predictor, inputs=inputls, outputs=output) - gr.Markdown("
    ") - - with gr.Row(): - with gr.Box(): - gr.Markdown(f"

    Accuracy:

    {acc}") - with gr.Box(): - gr.Markdown(f"

    Most important feature:

    {most_imp_feat}") - - gr.Markdown("
    ") - - with gr.Box(): - gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for that dataset. Model accuracy and most important feature can be helpful for understanding how the model works, but should not be considered absolute facts about the real world.''') - - with gr.Box(): - with open('info.md') as f: - f.readline() - gr.Markdown(f.read()) - -# show the interface -block.launch() \ No newline at end of file diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py deleted file mode 100644 index b3f679b67da7c997478bd9ee8546682106b8be62..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/build_clib.py +++ /dev/null @@ -1,207 +0,0 @@ -"""distutils.command.build_clib - -Implements the Distutils 'build_clib' command, to build a C/C++ library -that is included in the module distribution and needed by an extension -module.""" - - -# XXX this module has *lots* of code ripped-off quite transparently from -# build_ext.py -- not surprisingly really, as the work required to build -# a static library from a collection of C source files is not really all -# that different from what's required to build a shared object file from -# a collection of C source files. Nevertheless, I haven't done the -# necessary refactoring to account for the overlap in code between the -# two modules, mainly because a number of subtle details changed in the -# cut 'n paste. Sigh. - -import os -from ..core import Command -from ..errors import DistutilsSetupError -from ..sysconfig import customize_compiler -from distutils._log import log - - -def show_compilers(): - from ..ccompiler import show_compilers - - show_compilers() - - -class build_clib(Command): - description = "build C/C++ libraries used by Python extensions" - - user_options = [ - ('build-clib=', 'b', "directory to build C/C++ libraries to"), - ('build-temp=', 't', "directory to put temporary build by-products"), - ('debug', 'g', "compile with debugging information"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ('compiler=', 'c', "specify the compiler type"), - ] - - boolean_options = ['debug', 'force'] - - help_options = [ - ('help-compiler', None, "list available compilers", show_compilers), - ] - - def initialize_options(self): - self.build_clib = None - self.build_temp = None - - # List of libraries to build - self.libraries = None - - # Compilation options for all libraries - self.include_dirs = None - self.define = None - self.undef = None - self.debug = None - self.force = 0 - self.compiler = None - - def finalize_options(self): - # This might be confusing: both build-clib and build-temp default - # to build-temp as defined by the "build" command. This is because - # I think that C libraries are really just temporary build - # by-products, at least from the point of view of building Python - # extensions -- but I want to keep my options open. 
- self.set_undefined_options( - 'build', - ('build_temp', 'build_clib'), - ('build_temp', 'build_temp'), - ('compiler', 'compiler'), - ('debug', 'debug'), - ('force', 'force'), - ) - - self.libraries = self.distribution.libraries - if self.libraries: - self.check_library_list(self.libraries) - - if self.include_dirs is None: - self.include_dirs = self.distribution.include_dirs or [] - if isinstance(self.include_dirs, str): - self.include_dirs = self.include_dirs.split(os.pathsep) - - # XXX same as for build_ext -- what about 'self.define' and - # 'self.undef' ? - - def run(self): - if not self.libraries: - return - - # Yech -- this is cut 'n pasted from build_ext.py! - from ..ccompiler import new_compiler - - self.compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - customize_compiler(self.compiler) - - if self.include_dirs is not None: - self.compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for name, value in self.define: - self.compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - self.compiler.undefine_macro(macro) - - self.build_libraries(self.libraries) - - def check_library_list(self, libraries): - """Ensure that the list of libraries is valid. - - `library` is presumably provided as a command option 'libraries'. - This method checks that it is a list of 2-tuples, where the tuples - are (library_name, build_info_dict). - - Raise DistutilsSetupError if the structure is invalid anywhere; - just returns otherwise. - """ - if not isinstance(libraries, list): - raise DistutilsSetupError("'libraries' option must be a list of tuples") - - for lib in libraries: - if not isinstance(lib, tuple) and len(lib) != 2: - raise DistutilsSetupError("each element of 'libraries' must a 2-tuple") - - name, build_info = lib - - if not isinstance(name, str): - raise DistutilsSetupError( - "first element of each tuple in 'libraries' " - "must be a string (the library name)" - ) - - if '/' in name or (os.sep != '/' and os.sep in name): - raise DistutilsSetupError( - "bad library name '%s': " - "may not contain directory separators" % lib[0] - ) - - if not isinstance(build_info, dict): - raise DistutilsSetupError( - "second element of each tuple in 'libraries' " - "must be a dictionary (build info)" - ) - - def get_library_names(self): - # Assume the library list is valid -- 'check_library_list()' is - # called from 'finalize_options()', so it should be! 
- if not self.libraries: - return None - - lib_names = [] - for lib_name, build_info in self.libraries: - lib_names.append(lib_name) - return lib_names - - def get_source_files(self): - self.check_library_list(self.libraries) - filenames = [] - for lib_name, build_info in self.libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - - filenames.extend(sources) - return filenames - - def build_libraries(self, libraries): - for lib_name, build_info in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name - ) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - debug=self.debug, - ) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) - self.compiler.create_static_lib( - objects, lib_name, output_dir=self.build_clib, debug=self.debug - ) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/__init__.py deleted file mode 100644 index 886421437557e5d898f5e608ea7e9f23662f01bb..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/__init__.py +++ /dev/null @@ -1,904 +0,0 @@ -import os -import re -import abc -import csv -import sys -from .. import zipp -import email -import pathlib -import operator -import textwrap -import warnings -import functools -import itertools -import posixpath -import collections - -from . 
import _adapters, _meta, _py39compat -from ._collections import FreezableDefaultDict, Pair -from ._compat import ( - NullFinder, - install, - pypy_partial, -) -from ._functools import method_cache, pass_none -from ._itertools import always_iterable, unique_everseen -from ._meta import PackageMetadata, SimplePath - -from contextlib import suppress -from importlib import import_module -from importlib.abc import MetaPathFinder -from itertools import starmap -from typing import List, Mapping, Optional - - -__all__ = [ - 'Distribution', - 'DistributionFinder', - 'PackageMetadata', - 'PackageNotFoundError', - 'distribution', - 'distributions', - 'entry_points', - 'files', - 'metadata', - 'packages_distributions', - 'requires', - 'version', -] - - -class PackageNotFoundError(ModuleNotFoundError): - """The package was not found.""" - - def __str__(self): - return f"No package metadata was found for {self.name}" - - @property - def name(self): - (name,) = self.args - return name - - -class Sectioned: - """ - A simple entry point config parser for performance - - >>> for item in Sectioned.read(Sectioned._sample): - ... print(item) - Pair(name='sec1', value='# comments ignored') - Pair(name='sec1', value='a = 1') - Pair(name='sec1', value='b = 2') - Pair(name='sec2', value='a = 2') - - >>> res = Sectioned.section_pairs(Sectioned._sample) - >>> item = next(res) - >>> item.name - 'sec1' - >>> item.value - Pair(name='a', value='1') - >>> item = next(res) - >>> item.value - Pair(name='b', value='2') - >>> item = next(res) - >>> item.name - 'sec2' - >>> item.value - Pair(name='a', value='2') - >>> list(res) - [] - """ - - _sample = textwrap.dedent( - """ - [sec1] - # comments ignored - a = 1 - b = 2 - - [sec2] - a = 2 - """ - ).lstrip() - - @classmethod - def section_pairs(cls, text): - return ( - section._replace(value=Pair.parse(section.value)) - for section in cls.read(text, filter_=cls.valid) - if section.name is not None - ) - - @staticmethod - def read(text, filter_=None): - lines = filter(filter_, map(str.strip, text.splitlines())) - name = None - for value in lines: - section_match = value.startswith('[') and value.endswith(']') - if section_match: - name = value.strip('[]') - continue - yield Pair(name, value) - - @staticmethod - def valid(line): - return line and not line.startswith('#') - - -class DeprecatedTuple: - """ - Provide subscript item access for backward compatibility. - - >>> recwarn = getfixture('recwarn') - >>> ep = EntryPoint(name='name', value='value', group='group') - >>> ep[:] - ('name', 'value', 'group') - >>> ep[0] - 'name' - >>> len(recwarn) - 1 - """ - - # Do not remove prior to 2023-05-01 or Python 3.13 - _warn = functools.partial( - warnings.warn, - "EntryPoint tuple interface is deprecated. Access members by name.", - DeprecationWarning, - stacklevel=pypy_partial(2), - ) - - def __getitem__(self, item): - self._warn() - return self._key()[item] - - -class EntryPoint(DeprecatedTuple): - """An entry point as defined by Python packaging conventions. - - See `the packaging docs on entry points - `_ - for more information. - - >>> ep = EntryPoint( - ... name=None, group=None, value='package.module:attr [extra1, extra2]') - >>> ep.module - 'package.module' - >>> ep.attr - 'attr' - >>> ep.extras - ['extra1', 'extra2'] - """ - - pattern = re.compile( - r'(?P[\w.]+)\s*' - r'(:\s*(?P[\w.]+)\s*)?' 
- r'((?P\[.*\])\s*)?$' - ) - """ - A regular expression describing the syntax for an entry point, - which might look like: - - - module - - package.module - - package.module:attribute - - package.module:object.attribute - - package.module:attr [extra1, extra2] - - Other combinations are possible as well. - - The expression is lenient about whitespace around the ':', - following the attr, and following any extras. - """ - - name: str - value: str - group: str - - dist: Optional['Distribution'] = None - - def __init__(self, name, value, group): - vars(self).update(name=name, value=value, group=group) - - def load(self): - """Load the entry point from its definition. If only a module - is indicated by the value, return that module. Otherwise, - return the named object. - """ - match = self.pattern.match(self.value) - module = import_module(match.group('module')) - attrs = filter(None, (match.group('attr') or '').split('.')) - return functools.reduce(getattr, attrs, module) - - @property - def module(self): - match = self.pattern.match(self.value) - return match.group('module') - - @property - def attr(self): - match = self.pattern.match(self.value) - return match.group('attr') - - @property - def extras(self): - match = self.pattern.match(self.value) - return re.findall(r'\w+', match.group('extras') or '') - - def _for(self, dist): - vars(self).update(dist=dist) - return self - - def matches(self, **params): - """ - EntryPoint matches the given parameters. - - >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]') - >>> ep.matches(group='foo') - True - >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]') - True - >>> ep.matches(group='foo', name='other') - False - >>> ep.matches() - True - >>> ep.matches(extras=['extra1', 'extra2']) - True - >>> ep.matches(module='bing') - True - >>> ep.matches(attr='bong') - True - """ - attrs = (getattr(self, param) for param in params) - return all(map(operator.eq, params.values(), attrs)) - - def _key(self): - return self.name, self.value, self.group - - def __lt__(self, other): - return self._key() < other._key() - - def __eq__(self, other): - return self._key() == other._key() - - def __setattr__(self, name, value): - raise AttributeError("EntryPoint objects are immutable.") - - def __repr__(self): - return ( - f'EntryPoint(name={self.name!r}, value={self.value!r}, ' - f'group={self.group!r})' - ) - - def __hash__(self): - return hash(self._key()) - - -class EntryPoints(tuple): - """ - An immutable collection of selectable EntryPoint objects. - """ - - __slots__ = () - - def __getitem__(self, name): # -> EntryPoint: - """ - Get the EntryPoint in self matching name. - """ - try: - return next(iter(self.select(name=name))) - except StopIteration: - raise KeyError(name) - - def select(self, **params): - """ - Select entry points from self that match the - given parameters (typically group and/or name). - """ - return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params)) - - @property - def names(self): - """ - Return the set of all names of all entry points. - """ - return {ep.name for ep in self} - - @property - def groups(self): - """ - Return the set of all groups of all entry points. 
- """ - return {ep.group for ep in self} - - @classmethod - def _from_text_for(cls, text, dist): - return cls(ep._for(dist) for ep in cls._from_text(text)) - - @staticmethod - def _from_text(text): - return ( - EntryPoint(name=item.value.name, value=item.value.value, group=item.name) - for item in Sectioned.section_pairs(text or '') - ) - - -class PackagePath(pathlib.PurePosixPath): - """A reference to a path in a package""" - - def read_text(self, encoding='utf-8'): - with self.locate().open(encoding=encoding) as stream: - return stream.read() - - def read_binary(self): - with self.locate().open('rb') as stream: - return stream.read() - - def locate(self): - """Return a path-like object for this path""" - return self.dist.locate_file(self) - - -class FileHash: - def __init__(self, spec): - self.mode, _, self.value = spec.partition('=') - - def __repr__(self): - return f'' - - -class Distribution(metaclass=abc.ABCMeta): - """A Python distribution package.""" - - @abc.abstractmethod - def read_text(self, filename): - """Attempt to load metadata file given by the name. - - :param filename: The name of the file in the distribution info. - :return: The text if found, otherwise None. - """ - - @abc.abstractmethod - def locate_file(self, path): - """ - Given a path to a file in this distribution, return a path - to it. - """ - - @classmethod - def from_name(cls, name: str): - """Return the Distribution for the given package name. - - :param name: The name of the distribution package to search for. - :return: The Distribution instance (or subclass thereof) for the named - package, if found. - :raises PackageNotFoundError: When the named package's distribution - metadata cannot be found. - :raises ValueError: When an invalid value is supplied for name. - """ - if not name: - raise ValueError("A distribution name is required.") - try: - return next(cls.discover(name=name)) - except StopIteration: - raise PackageNotFoundError(name) - - @classmethod - def discover(cls, **kwargs): - """Return an iterable of Distribution objects for all packages. - - Pass a ``context`` or pass keyword arguments for constructing - a context. - - :context: A ``DistributionFinder.Context`` object. - :return: Iterable of Distribution objects for all packages. - """ - context = kwargs.pop('context', None) - if context and kwargs: - raise ValueError("cannot accept context and kwargs") - context = context or DistributionFinder.Context(**kwargs) - return itertools.chain.from_iterable( - resolver(context) for resolver in cls._discover_resolvers() - ) - - @staticmethod - def at(path): - """Return a Distribution for the indicated metadata path - - :param path: a string or path-like object - :return: a concrete Distribution instance for the path - """ - return PathDistribution(pathlib.Path(path)) - - @staticmethod - def _discover_resolvers(): - """Search the meta_path for resolvers.""" - declared = ( - getattr(finder, 'find_distributions', None) for finder in sys.meta_path - ) - return filter(None, declared) - - @property - def metadata(self) -> _meta.PackageMetadata: - """Return the parsed metadata for this Distribution. - - The returned object will have keys that name the various bits of - metadata. See PEP 566 for details. - """ - text = ( - self.read_text('METADATA') - or self.read_text('PKG-INFO') - # This last clause is here to support old egg-info files. Its - # effect is to just end up using the PathDistribution's self._path - # (which points to the egg-info file) attribute unchanged. 
- or self.read_text('') - ) - return _adapters.Message(email.message_from_string(text)) - - @property - def name(self): - """Return the 'Name' metadata for the distribution package.""" - return self.metadata['Name'] - - @property - def _normalized_name(self): - """Return a normalized version of the name.""" - return Prepared.normalize(self.name) - - @property - def version(self): - """Return the 'Version' metadata for the distribution package.""" - return self.metadata['Version'] - - @property - def entry_points(self): - return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self) - - @property - def files(self): - """Files in this distribution. - - :return: List of PackagePath for this distribution or None - - Result is `None` if the metadata file that enumerates files - (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is - missing. - Result may be empty if the metadata exists but is empty. - """ - - def make_file(name, hash=None, size_str=None): - result = PackagePath(name) - result.hash = FileHash(hash) if hash else None - result.size = int(size_str) if size_str else None - result.dist = self - return result - - @pass_none - def make_files(lines): - return list(starmap(make_file, csv.reader(lines))) - - return make_files(self._read_files_distinfo() or self._read_files_egginfo()) - - def _read_files_distinfo(self): - """ - Read the lines of RECORD - """ - text = self.read_text('RECORD') - return text and text.splitlines() - - def _read_files_egginfo(self): - """ - SOURCES.txt might contain literal commas, so wrap each line - in quotes. - """ - text = self.read_text('SOURCES.txt') - return text and map('"{}"'.format, text.splitlines()) - - @property - def requires(self): - """Generated requirements specified for this Distribution""" - reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() - return reqs and list(reqs) - - def _read_dist_info_reqs(self): - return self.metadata.get_all('Requires-Dist') - - def _read_egg_info_reqs(self): - source = self.read_text('requires.txt') - return pass_none(self._deps_from_requires_text)(source) - - @classmethod - def _deps_from_requires_text(cls, source): - return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source)) - - @staticmethod - def _convert_egg_info_reqs_to_simple_reqs(sections): - """ - Historically, setuptools would solicit and store 'extra' - requirements, including those with environment markers, - in separate sections. More modern tools expect each - dependency to be defined separately, with any relevant - extras and environment markers attached directly to that - requirement. This method converts the former to the - latter. See _test_deps_from_requires_text for an example. - """ - - def make_condition(name): - return name and f'extra == "{name}"' - - def quoted_marker(section): - section = section or '' - extra, sep, markers = section.partition(':') - if extra and markers: - markers = f'({markers})' - conditions = list(filter(None, [markers, make_condition(extra)])) - return '; ' + ' and '.join(conditions) if conditions else '' - - def url_req_space(req): - """ - PEP 508 requires a space between the url_spec and the quoted_marker. - Ref python/importlib_metadata#357. - """ - # '@' is uniquely indicative of a url_req. - return ' ' * ('@' in req) - - for section in sections: - space = url_req_space(section.value) - yield section.value + space + quoted_marker(section.name) - - -class DistributionFinder(MetaPathFinder): - """ - A MetaPathFinder capable of discovering installed distributions. 
- """ - - class Context: - """ - Keyword arguments presented by the caller to - ``distributions()`` or ``Distribution.discover()`` - to narrow the scope of a search for distributions - in all DistributionFinders. - - Each DistributionFinder may expect any parameters - and should attempt to honor the canonical - parameters defined below when appropriate. - """ - - name = None - """ - Specific name for which a distribution finder should match. - A name of ``None`` matches all distributions. - """ - - def __init__(self, **kwargs): - vars(self).update(kwargs) - - @property - def path(self): - """ - The sequence of directory path that a distribution finder - should search. - - Typically refers to Python installed package paths such as - "site-packages" directories and defaults to ``sys.path``. - """ - return vars(self).get('path', sys.path) - - @abc.abstractmethod - def find_distributions(self, context=Context()): - """ - Find distributions. - - Return an iterable of all Distribution instances capable of - loading the metadata for packages matching the ``context``, - a DistributionFinder.Context instance. - """ - - -class FastPath: - """ - Micro-optimized class for searching a path for - children. - - >>> FastPath('').children() - ['...'] - """ - - @functools.lru_cache() # type: ignore - def __new__(cls, root): - return super().__new__(cls) - - def __init__(self, root): - self.root = root - - def joinpath(self, child): - return pathlib.Path(self.root, child) - - def children(self): - with suppress(Exception): - return os.listdir(self.root or '.') - with suppress(Exception): - return self.zip_children() - return [] - - def zip_children(self): - zip_path = zipp.Path(self.root) - names = zip_path.root.namelist() - self.joinpath = zip_path.joinpath - - return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names) - - def search(self, name): - return self.lookup(self.mtime).search(name) - - @property - def mtime(self): - with suppress(OSError): - return os.stat(self.root).st_mtime - self.lookup.cache_clear() - - @method_cache - def lookup(self, mtime): - return Lookup(self) - - -class Lookup: - def __init__(self, path: FastPath): - base = os.path.basename(path.root).lower() - base_is_egg = base.endswith(".egg") - self.infos = FreezableDefaultDict(list) - self.eggs = FreezableDefaultDict(list) - - for child in path.children(): - low = child.lower() - if low.endswith((".dist-info", ".egg-info")): - # rpartition is faster than splitext and suitable for this purpose. - name = low.rpartition(".")[0].partition("-")[0] - normalized = Prepared.normalize(name) - self.infos[normalized].append(path.joinpath(child)) - elif base_is_egg and low == "egg-info": - name = base.rpartition(".")[0].partition("-")[0] - legacy_normalized = Prepared.legacy_normalize(name) - self.eggs[legacy_normalized].append(path.joinpath(child)) - - self.infos.freeze() - self.eggs.freeze() - - def search(self, prepared): - infos = ( - self.infos[prepared.normalized] - if prepared - else itertools.chain.from_iterable(self.infos.values()) - ) - eggs = ( - self.eggs[prepared.legacy_normalized] - if prepared - else itertools.chain.from_iterable(self.eggs.values()) - ) - return itertools.chain(infos, eggs) - - -class Prepared: - """ - A prepared search for metadata on a possibly-named package. 
- """ - - normalized = None - legacy_normalized = None - - def __init__(self, name): - self.name = name - if name is None: - return - self.normalized = self.normalize(name) - self.legacy_normalized = self.legacy_normalize(name) - - @staticmethod - def normalize(name): - """ - PEP 503 normalization plus dashes as underscores. - """ - return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_') - - @staticmethod - def legacy_normalize(name): - """ - Normalize the package name as found in the convention in - older packaging tools versions and specs. - """ - return name.lower().replace('-', '_') - - def __bool__(self): - return bool(self.name) - - -@install -class MetadataPathFinder(NullFinder, DistributionFinder): - """A degenerate finder for distribution packages on the file system. - - This finder supplies only a find_distributions() method for versions - of Python that do not have a PathFinder find_distributions(). - """ - - def find_distributions(self, context=DistributionFinder.Context()): - """ - Find distributions. - - Return an iterable of all Distribution instances capable of - loading the metadata for packages matching ``context.name`` - (or all names if ``None`` indicated) along the paths in the list - of directories ``context.path``. - """ - found = self._search_paths(context.name, context.path) - return map(PathDistribution, found) - - @classmethod - def _search_paths(cls, name, paths): - """Find metadata directories in paths heuristically.""" - prepared = Prepared(name) - return itertools.chain.from_iterable( - path.search(prepared) for path in map(FastPath, paths) - ) - - def invalidate_caches(cls): - FastPath.__new__.cache_clear() - - -class PathDistribution(Distribution): - def __init__(self, path: SimplePath): - """Construct a distribution. - - :param path: SimplePath indicating the metadata directory. - """ - self._path = path - - def read_text(self, filename): - with suppress( - FileNotFoundError, - IsADirectoryError, - KeyError, - NotADirectoryError, - PermissionError, - ): - return self._path.joinpath(filename).read_text(encoding='utf-8') - - read_text.__doc__ = Distribution.read_text.__doc__ - - def locate_file(self, path): - return self._path.parent / path - - @property - def _normalized_name(self): - """ - Performance optimization: where possible, resolve the - normalized name from the file system path. - """ - stem = os.path.basename(str(self._path)) - return ( - pass_none(Prepared.normalize)(self._name_from_stem(stem)) - or super()._normalized_name - ) - - @staticmethod - def _name_from_stem(stem): - """ - >>> PathDistribution._name_from_stem('foo-3.0.egg-info') - 'foo' - >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info') - 'CherryPy' - >>> PathDistribution._name_from_stem('face.egg-info') - 'face' - >>> PathDistribution._name_from_stem('foo.bar') - """ - filename, ext = os.path.splitext(stem) - if ext not in ('.dist-info', '.egg-info'): - return - name, sep, rest = filename.partition('-') - return name - - -def distribution(distribution_name): - """Get the ``Distribution`` instance for the named package. - - :param distribution_name: The name of the distribution package as a string. - :return: A ``Distribution`` instance (or subclass thereof). - """ - return Distribution.from_name(distribution_name) - - -def distributions(**kwargs): - """Get all ``Distribution`` instances in the current environment. - - :return: An iterable of ``Distribution`` instances. 
- """ - return Distribution.discover(**kwargs) - - -def metadata(distribution_name) -> _meta.PackageMetadata: - """Get the metadata for the named package. - - :param distribution_name: The name of the distribution package to query. - :return: A PackageMetadata containing the parsed metadata. - """ - return Distribution.from_name(distribution_name).metadata - - -def version(distribution_name): - """Get the version string for the named package. - - :param distribution_name: The name of the distribution package to query. - :return: The version string for the package as defined in the package's - "Version" metadata key. - """ - return distribution(distribution_name).version - - -_unique = functools.partial( - unique_everseen, - key=_py39compat.normalized_name, -) -""" -Wrapper for ``distributions`` to return unique distributions by name. -""" - - -def entry_points(**params) -> EntryPoints: - """Return EntryPoint objects for all installed packages. - - Pass selection parameters (group or name) to filter the - result to entry points matching those properties (see - EntryPoints.select()). - - :return: EntryPoints for all installed packages. - """ - eps = itertools.chain.from_iterable( - dist.entry_points for dist in _unique(distributions()) - ) - return EntryPoints(eps).select(**params) - - -def files(distribution_name): - """Return a list of files for the named package. - - :param distribution_name: The name of the distribution package to query. - :return: List of files composing the distribution. - """ - return distribution(distribution_name).files - - -def requires(distribution_name): - """ - Return a list of requirements for the named package. - - :return: An iterator of requirements, suitable for - packaging.requirement.Requirement. - """ - return distribution(distribution_name).requires - - -def packages_distributions() -> Mapping[str, List[str]]: - """ - Return a mapping of top-level packages to their - distributions. 
- - >>> import collections.abc - >>> pkgs = packages_distributions() - >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values()) - True - """ - pkg_to_dist = collections.defaultdict(list) - for dist in distributions(): - for pkg in _top_level_declared(dist) or _top_level_inferred(dist): - pkg_to_dist[pkg].append(dist.metadata['Name']) - return dict(pkg_to_dist) - - -def _top_level_declared(dist): - return (dist.read_text('top_level.txt') or '').split() - - -def _top_level_inferred(dist): - return { - f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name - for f in always_iterable(dist.files) - if f.suffix == ".py" - } diff --git a/spaces/Tape/yoga/app.py b/spaces/Tape/yoga/app.py deleted file mode 100644 index 4ba1e4feb626f55e59d2f42de95670af8dc52baf..0000000000000000000000000000000000000000 --- a/spaces/Tape/yoga/app.py +++ /dev/null @@ -1,237 +0,0 @@ -import gradio as gr - -import math -import time -import mediapipe as mp - -import cv2 -from PIL import Image -import numpy as np -import datetime -import copy -from openpose import util -from openpose.body import Body -import matplotlib.pyplot as plt - - -mp_pose = mp.solutions.pose - -pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.3, model_complexity=2) - -mp_drawing = mp.solutions.drawing_utils - -def change_example(choice): - if choice == "plank": - return gr.Image.update(value="examples/plank.jpg") - elif choice == "downdog": - return gr.update(value="examples/downdog.jpg") - elif choice == "tree": - return gr.update(value="examples/tree.jpg") - else: - return gr.update(visible=False) - -def points(image): - results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - image_height, image_width, _ = image.shape - - img_copy = image.copy() - - if results.pose_landmarks: - mp_drawing.draw_landmarks(image=img_copy, landmark_list=results.pose_landmarks, connections=mp_pose.POSE_CONNECTIONS) - - img = plt.figure(figsize = [10, 10]) - - plt.axis('off') - plt.imshow(img_copy[:,:,::-1]) - return img - - else: - return gr.Image("examples/Error.png") - -def landmarks(image): - results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - height, width, _ = image.shape - - landmarks = [] - - if results.pose_landmarks: - for landmark in results.pose_landmarks.landmark: - landmarks.append((int(landmark.x * width), int(landmark.y * height), - (landmark.z * width))) - return landmarks - -def calculateAngle(landmark1, landmark2, landmark3): - - x1, y1, _ = landmark1 - x2, y2, _ = landmark2 - x3, y3, _ = landmark3 - - angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 - y2, x1 - x2)) - - if angle < 0: - angle += 360 - if angle > 180: - angle = 360 - angle - - return angle - -def correct_plank(landmarks): - recommendation = "" - result = "" - left_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value], - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value]) - - right_knee_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value], - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value]) - - left_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value]) - - right_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - 
landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value]) - - if (left_knee_angle>180 or left_knee_angle<170): - recommendation = recommendation + "The left knee should be straight. " - if (right_knee_angle>180 or right_knee_angle<170): - recommendation = recommendation + "The right knee should be straight. " - if ((left_hip_angle>180 or left_hip_angle<170) or (right_hip_angle>180 or right_hip_angle<170)): - recommendation = recommendation + "The spine should be straight. " - if recommendation == "": - result = "Great! You are doing the exercise correctly" - else: - result = "Almost perfect, but \n" - return result + recommendation - -def correct_dog(landmarks): - recommendation = "" - result = "" - left_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value]) - - right_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value]) - - left_leg = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value], - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value]) - right_leg = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value], - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value]) - - right_hand = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]) - - left_hand = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]) - - if (left_hip_angle>90 or right_hip_angle>90): - recommendation += "The spine should be bent. " - if not (170 < left_leg < 181 or 170 < right_leg < 181): - recommendation = "Legs should be straight. " - if not (170 < left_hand < 181 or 181 > right_hand > 170): - recommendation = "Hands should be straight. " - if recommendation == "": - result = "Great! 
You are doing the exercise correctly" - else: - result = "Almost perfect, but \n" - - return result + recommendation - -def correct_tree(landmarks): - recommendation = "" - result = "" - left_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value]) - - right_hip_angle = calculateAngle(landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value]) - - left_leg = calculateAngle( - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value], - landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value]) - right_leg = calculateAngle( - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value], - landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value]) - - right_hand = calculateAngle( - landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value], - landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value]) - - left_hand = calculateAngle( - landmarks[mp_pose.PoseLandmark.LEFT_HIP.value], - landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value], - landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value]) - - - #if (left_hand>90 or right_hand>90): - # recommendation += "Hands should be bent. " - if not (((175 < right_leg < 181) and (left_leg < 100)) or ((175 < left_leg < 181) and (right_leg < 100))): - recommendation = "One leg should be straight and another should be bent. " - if recommendation == "": - result = "Great! You are doing the exercise correctly" - else: - result = "Almost perfect, but \n" - - return result + recommendation - -def detect(video, radio): - video_clip = cv2.VideoCapture(video) - frames = video_clip.get(cv2.CAP_PROP_FRAME_COUNT) - - i=0 - while(video_clip.isOpened()): - is_read, frame = video_clip.read() - image = frame - if i == round(frames/2): - image = frame - break - i+=1 - new_image = points(image) - - result = "" - if radio == "plank": - result = correct_plank(landmarks(image)) - - if radio == "downdog": - result = correct_dog(landmarks(image)) - - if radio == "tree": - result = correct_tree(landmarks(image)) - - return new_image, result - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - radio = gr.Radio(["plank", "downdog", "tree"], label="Choose the exercise") - image1 = gr.Image("examples/Example.png") - video = gr.Video(label="Upload video with exercise") - check_btn = gr.Button(value="Let's go") - with gr.Column(): - image = gr.Plot() - result = gr.Textbox() - radio.change(fn=change_example, inputs=radio, outputs=image1) - check_btn.click(detect, inputs=[video, radio], outputs=[image, result]) - -demo.launch() - - -#gr.Interface( -# fn=detect, -# inputs=gr.Video(label="Upload video with exercise"), -# outputs="image", -# title="Are you doing the exercise correct?" 
-#).launch() \ No newline at end of file diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/INSTALL.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/INSTALL.md deleted file mode 100644 index b40768913742ca2b2e11c74d5944561931ecb326..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/INSTALL.md +++ /dev/null @@ -1,261 +0,0 @@ -## Installation - -### Requirements -- Linux or macOS with Python ≥ 3.6 -- PyTorch ≥ 1.8 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. - Install them together at [pytorch.org](https://pytorch.org) to make sure of this -- OpenCV is optional but needed by demo and visualization - - -### Build Detectron2 from Source - -gcc & g++ ≥ 5.4 are required. [ninja](https://ninja-build.org/) is optional but recommended for faster build. -After having them, run: -``` -python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' -# (add --user if you don't have permission) - -# Or, to install it from a local clone: -git clone https://github.com/facebookresearch/detectron2.git -python -m pip install -e detectron2 - -# On macOS, you may need to prepend the above commands with a few environment variables: -CC=clang CXX=clang++ ARCHFLAGS="-arch x86_64" python -m pip install ... -``` - -To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the -old build first. You often need to rebuild detectron2 after reinstalling PyTorch. - -### Install Pre-Built Detectron2 (Linux only) - -Choose from this table to install [v0.6 (Oct 2021)](https://github.com/facebookresearch/detectron2/releases): - -
-Each cell below is the `<cuda>/<torch>` part of the wheels index URL
-`https://dl.fbaipublicfiles.com/detectron2/wheels/<cuda>/<torch>/index.html`
-that is passed to `python -m pip install detectron2 -f <index-url>`:
-
-| CUDA | torch 1.10      | torch 1.9      | torch 1.8      |
-|------|-----------------|----------------|----------------|
-| 11.3 | cu113/torch1.10 | n/a            | n/a            |
-| 11.1 | cu111/torch1.10 | cu111/torch1.9 | cu111/torch1.8 |
-| 10.2 | cu102/torch1.10 | cu102/torch1.9 | cu102/torch1.8 |
-| 10.1 | n/a             | n/a            | cu101/torch1.8 |
-| cpu  | cpu/torch1.10   | cpu/torch1.9   | cpu/torch1.8   |
-
-For example, for CUDA 11.3 with torch 1.10:
-
-python -m pip install detectron2 -f \
-  https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
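A quick way to pick the right row above is to print the torch and CUDA versions of the interpreter you are installing into; the following is a minimal sketch that only relies on standard PyTorch attributes (`torch.__version__`, `torch.version.cuda`):

```
# Minimal sketch: report the locally installed torch / CUDA combination so the
# matching wheel index above can be chosen.
import torch

print("torch version:", torch.__version__)        # e.g. 1.10.0+cu113
print("CUDA used by torch:", torch.version.cuda)  # e.g. 11.3, or None for CPU-only builds
print("CUDA available:", torch.cuda.is_available())
```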
- -Note that: -1. The pre-built packages have to be used with the corresponding version of CUDA and the official package of PyTorch. - Otherwise, please build detectron2 from source. -2. New packages are released every few months. Therefore, packages may not contain the latest features in the main - branch and may not be compatible with the main branch of a research project that uses detectron2 - (e.g. those in [projects](projects)). - -### Common Installation Issues - -Each issue below is listed with its solutions: - -
- -Undefined symbols that look like "TH..", "at::Tensor...", "torch..." - -
- -This usually happens when detectron2 or torchvision is not -compiled with the version of PyTorch you're running. - -If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them -following [pytorch.org](http://pytorch.org) so that the versions match. - -If the error comes from a pre-built detectron2, check the [release notes](https://github.com/facebookresearch/detectron2/releases), then -uninstall and reinstall the correct pre-built detectron2 that matches your pytorch version. - -If the error comes from detectron2 or torchvision that you built manually from source, -remove the files you built (`build/`, `**/*.so`) and rebuild, so it picks up the version of pytorch currently in your environment. - -If the above instructions do not resolve this problem, please provide an environment (e.g. a dockerfile) that can reproduce the issue. -
    - -Missing torch dynamic libraries, OR segmentation fault immediately when using detectron2. - -This usually happens when detectron2 or torchvision is not -compiled with the version of PyTorch you're running. See the previous common issue for the solution. -
    - -Undefined C++ symbols (e.g. "GLIBCXX..") or C++ symbols not found. - -
-Usually this is because the library was compiled with a newer C++ compiler but is run with an old C++ runtime. - -This often happens with old anaconda. -It may help to run `conda update libgcc` to upgrade its runtime. - -The fundamental solution is to avoid the mismatch, either by compiling with an older version of the C++ -compiler, or by running the code with the proper C++ runtime. -To run the code with a specific C++ runtime, you can use the environment variable `LD_PRELOAD=/path/to/libstdc++.so`. - -
    - -"nvcc not found" or "Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available". - -
-CUDA is not found when building detectron2. -You should make sure that - -``` -python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)' -``` - -prints `(True, a directory with cuda)` at the time you build detectron2. - -Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config. -
    - -"invalid device function" or "no kernel image is available for execution". - -
    -Two possibilities: - -* You build detectron2 with one version of CUDA but run it with a different version. - - To check whether it is the case, - use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. - In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" - to contain cuda libraries of the same version. - - When they are inconsistent, - you need to either install a different build of PyTorch (or build by yourself) - to match your local CUDA installation, or install a different version of CUDA to match PyTorch. - -* PyTorch/torchvision/Detectron2 is not built for the correct GPU SM architecture (aka. compute capability). - - The architecture included by PyTorch/detectron2/torchvision is available in the "architecture flags" in - `python -m detectron2.utils.collect_env`. It must include - the architecture of your GPU, which can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus). - - If you're using pre-built PyTorch/detectron2/torchvision, they have included support for most popular GPUs already. - If not supported, you need to build them from source. - - When building detectron2/torchvision from source, they detect the GPU device and build for only the device. - This means the compiled code may not work on a different GPU device. - To recompile them for the correct architecture, remove all installed/compiled files, - and rebuild them with the `TORCH_CUDA_ARCH_LIST` environment variable set properly. - For example, `export TORCH_CUDA_ARCH_LIST="6.0;7.0"` makes it compile for both P100s and V100s. -
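For the second possibility, a minimal sketch of a quick check (assuming a single visible GPU) is to compare the GPU's compute capability against the SM architectures compiled into the installed PyTorch build; detectron2's own architecture flags are still best read from `python -m detectron2.utils.collect_env` as described above:

```
# Minimal sketch: compare the GPU's compute capability with the SM architectures
# compiled into the installed PyTorch build (torch only; detectron2 flags come
# from collect_env).
import torch

if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability(0)  # e.g. (7, 0) for V100
    arch = f"sm_{major}{minor}"
    compiled = torch.cuda.get_arch_list()                # e.g. ['sm_37', ..., 'sm_70']
    print("GPU architecture:", arch)
    print("Architectures in this torch build:", compiled)
    if arch not in compiled:
        print("Mismatch: set TORCH_CUDA_ARCH_LIST and rebuild.")
else:
    print("No CUDA device visible to torch.")
```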
    - -Undefined CUDA symbols; Cannot open libcudart.so - -
    -The version of NVCC you use to build detectron2 or torchvision does -not match the version of CUDA you are running with. -This often happens when using anaconda's CUDA runtime. - -Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. -In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" -to contain cuda libraries of the same version. - -When they are inconsistent, -you need to either install a different build of PyTorch (or build by yourself) -to match your local CUDA installation, or install a different version of CUDA to match PyTorch. -
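A minimal sketch of that comparison, assuming `nvcc` is on the `PATH`; `python -m detectron2.utils.collect_env` remains the more complete diagnostic:

```
# Minimal sketch: compare the CUDA version torch was built with against the
# local nvcc release (assumes nvcc is on PATH).
import re
import subprocess

import torch

nvcc_out = subprocess.run(["nvcc", "--version"], capture_output=True, text=True).stdout
match = re.search(r"release (\d+\.\d+)", nvcc_out)
print("CUDA used to build torch:", torch.version.cuda)
print("Local nvcc release:", match.group(1) if match else "not recognized")
```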
    - -C++ compilation errors from NVCC / NVRTC, or "Unsupported gpu architecture" - -
-A few possibilities: - -1. The local CUDA/NVCC version has to match the CUDA version of your PyTorch. Both can be found in `python collect_env.py`. - When they are inconsistent, you need to either install a different build of PyTorch (or build it yourself) - to match your local CUDA installation, or install a different version of CUDA to match PyTorch. - -2. The local CUDA/NVCC version must support the SM architecture (a.k.a. compute capability) of your GPU. - The capability of your GPU can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus). - The capabilities supported by NVCC are listed [here](https://gist.github.com/ax3l/9489132). - If your NVCC version is too old, this can be worked around by setting the environment variable - `TORCH_CUDA_ARCH_LIST` to a lower, supported capability. - -3. The combination of NVCC and GCC you use is incompatible. You need to change one of their versions. - See [here](https://gist.github.com/ax3l/9489132) for some valid combinations. - Notably, CUDA<=10.1.105 doesn't support GCC>7.3. - - The CUDA/GCC versions used by PyTorch can be found with `print(torch.__config__.show())`. - -
    - -"ImportError: cannot import name '_C'". - -
    -Please build and install detectron2 following the instructions above. - -Or, if you are running code from detectron2's root directory, `cd` to a different one. -Otherwise you may not import the code that you installed. -
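A minimal sketch for checking which copy of detectron2 is actually imported; if the printed path points into the source checkout rather than an installed location, `cd` out of the repository root first:

```
# Minimal sketch: show where detectron2 is imported from.
import detectron2
print(detectron2.__file__)
```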
    - -Any issue on windows. - -
- -Detectron2 is continuously built on Windows with [CircleCI](https://app.circleci.com/pipelines/github/facebookresearch/detectron2?branch=main). -However, we do not provide official support for it. -PRs that improve code compatibility on Windows are welcome. -
    - -ONNX conversion segfault after some "TraceWarning". - -
-The ONNX package was compiled with a compiler that is too old. - -Please build and install ONNX from its source code using a compiler -whose version is closer to the one used by PyTorch (shown by `torch.__config__.show()`). -
    - -"library not found for -lstdc++" on older version of MacOS - -
    -See -[this stackoverflow answer](https://stackoverflow.com/questions/56083725/macos-build-issues-lstdc-not-found-while-building-python-package). - -
    - - -### Installation inside specific environments: - -* __Colab__: see our [Colab Tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) - which has step-by-step instructions. - -* __Docker__: The official [Dockerfile](docker) installs detectron2 with a few simple commands. - diff --git a/spaces/UndueTarget/audioFILE_to_text/README.md b/spaces/UndueTarget/audioFILE_to_text/README.md deleted file mode 100644 index 4393225b0adc1691e8d2a8370ae24f2a0ea30f4a..0000000000000000000000000000000000000000 --- a/spaces/UndueTarget/audioFILE_to_text/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Audio To Text -emoji: 💩 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: siddh4rth/audio_to_text ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Vageesh1/clip_gpt2/engine.py b/spaces/Vageesh1/clip_gpt2/engine.py deleted file mode 100644 index 609528286375abd40c6e04286bb1d67823a93fb1..0000000000000000000000000000000000000000 --- a/spaces/Vageesh1/clip_gpt2/engine.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import torch -import torchvision.transforms as transforms -from PIL import Image -import json -from neuralnet.model import SeqToSeq -import wget - -url = "https://github.com/Koushik0901/Image-Captioning/releases/download/v1.0/flickr30k.pt" -# os.system("curl -L https://github.com/Koushik0901/Image-Captioning/releases/download/v1.0/flickr30k.pt") -filename = wget.download(url) - -def inference(img_path): - transform = transforms.Compose( - [ - transforms.Resize((299, 299)), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ] - ) - - vocabulary = json.load(open('./vocab.json')) - - model_params = {"embed_size":256, "hidden_size":512, "vocab_size": 7666, "num_layers": 3, "device":"cpu"} - model = SeqToSeq(**model_params) - checkpoint = torch.load('./flickr30k.pt', map_location = 'cpu') - model.load_state_dict(checkpoint['state_dict']) - - img = transform(Image.open(img_path).convert("RGB")).unsqueeze(0) - - result_caption = [] - model.eval() - - x = model.encoder(img).unsqueeze(0) - states = None - - out_captions = model.caption_image(img, vocabulary['itos'], 50) - return " ".join(out_captions[1:-1]) - - -if __name__ == '__main__': - print(inference('./test_examples/dog.png')) diff --git a/spaces/VaishakhRaveendran/Audio_2_chat/README.md b/spaces/VaishakhRaveendran/Audio_2_chat/README.md deleted file mode 100644 index 57e8957e4573e765c848987bae1040b52f42d7c3..0000000000000000000000000000000000000000 --- a/spaces/VaishakhRaveendran/Audio_2_chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Audio 2 Chat -emoji: 🌍 -colorFrom: red -colorTo: blue -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/VlaTal/facial_beauty_analysis/points.py b/spaces/VlaTal/facial_beauty_analysis/points.py deleted file mode 100644 index 35810d1c3e2042436c2af72ebfdc6e22bcda98b6..0000000000000000000000000000000000000000 --- a/spaces/VlaTal/facial_beauty_analysis/points.py +++ /dev/null @@ -1,27 +0,0 @@ -from ratios import Ratios - -points = { - Ratios.UNDER_EYES: (23, 253), - Ratios.INTEROCULAR: (243, 463), - Ratios.NOSE_WIDTH: (48, 278), - Ratios.MOUTH_WIDTH: (61, 293), - Ratios.UPPER_LIP_TO_JAW: (0, 152), - Ratios.LIPS_HEIGHT: (0, 17), - Ratios.NOSE_TO_MOUTH_HEIGHT: (2, 
0), - Ratios.UPPER_LIP_HEIGHT: (0, 13), - Ratios.FACE_TOP_TO_EYEBROWS: (10, 9), - Ratios.EYEBROWS_TO_NOSE: (9, 2), - Ratios.NOSE_TO_JAW: (2, 152), - Ratios.FACE_HEIGHT: (10, 152), - Ratios.FACE_WIDTH: (227, 447), - Ratios.LEFT_LOWER_EYEBROW_LENGTH: (46, 55), - Ratios.RIGHT_LOWER_EYEBROW_LENGTH: (285, 276), - Ratios.LEFT_LOWER_LIP_LENGTH: (61, 17), - Ratios.RIGHT_LOWER_LIP_LENGTH: (17, 291), - Ratios.LEFT_UPPER_EYEBROW_LENGTH: (105, 9), - Ratios.RIGHT_UPPER_EYEBROW_LENGTH: (334, 9), - Ratios.LEFT_UPPER_LIP_LENGTH: (61, 0), - Ratios.RIGHT_UPPER_LIP_LENGTH: (17, 0), - Ratios.LEFT_NOSE_WIDTH: (48, 2), - Ratios.RIGHT_NOSE_WIDTH: (278, 2) -} diff --git a/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/audio2numpy_package.py b/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/audio2numpy_package.py deleted file mode 100644 index 5c8eb3467e91246a1b6d9df02638ed36d778a8da..0000000000000000000000000000000000000000 --- a/spaces/VoiceHero69/changer/setup_tools/magicinstaller/requirements/audio2numpy_package.py +++ /dev/null @@ -1,5 +0,0 @@ -from setup_tools.magicinstaller.requirement import SimpleRequirement - - -class AudioToNumpy(SimpleRequirement): - package_name = 'audio2numpy' diff --git a/spaces/Winterflower/question-generator/app.py b/spaces/Winterflower/question-generator/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/Winterflower/question-generator/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Wootang01/Paraphraser_two/README.md b/spaces/Wootang01/Paraphraser_two/README.md deleted file mode 100644 index 76fe17ee17a9b27647cd8745134ff802c24c258b..0000000000000000000000000000000000000000 --- a/spaces/Wootang01/Paraphraser_two/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Paraphraser_two -emoji: 🔥 -colorFrom: yellow -colorTo: purple -sdk: streamlit -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Wrathless/pyannote-voice-activity-detection/README.md b/spaces/Wrathless/pyannote-voice-activity-detection/README.md deleted file mode 100644 index 1111a5cd6f34b1ce8e68483fe2c1e4d45f87614f..0000000000000000000000000000000000000000 --- a/spaces/Wrathless/pyannote-voice-activity-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AutoTrain Advanced -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -duplicated_from: autotrain-projects/autotrain-advanced -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/layers.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/layers.py deleted file mode 100644 index ef6a6399f570231ac860afdffc6ba4c257eb389b..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/layers.py +++ /dev/null @@ -1,306 +0,0 @@ -"`fastai.layers` provides essential functions to building and modifying `model` architectures" -from .torch_core import * - -__all__ = ['AdaptiveConcatPool2d', 'BCEWithLogitsFlat', 'BCEFlat', 'MSELossFlat', 'CrossEntropyFlat', 'Debugger', - 'Flatten', 'Lambda', 'PoolFlatten', 'View', 'ResizeBatch', 'bn_drop_lin', 'conv2d', 'conv2d_trans', 'conv_layer', - 
'embedding', 'simple_cnn', 'NormType', 'relu', 'batchnorm_2d', 'trunc_normal_', 'PixelShuffle_ICNR', 'icnr', - 'NoopLoss', 'WassersteinLoss', 'SelfAttention', 'SequentialEx', 'MergeLayer', 'res_block', 'sigmoid_range', - 'SigmoidRange', 'PartialLayer', 'FlattenedLoss', 'BatchNorm1dFlat', 'LabelSmoothingCrossEntropy', 'PooledSelfAttention2d'] - -class Lambda(Module): - "Create a layer that simply calls `func` with `x`" - def __init__(self, func:LambdaFunc): self.func=func - def forward(self, x): return self.func(x) - -class View(Module): - "Reshape `x` to `size`" - def __init__(self, *size:int): self.size = size - def forward(self, x): return x.view(self.size) - -class ResizeBatch(Module): - "Reshape `x` to `size`, keeping batch dim the same size" - def __init__(self, *size:int): self.size = size - def forward(self, x): return x.view((x.size(0),) + self.size) - -class Flatten(Module): - "Flatten `x` to a single dimension, often used at the end of a model. `full` for rank-1 tensor" - def __init__(self, full:bool=False): self.full = full - def forward(self, x): return x.view(-1) if self.full else x.view(x.size(0), -1) - -def PoolFlatten()->nn.Sequential: - "Apply `nn.AdaptiveAvgPool2d` to `x` and then flatten the result." - return nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten()) - -NormType = Enum('NormType', 'Batch BatchZero Weight Spectral Group Instance SpectralGN') - -def batchnorm_2d(nf:int, norm_type:NormType=NormType.Batch): - "A batchnorm2d layer with `nf` features initialized depending on `norm_type`." - bn = nn.BatchNorm2d(nf) - with torch.no_grad(): - bn.bias.fill_(1e-3) - bn.weight.fill_(0. if norm_type==NormType.BatchZero else 1.) - return bn - -def bn_drop_lin(n_in:int, n_out:int, bn:bool=True, p:float=0., actn:Optional[nn.Module]=None): - "Sequence of batchnorm (if `bn`), dropout (with `p`) and linear (`n_in`,`n_out`) layers followed by `actn`." - layers = [nn.BatchNorm1d(n_in)] if bn else [] - if p != 0: layers.append(nn.Dropout(p)) - layers.append(nn.Linear(n_in, n_out)) - if actn is not None: layers.append(actn) - return layers - -def conv1d(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False): - "Create and initialize a `nn.Conv1d` layer with spectral normalization." - conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias) - nn.init.kaiming_normal_(conv.weight) - if bias: conv.bias.data.zero_() - return spectral_norm(conv) - -class PooledSelfAttention2d(Module): - "Pooled self attention layer for 2d." 
- def __init__(self, n_channels:int): - self.n_channels = n_channels - self.theta = spectral_norm(conv2d(n_channels, n_channels//8, 1)) # query - self.phi = spectral_norm(conv2d(n_channels, n_channels//8, 1)) # key - self.g = spectral_norm(conv2d(n_channels, n_channels//2, 1)) # value - self.o = spectral_norm(conv2d(n_channels//2, n_channels, 1)) - self.gamma = nn.Parameter(tensor([0.])) - - def forward(self, x): - # code borrowed from https://github.com/ajbrock/BigGAN-PyTorch/blob/7b65e82d058bfe035fc4e299f322a1f83993e04c/layers.py#L156 - theta = self.theta(x) - phi = F.max_pool2d(self.phi(x), [2,2]) - g = F.max_pool2d(self.g(x), [2,2]) - theta = theta.view(-1, self.n_channels // 8, x.shape[2] * x.shape[3]) - phi = phi.view(-1, self.n_channels // 8, x.shape[2] * x.shape[3] // 4) - g = g.view(-1, self.n_channels // 2, x.shape[2] * x.shape[3] // 4) - beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) - o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.n_channels // 2, x.shape[2], x.shape[3])) - return self.gamma * o + x - -class SelfAttention(Module): - "Self attention layer for nd." - def __init__(self, n_channels:int): - self.query = conv1d(n_channels, n_channels//8) - self.key = conv1d(n_channels, n_channels//8) - self.value = conv1d(n_channels, n_channels) - self.gamma = nn.Parameter(tensor([0.])) - - def forward(self, x): - #Notation from https://arxiv.org/pdf/1805.08318.pdf - size = x.size() - x = x.view(*size[:2],-1) - f,g,h = self.query(x),self.key(x),self.value(x) - beta = F.softmax(torch.bmm(f.permute(0,2,1).contiguous(), g), dim=1) - o = self.gamma * torch.bmm(h, beta) + x - return o.view(*size).contiguous() - -def conv2d(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias=False, init:LayerFunc=nn.init.kaiming_normal_) -> nn.Conv2d: - "Create and initialize `nn.Conv2d` layer. `padding` defaults to `ks//2`." - if padding is None: padding = ks//2 - return init_default(nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias), init) - -def conv2d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0, bias=False) -> nn.ConvTranspose2d: - "Create `nn.ConvTranspose2d` layer." - return nn.ConvTranspose2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias) - -def relu(inplace:bool=False, leaky:float=None): - "Return a relu activation, maybe `leaky` and `inplace`." - return nn.LeakyReLU(inplace=inplace, negative_slope=leaky) if leaky is not None else nn.ReLU(inplace=inplace) - -def conv_layer(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias:bool=None, is_1d:bool=False, - norm_type:Optional[NormType]=NormType.Batch, use_activ:bool=True, leaky:float=None, - transpose:bool=False, init:Callable=nn.init.kaiming_normal_, self_attention:bool=False): - "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers." 
- if padding is None: padding = (ks-1)//2 if not transpose else 0 - bn = norm_type in (NormType.Batch, NormType.BatchZero) - if bias is None: bias = not bn - conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d - conv = init_default(conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding), init) - if norm_type==NormType.Weight: conv = weight_norm(conv) - elif norm_type==NormType.Spectral: conv = spectral_norm(conv) - layers = [conv] - if use_activ: layers.append(relu(True, leaky=leaky)) - if bn: layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf)) - if self_attention: layers.append(SelfAttention(nf)) - return nn.Sequential(*layers) - -class SequentialEx(Module): - "Like `nn.Sequential`, but with ModuleList semantics, and can access module input" - def __init__(self, *layers): self.layers = nn.ModuleList(layers) - - def forward(self, x): - res = x - for l in self.layers: - res.orig = x - nres = l(res) - #print(l. + ' mean: ' + str(nres.abs().mean())) - #print(' max: ' + str(nres.abs().max())) - # We have to remove res.orig to avoid hanging refs and therefore memory leaks - res.orig = None - res = nres - return res - - def __getitem__(self,i): return self.layers[i] - def append(self,l): return self.layers.append(l) - def extend(self,l): return self.layers.extend(l) - def insert(self,i,l): return self.layers.insert(i,l) - -class MergeLayer(Module): - "Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`." - def __init__(self, dense:bool=False): self.dense=dense - def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig) - -def res_block(nf, dense:bool=False, norm_type:Optional[NormType]=NormType.Batch, bottle:bool=False, **conv_kwargs): - "Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`." - norm2 = norm_type - if not dense and (norm_type==NormType.Batch): norm2 = NormType.BatchZero - nf_inner = nf//2 if bottle else nf - return SequentialEx(conv_layer(nf, nf_inner, norm_type=norm_type, **conv_kwargs), - conv_layer(nf_inner, nf, norm_type=norm2, **conv_kwargs), - MergeLayer(dense)) - -def sigmoid_range(x:Tensor, low:int, high:int): - "Sigmoid function with range `(low, high)`" - return torch.sigmoid(x) * (high - low) + low - -class SigmoidRange(Module): - "Sigmoid module with range `(low, high)`" - def __init__(self, low:int, high:int): self.low,self.high = low,high - def forward(self, x): return sigmoid_range(x, self.low, self.high) - -class PartialLayer(Module): - "Layer that applies `partial(func, **kwargs)`." - def __init__(self, func, **kwargs): self.repr,self.func = f'{func}({kwargs})', partial(func, **kwargs) - def forward(self, x): return self.func(x) - def __repr__(self): return self.repr - -class AdaptiveConcatPool2d(Module): - "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`." - def __init__(self, sz:Optional[int]=None): - "Output will be 2*sz or 2 if sz is None" - self.output_size = sz or 1 - self.ap = nn.AdaptiveAvgPool2d(self.output_size) - self.mp = nn.AdaptiveMaxPool2d(self.output_size) - - def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1) - -class Debugger(Module): - "A module to debug inside a model." - def forward(self,x:Tensor) -> Tensor: - set_trace() - return x - -def icnr(x, scale=2, init=nn.init.kaiming_normal_): - "ICNR init of `x`, with `scale` and `init` function."
- ni,nf,h,w = x.shape - ni2 = int(ni/(scale**2)) - k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1) - k = k.contiguous().view(ni2, nf, -1) - k = k.repeat(1, 1, scale**2) - k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1) - x.data.copy_(k) - -class PixelShuffle_ICNR(Module): - "Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`, `icnr` init, and `weight_norm`." - def __init__(self, ni:int, nf:int=None, scale:int=2, blur:bool=False, norm_type=NormType.Weight, leaky:float=None): - nf = ifnone(nf, ni) - self.conv = conv_layer(ni, nf*(scale**2), ks=1, norm_type=norm_type, use_activ=False) - icnr(self.conv[0].weight) - self.shuf = nn.PixelShuffle(scale) - # Blurring over (h*w) kernel - # "Super-Resolution using Convolutional Neural Networks without Any Checkerboard Artifacts" - # - https://arxiv.org/abs/1806.02658 - self.pad = nn.ReplicationPad2d((1,0,1,0)) - self.blur = nn.AvgPool2d(2, stride=1) - self.relu = relu(True, leaky=leaky) - - def forward(self,x): - x = self.shuf(self.relu(self.conv(x))) - return self.blur(self.pad(x)) if self.blur else x - -class FlattenedLoss(): - "Same as `func`, but flattens input and target." - def __init__(self, func, *args, axis:int=-1, floatify:bool=False, is_2d:bool=True, **kwargs): - self.func,self.axis,self.floatify,self.is_2d = func(*args,**kwargs),axis,floatify,is_2d - functools.update_wrapper(self, self.func) - - def __repr__(self): return f"FlattenedLoss of {self.func}" - @property - def reduction(self): return self.func.reduction - @reduction.setter - def reduction(self, v): self.func.reduction = v - - def __call__(self, input:Tensor, target:Tensor, **kwargs)->Rank0Tensor: - input = input.transpose(self.axis,-1).contiguous() - target = target.transpose(self.axis,-1).contiguous() - if self.floatify: target = target.float() - input = input.view(-1,input.shape[-1]) if self.is_2d else input.view(-1) - return self.func.__call__(input, target.view(-1), **kwargs) - -def CrossEntropyFlat(*args, axis:int=-1, **kwargs): - "Same as `nn.CrossEntropyLoss`, but flattens input and target." - return FlattenedLoss(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) - -def BCEWithLogitsFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." - return FlattenedLoss(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -def BCEFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.BCELoss`, but flattens input and target." - return FlattenedLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -def MSELossFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.MSELoss`, but flattens input and target." - return FlattenedLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -class NoopLoss(Module): - "Just returns the mean of the `output`." - def forward(self, output, *args): return output.mean() - -class WassersteinLoss(Module): - "For WGAN." - def forward(self, real, fake): return real.mean() - fake.mean() - -def simple_cnn(actns:Collection[int], kernel_szs:Collection[int]=None, - strides:Collection[int]=None, bn=False) -> nn.Sequential: - "CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`." 
- nl = len(actns)-1 - kernel_szs = ifnone(kernel_szs, [3]*nl) - strides = ifnone(strides , [2]*nl) - layers = [conv_layer(actns[i], actns[i+1], kernel_szs[i], stride=strides[i], - norm_type=(NormType.Batch if bn and i<(len(strides)-1) else None)) for i in range_of(strides)] - layers.append(PoolFlatten()) - return nn.Sequential(*layers) - -def trunc_normal_(x:Tensor, mean:float=0., std:float=1.) -> Tensor: - "Truncated normal initialization." - # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12 - return x.normal_().fmod_(2).mul_(std).add_(mean) - -def embedding(ni:int,nf:int) -> nn.Module: - "Create an embedding layer." - emb = nn.Embedding(ni, nf) - # See https://arxiv.org/abs/1711.09160 - with torch.no_grad(): trunc_normal_(emb.weight, std=0.01) - return emb - -class BatchNorm1dFlat(nn.BatchNorm1d): - "`nn.BatchNorm1d`, but first flattens leading dimensions" - def forward(self, x): - if x.dim()==2: return super().forward(x) - *f,l = x.shape - x = x.contiguous().view(-1,l) - return super().forward(x).view(*f,l) - -class LabelSmoothingCrossEntropy(Module): - def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction - - def forward(self, output, target): - c = output.size()[-1] - log_preds = F.log_softmax(output, dim=-1) - if self.reduction=='sum': loss = -log_preds.sum() - else: - loss = -log_preds.sum(dim=-1) - if self.reduction=='mean': loss = loss.mean() - return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction) diff --git a/spaces/XzJosh/JM-Bert-VITS2/text/chinese_bert.py b/spaces/XzJosh/JM-Bert-VITS2/text/chinese_bert.py deleted file mode 100644 index cb84ce0b426cd0a1c7954ddcdf41322c10ed14fa..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/JM-Bert-VITS2/text/chinese_bert.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -tokenizer = AutoTokenizer.from_pretrained("./bert/chinese-roberta-wwm-ext-large") -model = AutoModelForMaskedLM.from_pretrained("./bert/chinese-roberta-wwm-ext-large").to(device) - -def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors='pt') - for i in inputs: - inputs[i] = inputs[i].to(device) - res = model(**inputs, output_hidden_states=True) - res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu() - - assert len(word2ph) == len(text)+2 - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - - return phone_level_feature.T - -if __name__ == '__main__': - # feature = get_bert_feature('你好,我是说的道理。') - import torch - - word_level_feature = torch.rand(38, 1024) # 12 words, 1024-dim features per word - word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1] - - # compute the total number of frames - total_frames = sum(word2phone) - print(word_level_feature.shape) - print(word2phone) - phone_level_feature = [] - for i in range(len(word2phone)): - print(word_level_feature[i].shape) - - # repeat each word word2phone[i] times - repeat_feature = word_level_feature[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - print(phone_level_feature.shape) # torch.Size([36, 1024]) - diff --git
a/spaces/XzJosh/maimai-Bert-VITS2/train_ms.py b/spaces/XzJosh/maimai-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/maimai-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - 
noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in 
range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - 
- optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - 
spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/YUMASUKIii/Chat/Dockerfile b/spaces/YUMASUKIii/Chat/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/YUMASUKIii/Chat/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/Yuliang/ECON/lib/pixielib/models/hrnet.py b/spaces/Yuliang/ECON/lib/pixielib/models/hrnet.py deleted file mode 100644 index 665b96efa29fb273b2e28773e5ea35391d99b90e..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/pixielib/models/hrnet.py +++ /dev/null @@ -1,546 +0,0 @@ -""" -borrowed from https://github.com/vchoutas/expose/blob/master/expose/models/backbone/hrnet.py -""" - -import os.path as osp - -import torch -import torch.nn as nn -from torchvision.models.resnet import BasicBlock, Bottleneck - -BN_MOMENTUM = 0.1 - - -def load_HRNet(pretrained=False): - hr_net_cfg_dict = { - "use_old_impl": False, - "pretrained_layers": ["*"], - "stage1": { - "num_modules": 1, - "num_branches": 1, - "num_blocks": [4], - "num_channels": [64], - "block": "BOTTLENECK", - "fuse_method": "SUM", - }, - "stage2": { - "num_modules": 1, - "num_branches": 2, - "num_blocks": [4, 4], - "num_channels": [48, 96], - "block": "BASIC", - "fuse_method": "SUM", - }, - "stage3": { - "num_modules": 4, - "num_branches": 3, - "num_blocks": [4, 4, 4], - "num_channels": [48, 96, 192], - "block": "BASIC", - "fuse_method": "SUM", - }, - "stage4": { - "num_modules": 3, - "num_branches": 4, - "num_blocks": [4, 4, 4, 4], - "num_channels": [48, 96, 192, 384], - "block": "BASIC", - "fuse_method": "SUM", - }, - } - hr_net_cfg = hr_net_cfg_dict - model = HighResolutionNet(hr_net_cfg) - - return model - - -class HighResolutionModule(nn.Module): - def __init__( - self, - num_branches, - blocks, - num_blocks, - num_inchannels, - num_channels, - fuse_method, - multi_scale_output=True, - ): - super(HighResolutionModule, self).__init__() - self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels) 
- - self.num_inchannels = num_inchannels - self.fuse_method = fuse_method - self.num_branches = num_branches - - self.multi_scale_output = multi_scale_output - - self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(True) - - def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): - if num_branches != len(num_blocks): - error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(num_branches, len(num_blocks)) - raise ValueError(error_msg) - - if num_branches != len(num_channels): - error_msg = "NUM_BRANCHES({}) <> NUM_CHANNELS({})".format( - num_branches, len(num_channels) - ) - raise ValueError(error_msg) - - if num_branches != len(num_inchannels): - error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format( - num_branches, len(num_inchannels) - ) - raise ValueError(error_msg) - - def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): - downsample = None - if ( - stride != 1 or - self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion - ): - downsample = nn.Sequential( - nn.Conv2d( - self.num_inchannels[branch_index], - num_channels[branch_index] * block.expansion, - kernel_size=1, - stride=stride, - bias=False, - ), - nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM), - ) - - layers = [] - layers.append( - block( - self.num_inchannels[branch_index], - num_channels[branch_index], - stride, - downsample, - ) - ) - self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) - - return nn.Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, num_channels): - branches = [] - - for i in range(num_branches): - branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - if self.num_branches == 1: - return None - - num_branches = self.num_branches - num_inchannels = self.num_inchannels - fuse_layers = [] - for i in range(num_branches if self.multi_scale_output else 1): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - nn.Conv2d( - num_inchannels[j], - num_inchannels[i], - 1, - 1, - 0, - bias=False, - ), - nn.BatchNorm2d(num_inchannels[i]), - nn.Upsample(scale_factor=2**(j - i), mode="nearest"), - ) - ) - elif j == i: - fuse_layer.append(None) - else: - conv3x3s = [] - for k in range(i - j): - if k == i - j - 1: - num_outchannels_conv3x3 = num_inchannels[i] - conv3x3s.append( - nn.Sequential( - nn.Conv2d( - num_inchannels[j], - num_outchannels_conv3x3, - 3, - 2, - 1, - bias=False, - ), - nn.BatchNorm2d(num_outchannels_conv3x3), - ) - ) - else: - num_outchannels_conv3x3 = num_inchannels[j] - conv3x3s.append( - nn.Sequential( - nn.Conv2d( - num_inchannels[j], - num_outchannels_conv3x3, - 3, - 2, - 1, - bias=False, - ), - nn.BatchNorm2d(num_outchannels_conv3x3), - nn.ReLU(True), - ) - ) - fuse_layer.append(nn.Sequential(*conv3x3s)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def get_num_inchannels(self): - return self.num_inchannels - - def forward(self, x): - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - - for i in 
range(len(self.fuse_layers)): - y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) - for j in range(1, self.num_branches): - if i == j: - y = y + x[j] - else: - y = y + self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - - return x_fuse - - -blocks_dict = {"BASIC": BasicBlock, "BOTTLENECK": Bottleneck} - - -class HighResolutionNet(nn.Module): - def __init__(self, cfg, **kwargs): - self.inplanes = 64 - super(HighResolutionNet, self).__init__() - use_old_impl = cfg.get("use_old_impl") - self.use_old_impl = use_old_impl - - # stem net - self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) - self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) - self.relu = nn.ReLU(inplace=True) - - self.stage1_cfg = cfg.get("stage1", {}) - num_channels = self.stage1_cfg["num_channels"][0] - block = blocks_dict[self.stage1_cfg["block"]] - num_blocks = self.stage1_cfg["num_blocks"][0] - self.layer1 = self._make_layer(block, num_channels, num_blocks) - stage1_out_channel = block.expansion * num_channels - - self.stage2_cfg = cfg.get("stage2", {}) - num_channels = self.stage2_cfg.get("num_channels", (32, 64)) - block = blocks_dict[self.stage2_cfg.get("block")] - num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] - stage2_num_channels = num_channels - self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) - self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) - - self.stage3_cfg = cfg.get("stage3") - num_channels = self.stage3_cfg["num_channels"] - block = blocks_dict[self.stage3_cfg["block"]] - num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] - stage3_num_channels = num_channels - self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) - self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) - - self.stage4_cfg = cfg.get("stage4") - num_channels = self.stage4_cfg["num_channels"] - block = blocks_dict[self.stage4_cfg["block"]] - num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] - self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) - stage_4_out_channels = num_channels - - self.stage4, pre_stage_channels = self._make_stage( - self.stage4_cfg, num_channels, multi_scale_output=not self.use_old_impl - ) - stage4_num_channels = num_channels - - self.output_channels_dim = pre_stage_channels - - self.pretrained_layers = cfg["pretrained_layers"] - self.init_weights() - - self.avg_pooling = nn.AdaptiveAvgPool2d(1) - - if use_old_impl: - in_dims = ( - 2**2 * stage2_num_channels[-1] + 2**1 * stage3_num_channels[-1] + - stage_4_out_channels[-1] - ) - else: - # TODO: Replace with parameters - in_dims = 4 * 384 - self.subsample_4 = self._make_subsample_layer( - in_channels=stage4_num_channels[0], num_layers=3 - ) - - self.subsample_3 = self._make_subsample_layer( - in_channels=stage2_num_channels[-1], num_layers=2 - ) - self.subsample_2 = self._make_subsample_layer( - in_channels=stage3_num_channels[-1], num_layers=1 - ) - self.conv_layers = self._make_conv_layer(in_channels=in_dims, num_layers=5) - - def get_output_dim(self): - base_output = {f"layer{idx + 1}": val for idx, val in enumerate(self.output_channels_dim)} - output = base_output.copy() - for key in base_output: - output[f"{key}_avg_pooling"] = output[key] - 
output["concat"] = 2048 - return output - - def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append( - nn.Sequential( - nn.Conv2d( - num_channels_pre_layer[i], - num_channels_cur_layer[i], - 3, - 1, - 1, - bias=False, - ), - nn.BatchNorm2d(num_channels_cur_layer[i]), - nn.ReLU(inplace=True), - ) - ) - else: - transition_layers.append(None) - else: - conv3x3s = [] - for j in range(i + 1 - num_branches_pre): - inchannels = num_channels_pre_layer[-1] - outchannels = ( - num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels - ) - conv3x3s.append( - nn.Sequential( - nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), - nn.BatchNorm2d(outchannels), - nn.ReLU(inplace=True), - ) - ) - transition_layers.append(nn.Sequential(*conv3x3s)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False, - ), - nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def _make_conv_layer(self, in_channels=2048, num_layers=3, num_filters=2048, stride=1): - - layers = [] - for i in range(num_layers): - - downsample = nn.Conv2d(in_channels, num_filters, stride=1, kernel_size=1, bias=False) - layers.append(Bottleneck(in_channels, num_filters // 4, downsample=downsample)) - in_channels = num_filters - - return nn.Sequential(*layers) - - def _make_subsample_layer(self, in_channels=96, num_layers=3, stride=2): - - layers = [] - for i in range(num_layers): - - layers.append( - nn.Conv2d( - in_channels=in_channels, - out_channels=2 * in_channels, - kernel_size=3, - stride=stride, - padding=1, - ) - ) - in_channels = 2 * in_channels - layers.append(nn.BatchNorm2d(in_channels, momentum=BN_MOMENTUM)) - layers.append(nn.ReLU(inplace=True)) - - return nn.Sequential(*layers) - - def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True, log=False): - num_modules = layer_config["num_modules"] - num_branches = layer_config["num_branches"] - num_blocks = layer_config["num_blocks"] - num_channels = layer_config["num_channels"] - block = blocks_dict[layer_config["block"]] - fuse_method = layer_config["fuse_method"] - - modules = [] - for i in range(num_modules): - # multi_scale_output is only used last module - if not multi_scale_output and i == num_modules - 1: - reset_multi_scale_output = False - else: - reset_multi_scale_output = True - - modules.append( - HighResolutionModule( - num_branches, - block, - num_blocks, - num_inchannels, - num_channels, - fuse_method, - reset_multi_scale_output, - ) - ) - modules[-1].log = log - num_inchannels = modules[-1].get_num_inchannels() - - return nn.Sequential(*modules), num_inchannels - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.conv2(x) - x = self.bn2(x) - x = self.relu(x) - x = self.layer1(x) - - 
x_list = [] - for i in range(self.stage2_cfg["num_branches"]): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_cfg["num_branches"]): - if self.transition2[i] is not None: - if i < self.stage2_cfg["num_branches"]: - x_list.append(self.transition2[i](y_list[i])) - else: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_cfg["num_branches"]): - if self.transition3[i] is not None: - if i < self.stage3_cfg["num_branches"]: - x_list.append(self.transition3[i](y_list[i])) - else: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - if not self.use_old_impl: - y_list = self.stage4(x_list) - - output = {} - for idx, x in enumerate(y_list): - output[f"layer{idx + 1}"] = x - - feat_list = [] - if self.use_old_impl: - x3 = self.subsample_3(x_list[1]) - x2 = self.subsample_2(x_list[2]) - x1 = x_list[3] - feat_list = [x3, x2, x1] - else: - x4 = self.subsample_4(y_list[0]) - x3 = self.subsample_3(y_list[1]) - x2 = self.subsample_2(y_list[2]) - x1 = y_list[3] - feat_list = [x4, x3, x2, x1] - - xf = self.conv_layers(torch.cat(feat_list, dim=1)) - xf = xf.mean(dim=(2, 3)) - xf = xf.view(xf.size(0), -1) - output["concat"] = xf - # y_list = self.stage4(x_list) - # output['stage4'] = y_list[0] - # output['stage4_avg_pooling'] = self.avg_pooling(y_list[0]).view( - # *y_list[0].shape[:2]) - - # concat_outputs = y_list + x_list - # output['concat'] = torch.cat([ - # self.avg_pooling(tensor).view(*tensor.shape[:2]) - # for tensor in concat_outputs], - # dim=1) - - return output - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - nn.init.normal_(m.weight, std=0.001) - for name, _ in m.named_parameters(): - if name in ["bias"]: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.ConvTranspose2d): - nn.init.normal_(m.weight, std=0.001) - for name, _ in m.named_parameters(): - if name in ["bias"]: - nn.init.constant_(m.bias, 0) - - def load_weights(self, pretrained=""): - pretrained = osp.expandvars(pretrained) - if osp.isfile(pretrained): - pretrained_state_dict = torch.load(pretrained, map_location=torch.device("cpu")) - - need_init_state_dict = {} - for name, m in pretrained_state_dict.items(): - if ( - name.split(".")[0] in self.pretrained_layers or self.pretrained_layers[0] == "*" - ): - need_init_state_dict[name] = m - missing, unexpected = self.load_state_dict(need_init_state_dict, strict=False) - elif pretrained: - raise ValueError("{} is not exist!".format(pretrained)) diff --git a/spaces/Yuliang/ECON/lib/pymafx/utils/iuvmap.py b/spaces/Yuliang/ECON/lib/pymafx/utils/iuvmap.py deleted file mode 100644 index 8c1914dac03f553f7651fac74ca3bd1b45b65645..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/pymafx/utils/iuvmap.py +++ /dev/null @@ -1,293 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def iuvmap_clean(U_uv, V_uv, Index_UV, AnnIndex=None): - - Index_UV_max = torch.argmax(Index_UV, dim=1).float() - recon_Index_UV = [] - for i in range(Index_UV.size(1)): - if i == 0: - recon_Index_UV_i = torch.min( - F.threshold(Index_UV_max + 1, 0.5, 0), -F.threshold(-Index_UV_max - 1, 
-1.5, 0) - ) - else: - recon_Index_UV_i = torch.min( - F.threshold(Index_UV_max, i - 0.5, 0), -F.threshold(-Index_UV_max, -i - 0.5, 0) - ) / float(i) - recon_Index_UV.append(recon_Index_UV_i) - recon_Index_UV = torch.stack(recon_Index_UV, dim=1) - - if AnnIndex is None: - recon_Ann_Index = None - else: - AnnIndex_max = torch.argmax(AnnIndex, dim=1).float() - recon_Ann_Index = [] - for i in range(AnnIndex.size(1)): - if i == 0: - recon_Ann_Index_i = torch.min( - F.threshold(AnnIndex_max + 1, 0.5, 0), -F.threshold(-AnnIndex_max - 1, -1.5, 0) - ) - else: - recon_Ann_Index_i = torch.min( - F.threshold(AnnIndex_max, i - 0.5, 0), -F.threshold(-AnnIndex_max, -i - 0.5, 0) - ) / float(i) - recon_Ann_Index.append(recon_Ann_Index_i) - recon_Ann_Index = torch.stack(recon_Ann_Index, dim=1) - - recon_U = recon_Index_UV * U_uv - recon_V = recon_Index_UV * V_uv - - return recon_U, recon_V, recon_Index_UV, recon_Ann_Index - - -def iuv_map2img(U_uv, V_uv, Index_UV, AnnIndex=None, uv_rois=None, ind_mapping=None, n_part=24): - device_id = U_uv.get_device() - batch_size = U_uv.size(0) - K = U_uv.size(1) - heatmap_size = U_uv.size(2) - - Index_UV_max = torch.argmax(Index_UV, dim=1) - if AnnIndex is None: - Index_UV_max = Index_UV_max.to(torch.int64) - else: - AnnIndex_max = torch.argmax(AnnIndex, dim=1) - Index_UV_max = Index_UV_max * (AnnIndex_max > 0).to(torch.int64) - - outputs = [] - - for batch_id in range(batch_size): - - output = torch.zeros([3, U_uv.size(2), U_uv.size(3)], dtype=torch.float32).cuda(device_id) - output[0] = Index_UV_max[batch_id].to(torch.float32) - if ind_mapping is None: - output[0] /= float(K - 1) - else: - for ind in range(len(ind_mapping)): - output[0][output[0] == ind] = ind_mapping[ind] * (1. / n_part) - - for part_id in range(0, K): - CurrentU = U_uv[batch_id, part_id] - CurrentV = V_uv[batch_id, part_id] - output[1, - Index_UV_max[batch_id] == part_id] = CurrentU[Index_UV_max[batch_id] == part_id] - output[2, - Index_UV_max[batch_id] == part_id] = CurrentV[Index_UV_max[batch_id] == part_id] - - if uv_rois is None: - outputs.append(output.unsqueeze(0)) - else: - roi_fg = uv_rois[batch_id][1:] - - # x1 = roi_fg[0] - # x2 = roi_fg[2] - # y1 = roi_fg[1] - # y2 = roi_fg[3] - - w = roi_fg[2] - roi_fg[0] - h = roi_fg[3] - roi_fg[1] - - aspect_ratio = float(w) / h - - if aspect_ratio < 1: - new_size = [heatmap_size, max(int(heatmap_size * aspect_ratio), 1)] - output = F.interpolate(output.unsqueeze(0), size=new_size, mode='nearest') - paddingleft = int(0.5 * (heatmap_size - new_size[1])) - output = F.pad( - output, pad=(paddingleft, heatmap_size - new_size[1] - paddingleft, 0, 0) - ) - else: - new_size = [max(int(heatmap_size / aspect_ratio), 1), heatmap_size] - output = F.interpolate(output.unsqueeze(0), size=new_size, mode='nearest') - paddingtop = int(0.5 * (heatmap_size - new_size[0])) - output = F.pad( - output, pad=(0, 0, paddingtop, heatmap_size - new_size[0] - paddingtop) - ) - - outputs.append(output) - - return torch.cat(outputs, dim=0) - - -def iuv_img2map(uvimages, uv_rois=None, new_size=None, n_part=24): - device_id = uvimages.get_device() - batch_size = uvimages.size(0) - uvimg_size = uvimages.size(-1) - - Index2mask = [[0], [1, 2], [3], [4], [5], [6], [7, 9], [8, 10], [11, 13], [12, 14], [15, 17], - [16, 18], [19, 21], [20, 22], [23, 24]] - - part_ind = torch.round(uvimages[:, 0, :, :] * n_part) - part_u = uvimages[:, 1, :, :] - part_v = uvimages[:, 2, :, :] - - recon_U = [] - recon_V = [] - recon_Index_UV = [] - recon_Ann_Index = [] - - for i in range(n_part + 1): - if i 
== 0: - recon_Index_UV_i = torch.min( - F.threshold(part_ind + 1, 0.5, 0), -F.threshold(-part_ind - 1, -1.5, 0) - ) - else: - recon_Index_UV_i = torch.min( - F.threshold(part_ind, i - 0.5, 0), -F.threshold(-part_ind, -i - 0.5, 0) - ) / float(i) - recon_U_i = recon_Index_UV_i * part_u - recon_V_i = recon_Index_UV_i * part_v - - recon_Index_UV.append(recon_Index_UV_i) - recon_U.append(recon_U_i) - recon_V.append(recon_V_i) - - for i in range(len(Index2mask)): - if len(Index2mask[i]) == 1: - recon_Ann_Index_i = recon_Index_UV[Index2mask[i][0]] - elif len(Index2mask[i]) == 2: - p_ind0 = Index2mask[i][0] - p_ind1 = Index2mask[i][1] - # recon_Ann_Index[:, i, :, :] = torch.where(recon_Index_UV[:, p_ind0, :, :] > 0.5, recon_Index_UV[:, p_ind0, :, :], recon_Index_UV[:, p_ind1, :, :]) - # recon_Ann_Index[:, i, :, :] = torch.eq(part_ind, p_ind0) | torch.eq(part_ind, p_ind1) - recon_Ann_Index_i = recon_Index_UV[p_ind0] + recon_Index_UV[p_ind1] - - recon_Ann_Index.append(recon_Ann_Index_i) - - recon_U = torch.stack(recon_U, dim=1) - recon_V = torch.stack(recon_V, dim=1) - recon_Index_UV = torch.stack(recon_Index_UV, dim=1) - recon_Ann_Index = torch.stack(recon_Ann_Index, dim=1) - - if uv_rois is None: - return recon_U, recon_V, recon_Index_UV, recon_Ann_Index - - recon_U_roi = [] - recon_V_roi = [] - recon_Index_UV_roi = [] - recon_Ann_Index_roi = [] - - if new_size is None: - M = uvimg_size - else: - M = new_size - - for i in range(batch_size): - roi_fg = uv_rois[i][1:] - - # x1 = roi_fg[0] - # x2 = roi_fg[2] - # y1 = roi_fg[1] - # y2 = roi_fg[3] - - w = roi_fg[2] - roi_fg[0] - h = roi_fg[3] - roi_fg[1] - - aspect_ratio = float(w) / h - - if aspect_ratio < 1: - w_size = max(int(uvimg_size * aspect_ratio), 1) - w_margin = int((uvimg_size - w_size) / 2) - - recon_U_roi_i = recon_U[i, :, :, w_margin:w_margin + w_size] - recon_V_roi_i = recon_V[i, :, :, w_margin:w_margin + w_size] - recon_Index_UV_roi_i = recon_Index_UV[i, :, :, w_margin:w_margin + w_size] - recon_Ann_Index_roi_i = recon_Ann_Index[i, :, :, w_margin:w_margin + w_size] - else: - h_size = max(int(uvimg_size / aspect_ratio), 1) - h_margin = int((uvimg_size - h_size) / 2) - - recon_U_roi_i = recon_U[i, :, h_margin:h_margin + h_size, :] - recon_V_roi_i = recon_V[i, :, h_margin:h_margin + h_size, :] - recon_Index_UV_roi_i = recon_Index_UV[i, :, h_margin:h_margin + h_size, :] - recon_Ann_Index_roi_i = recon_Ann_Index[i, :, h_margin:h_margin + h_size, :] - - recon_U_roi_i = F.interpolate(recon_U_roi_i.unsqueeze(0), size=(M, M), mode='nearest') - recon_V_roi_i = F.interpolate(recon_V_roi_i.unsqueeze(0), size=(M, M), mode='nearest') - recon_Index_UV_roi_i = F.interpolate( - recon_Index_UV_roi_i.unsqueeze(0), size=(M, M), mode='nearest' - ) - recon_Ann_Index_roi_i = F.interpolate( - recon_Ann_Index_roi_i.unsqueeze(0), size=(M, M), mode='nearest' - ) - - recon_U_roi.append(recon_U_roi_i) - recon_V_roi.append(recon_V_roi_i) - recon_Index_UV_roi.append(recon_Index_UV_roi_i) - recon_Ann_Index_roi.append(recon_Ann_Index_roi_i) - - recon_U_roi = torch.cat(recon_U_roi, dim=0) - recon_V_roi = torch.cat(recon_V_roi, dim=0) - recon_Index_UV_roi = torch.cat(recon_Index_UV_roi, dim=0) - recon_Ann_Index_roi = torch.cat(recon_Ann_Index_roi, dim=0) - - return recon_U_roi, recon_V_roi, recon_Index_UV_roi, recon_Ann_Index_roi - - -def seg_img2map(segimages, uv_rois=None, new_size=None, n_part=24): - device_id = segimages.get_device() - batch_size = segimages.size(0) - uvimg_size = segimages.size(-1) - - part_ind = torch.round(segimages[:, 0, :, :] * n_part) - - 
recon_Index_UV = [] - - for i in range(n_part + 1): - if i == 0: - recon_Index_UV_i = torch.min( - F.threshold(part_ind + 1, 0.5, 0), -F.threshold(-part_ind - 1, -1.5, 0) - ) - else: - recon_Index_UV_i = torch.min( - F.threshold(part_ind, i - 0.5, 0), -F.threshold(-part_ind, -i - 0.5, 0) - ) / float(i) - - recon_Index_UV.append(recon_Index_UV_i) - - recon_Index_UV = torch.stack(recon_Index_UV, dim=1) - - if uv_rois is None: - return None, None, recon_Index_UV, None - - recon_Index_UV_roi = [] - - if new_size is None: - M = uvimg_size - else: - M = new_size - - for i in range(batch_size): - roi_fg = uv_rois[i][1:] - - # x1 = roi_fg[0] - # x2 = roi_fg[2] - # y1 = roi_fg[1] - # y2 = roi_fg[3] - - w = roi_fg[2] - roi_fg[0] - h = roi_fg[3] - roi_fg[1] - - aspect_ratio = float(w) / h - - if aspect_ratio < 1: - w_size = max(int(uvimg_size * aspect_ratio), 1) - w_margin = int((uvimg_size - w_size) / 2) - - recon_Index_UV_roi_i = recon_Index_UV[i, :, :, w_margin:w_margin + w_size] - else: - h_size = max(int(uvimg_size / aspect_ratio), 1) - h_margin = int((uvimg_size - h_size) / 2) - - recon_Index_UV_roi_i = recon_Index_UV[i, :, h_margin:h_margin + h_size, :] - - recon_Index_UV_roi_i = F.interpolate( - recon_Index_UV_roi_i.unsqueeze(0), size=(M, M), mode='nearest' - ) - - recon_Index_UV_roi.append(recon_Index_UV_roi_i) - - recon_Index_UV_roi = torch.cat(recon_Index_UV_roi, dim=0) - - return None, None, recon_Index_UV_roi, None diff --git a/spaces/abby711/FaceRestoration/gfpgan/train.py b/spaces/abby711/FaceRestoration/gfpgan/train.py deleted file mode 100644 index fe5f1f909ae15a8d830ef65dcb43436d4f4ee7ae..0000000000000000000000000000000000000000 --- a/spaces/abby711/FaceRestoration/gfpgan/train.py +++ /dev/null @@ -1,11 +0,0 @@ -# flake8: noqa -import os.path as osp -from basicsr.train import train_pipeline - -import gfpgan.archs -import gfpgan.data -import gfpgan.models - -if __name__ == '__main__': - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - train_pipeline(root_path) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py deleted file mode 100644 index 5674a39854cafd1f2e363bac99c58ccae62f24da..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py +++ /dev/null @@ -1,46 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='NLHead', - in_channels=2048, - in_index=3, - channels=512, - dropout_ratio=0.1, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) 
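The config above only declares the model; as a quick orientation, here is a minimal sketch of how a `_base_` model config like this is typically consumed. It assumes the standard mmcv/mmsegmentation-style APIs that this vendored `uniformer` code mirrors (`Config.fromfile`, `build_segmentor`); the path is illustrative and the exact entry points may differ in the bundled copy.

```python
# Hedged sketch, not part of the deleted Space: build the segmentor described by
# the nonlocal_r50-d8 config. Assumes an mmcv<2.0-style Config and an mmseg-style
# build_segmentor; train_cfg/test_cfg are already embedded in the model dict here.
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile('configs/_base_/models/nonlocal_r50-d8.py')
model = build_segmentor(cfg.model)  # EncoderDecoder with a ResNet-50 backbone and NLHead
print(model.__class__.__name__)
```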
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/recall.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/recall.py deleted file mode 100644 index d840b4f4f100f65158b35ba49eb00214655667f5..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/evaluation/recall.py +++ /dev/null @@ -1,189 +0,0 @@ -from collections.abc import Sequence - -import numpy as np -from annotator.uniformer.mmcv.utils import print_log -from terminaltables import AsciiTable - -from .bbox_overlaps import bbox_overlaps - - -def _recalls(all_ious, proposal_nums, thrs): - - img_num = all_ious.shape[0] - total_gt_num = sum([ious.shape[0] for ious in all_ious]) - - _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) - for k, proposal_num in enumerate(proposal_nums): - tmp_ious = np.zeros(0) - for i in range(img_num): - ious = all_ious[i][:, :proposal_num].copy() - gt_ious = np.zeros((ious.shape[0])) - if ious.size == 0: - tmp_ious = np.hstack((tmp_ious, gt_ious)) - continue - for j in range(ious.shape[0]): - gt_max_overlaps = ious.argmax(axis=1) - max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] - gt_idx = max_ious.argmax() - gt_ious[j] = max_ious[gt_idx] - box_idx = gt_max_overlaps[gt_idx] - ious[gt_idx, :] = -1 - ious[:, box_idx] = -1 - tmp_ious = np.hstack((tmp_ious, gt_ious)) - _ious[k, :] = tmp_ious - - _ious = np.fliplr(np.sort(_ious, axis=1)) - recalls = np.zeros((proposal_nums.size, thrs.size)) - for i, thr in enumerate(thrs): - recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) - - return recalls - - -def set_recall_param(proposal_nums, iou_thrs): - """Check proposal_nums and iou_thrs and set correct format.""" - if isinstance(proposal_nums, Sequence): - _proposal_nums = np.array(proposal_nums) - elif isinstance(proposal_nums, int): - _proposal_nums = np.array([proposal_nums]) - else: - _proposal_nums = proposal_nums - - if iou_thrs is None: - _iou_thrs = np.array([0.5]) - elif isinstance(iou_thrs, Sequence): - _iou_thrs = np.array(iou_thrs) - elif isinstance(iou_thrs, float): - _iou_thrs = np.array([iou_thrs]) - else: - _iou_thrs = iou_thrs - - return _proposal_nums, _iou_thrs - - -def eval_recalls(gts, - proposals, - proposal_nums=None, - iou_thrs=0.5, - logger=None): - """Calculate recalls. - - Args: - gts (list[ndarray]): a list of arrays of shape (n, 4) - proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) - proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. - iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. - logger (logging.Logger | str | None): The way to print the recall - summary. See `mmcv.utils.print_log()` for details. Default: None. 
- - Returns: - ndarray: recalls of different ious and proposal nums - """ - - img_num = len(gts) - assert img_num == len(proposals) - - proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) - - all_ious = [] - for i in range(img_num): - if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: - scores = proposals[i][:, 4] - sort_idx = np.argsort(scores)[::-1] - img_proposal = proposals[i][sort_idx, :] - else: - img_proposal = proposals[i] - prop_num = min(img_proposal.shape[0], proposal_nums[-1]) - if gts[i] is None or gts[i].shape[0] == 0: - ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) - else: - ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4]) - all_ious.append(ious) - all_ious = np.array(all_ious) - recalls = _recalls(all_ious, proposal_nums, iou_thrs) - - print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) - return recalls - - -def print_recall_summary(recalls, - proposal_nums, - iou_thrs, - row_idxs=None, - col_idxs=None, - logger=None): - """Print recalls in a table. - - Args: - recalls (ndarray): calculated from `bbox_recalls` - proposal_nums (ndarray or list): top N proposals - iou_thrs (ndarray or list): iou thresholds - row_idxs (ndarray): which rows(proposal nums) to print - col_idxs (ndarray): which cols(iou thresholds) to print - logger (logging.Logger | str | None): The way to print the recall - summary. See `mmcv.utils.print_log()` for details. Default: None. - """ - proposal_nums = np.array(proposal_nums, dtype=np.int32) - iou_thrs = np.array(iou_thrs) - if row_idxs is None: - row_idxs = np.arange(proposal_nums.size) - if col_idxs is None: - col_idxs = np.arange(iou_thrs.size) - row_header = [''] + iou_thrs[col_idxs].tolist() - table_data = [row_header] - for i, num in enumerate(proposal_nums[row_idxs]): - row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] - row.insert(0, num) - table_data.append(row) - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - -def plot_num_recall(recalls, proposal_nums): - """Plot Proposal_num-Recalls curve. - - Args: - recalls(ndarray or list): shape (k,) - proposal_nums(ndarray or list): same shape as `recalls` - """ - if isinstance(proposal_nums, np.ndarray): - _proposal_nums = proposal_nums.tolist() - else: - _proposal_nums = proposal_nums - if isinstance(recalls, np.ndarray): - _recalls = recalls.tolist() - else: - _recalls = recalls - - import matplotlib.pyplot as plt - f = plt.figure() - plt.plot([0] + _proposal_nums, [0] + _recalls) - plt.xlabel('Proposal num') - plt.ylabel('Recall') - plt.axis([0, proposal_nums.max(), 0, 1]) - f.show() - - -def plot_iou_recall(recalls, iou_thrs): - """Plot IoU-Recalls curve. 
- - Args: - recalls(ndarray or list): shape (k,) - iou_thrs(ndarray or list): same shape as `recalls` - """ - if isinstance(iou_thrs, np.ndarray): - _iou_thrs = iou_thrs.tolist() - else: - _iou_thrs = iou_thrs - if isinstance(recalls, np.ndarray): - _recalls = recalls.tolist() - else: - _recalls = recalls - - import matplotlib.pyplot as plt - f = plt.figure() - plt.plot(_iou_thrs + [1.0], _recalls + [0.]) - plt.xlabel('IoU') - plt.ylabel('Recall') - plt.axis([iou_thrs.min(), 1, 0, 1]) - f.show() diff --git a/spaces/adirik/stylemc-demo/encoder4editing/metrics/LEC.py b/spaces/adirik/stylemc-demo/encoder4editing/metrics/LEC.py deleted file mode 100644 index 3eef2d2f00a4d757a56b6e845a8fde16aab306ab..0000000000000000000000000000000000000000 --- a/spaces/adirik/stylemc-demo/encoder4editing/metrics/LEC.py +++ /dev/null @@ -1,134 +0,0 @@ -import sys -import argparse -import torch -import numpy as np -from torch.utils.data import DataLoader - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs -from datasets.images_dataset import ImagesDataset -from utils.model_utils import setup_model - - -class LEC: - def __init__(self, net, is_cars=False): - """ - Latent Editing Consistency metric as proposed in the main paper. - :param net: e4e model loaded over the pSp framework. - :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images. - """ - self.net = net - self.is_cars = is_cars - - def _encode(self, images): - """ - Encodes the given images into StyleGAN's latent space. - :param images: Tensor of shape NxCxHxW representing the images to be encoded. - :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space). - """ - codes = self.net.encoder(images) - assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}" - # normalize with respect to the center of an average face - if self.net.opts.start_from_latent_avg: - codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1) - return codes - - def _generate(self, codes): - """ - Generate the StyleGAN2 images of the given codes - :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space). - :return: Tensor of shape NxCxHxW representing the generated images. - """ - images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True) - images = self.net.face_pool(images) - if self.is_cars: - images = images[:, :, 32:224, :] - return images - - @staticmethod - def _filter_outliers(arr): - arr = np.array(arr) - - lo = np.percentile(arr, 1, interpolation="lower") - hi = np.percentile(arr, 99, interpolation="higher") - return np.extract( - np.logical_and(lo <= arr, arr <= hi), arr - ) - - def calculate_metric(self, data_loader, edit_function, inverse_edit_function): - """ - Calculate the LEC metric score. - :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader. - :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the - latent space. - :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the - `edit_function` parameter. - :return: The LEC metric score. 
- """ - distances = [] - with torch.no_grad(): - for batch in data_loader: - x, _ = batch - inputs = x.to(device).float() - - codes = self._encode(inputs) - edited_codes = edit_function(codes) - edited_image = self._generate(edited_codes) - edited_image_inversion_codes = self._encode(edited_image) - inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes) - - dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean() - distances.append(dist.to("cpu").numpy()) - - distances = self._filter_outliers(distances) - return distances.mean() - - -if __name__ == "__main__": - device = "cuda" - - parser = argparse.ArgumentParser(description="LEC metric calculator") - - parser.add_argument("--batch", type=int, default=8, help="batch size for the models") - parser.add_argument("--images_dir", type=str, default=None, - help="Path to the images directory on which we calculate the LEC score") - parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints") - - args = parser.parse_args() - print(args) - - net, opts = setup_model(args.ckpt, device) - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - - images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir - test_dataset = ImagesDataset(source_root=images_directory, - target_root=images_directory, - source_transform=transforms_dict['transform_source'], - target_transform=transforms_dict['transform_test'], - opts=opts) - - data_loader = DataLoader(test_dataset, - batch_size=args.batch, - shuffle=False, - num_workers=2, - drop_last=True) - - print(f'dataset length: {len(test_dataset)}') - - # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric. - # Change the provided example according to your domain and needs. - direction = torch.load('../editings/interfacegan_directions/age.pt').to(device) - - def edit_func_example(codes): - return codes + 3 * direction - - - def inverse_edit_func_example(codes): - return codes - 3 * direction - - lec = LEC(net, is_cars='car' in opts.dataset_type) - result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example) - print(f"LEC: {result}") diff --git a/spaces/akhaliq/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py b/spaces/akhaliq/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py deleted file mode 100644 index ec042b8ce48d193b40fd1e6311b2cc4b0c4e4086..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Detic/tools/convert-thirdparty-pretrained-model-to-d2.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import argparse -import pickle -import torch - -""" -Usage: - -cd DETIC_ROOT/models/ -wget https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/resnet50_miil_21k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path resnet50_miil_21k.pth - -wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth -python ../tools/convert-thirdparty-pretrained-model-to-d2.py --path swin_base_patch4_window7_224_22k.pth - -""" - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument('--path', default='') - args = parser.parse_args() - - print('Loading', args.path) - model = torch.load(args.path, map_location="cpu") - # import pdb; pdb.set_trace() - if 'model' in model: - model = model['model'] - if 'state_dict' in model: - model = model['state_dict'] - ret = { - "model": model, - "__author__": "third_party", - "matching_heuristics": True - } - out_path = args.path.replace('.pth', '.pkl') - print('Saving to', out_path) - pickle.dump(ret, open(out_path, "wb")) diff --git a/spaces/akhaliq/Mask2Former/datasets/prepare_ade20k_sem_seg.py b/spaces/akhaliq/Mask2Former/datasets/prepare_ade20k_sem_seg.py deleted file mode 100644 index b0edfeb340edaff45beb14b3f9438aef2c65e78f..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/datasets/prepare_ade20k_sem_seg.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -import os -from pathlib import Path - -import numpy as np -import tqdm -from PIL import Image - - -def convert(input, output): - img = np.asarray(Image.open(input)) - assert img.dtype == np.uint8 - img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1 - Image.fromarray(img).save(output) - - -if __name__ == "__main__": - dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016" - for name in ["training", "validation"]: - annotation_dir = dataset_dir / "annotations" / name - output_dir = dataset_dir / "annotations_detectron2" / name - output_dir.mkdir(parents=True, exist_ok=True) - for file in tqdm.tqdm(list(annotation_dir.iterdir())): - output_file = output_dir / file.name - convert(file, output_file) diff --git a/spaces/akhaliq/Music_Source_Separation/scripts/4_train/voicebank-demand/train.sh b/spaces/akhaliq/Music_Source_Separation/scripts/4_train/voicebank-demand/train.sh deleted file mode 100644 index 4968af845ee9aaa4b252d0d434dae3aadac82e0a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Music_Source_Separation/scripts/4_train/voicebank-demand/train.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -WORKSPACE=${1:-"./workspaces/bytesep"} # The first argument is workspace directory. - -echo "WORKSPACE=${WORKSPACE}" - -# Users can modify the following config file. -TRAIN_CONFIG_YAML="scripts/4_train/voicebank-demand/configs/speech-noise,unet.yaml" - -# Train & evaluate & save checkpoints. 
-CUDA_VISIBLE_DEVICES=0 python3 bytesep/train.py train \
-    --workspace=$WORKSPACE \
-    --gpus=1 \
-    --config_yaml=$TRAIN_CONFIG_YAML
\ No newline at end of file
diff --git a/spaces/akhaliq/Pop_Music_Transformer/main.py b/spaces/akhaliq/Pop_Music_Transformer/main.py
deleted file mode 100644
index dc53eb9839e70e6eca4691059f7f9c0b5f59abda..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Pop_Music_Transformer/main.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from model import PopMusicTransformer
-import os
-os.environ['CUDA_VISIBLE_DEVICES'] = '0'
-
-def main():
-    # declare model
-    model = PopMusicTransformer(
-        checkpoint='REMI-tempo-checkpoint',
-        is_training=False)
-
-    # generate from scratch
-    model.generate(
-        n_target_bar=16,
-        temperature=1.2,
-        topk=5,
-        output_path='./result/from_scratch.midi',
-        prompt=None)
-
-    # generate continuation
-    model.generate(
-        n_target_bar=16,
-        temperature=1.2,
-        topk=5,
-        output_path='./result/continuation.midi',
-        prompt='./data/evaluation/000.midi')
-
-    # close model
-    model.close()
-
-if __name__ == '__main__':
-    main()
diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/iterators.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/iterators.py
deleted file mode 100644
index a3be2e238ef4d561a63005ea6b18fc83001fc214..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/iterators.py
+++ /dev/null
@@ -1,1217 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT license.
-
-"""
-## Overview
-
-This part of the documentation covers the __advanced usage__ of Infinibatch by assembling __custom data loading pipelines__.
-Before you continue, please go through the tutorial on the top-level of the documentation of the `infinibatch` module.
-
-Two of the main features of Infinibatch are __lazy evaluation__ through the use of __iterators__
-and built-in support for __checkpointing__.
-In this section, we give an introduction to these features and the basic usage of the Infinibatch iterator library.
-
-
-### Iterators
-
-As a Python programmer, you are probably familiar with the concept of iterators.
-According to the [Python documentation](https://docs.python.org/3.5/glossary.html#term-iterator),
-an iterator is an object representing a stream of data,
-and repeated calls to the iterator's `__next__()` method (or passing it to the built-in function `next()`)
-return successive items in the stream.
-It is important not to confuse an [iterator](https://docs.python.org/3.5/glossary.html#term-iterator)
-with an [iterable](https://docs.python.org/3.5/glossary.html#term-iterable).
-For more information on this subject, please follow the links above.
-
-The Python standard library contains a module of iterators called `itertools`
-that bears some resemblance to Infinibatch.
-Infinibatch differs from `itertools` in two ways:
-
-1. Infinibatch provides iterators specifically for the purpose of creating __randomized batches of data for machine learning__.
-2. All iterators in Infinibatch support __checkpointing__ (see the following section).
-
-Infinibatch iterators are not directly compatible with itertools due to the checkpointing requirement.
-
-Infinibatch enables you to build complex data loaders by combining iterators from this module into a pipeline.
-To give you a high-level idea of how this works, we provide a very simple example.
-Note that this example is completely artificial and does not solve any useful task. -Its only purpose is to demonstrate the behavior of a pipeline of iterators. -We provide a more realistic example in a later section. - -First, we create a small test data set. ->>> dataset = list(range(6)) # 0, 1, 2, 3, 4, 5 - -We can turn this data set into an Infinibatch iterator by wrapping it in a `NativeCheckpointableIterator`. ->>> it = NativeCheckpointableIterator(dataset) # 0, 1, 2, 3, 4, 5 - -We can then transform the data items using a `MapIterator`, -which applies a given function to each individual data item. -For example, we can multiply each data item by 2. ->>> it = MapIterator(it, lambda n: 2 * n) # 0, 2, 4, 6, 8, 10 - -We can restructure the data set by batching together pairs of data items into lists using a `FixedBatchIterator`. ->>> it = FixedBatchIterator(it, batch_size=2) # [0, 2], [4, 6], [8, 10] - -Using another `MapIterator`, we can reduce each of these lists to its second element. ->>> it = MapIterator(it, lambda l: l[1]) # 2, 6, 10 - -Finally, we can use the resulting iterator `it` just like any standard Python iterator. -```py ->>> for item in it: -... print(item) -2 -6 -10 - -``` - -By using iterators, Infinibatch operates in a __lazy__ fashion: -It generally doesn't apply operations to an entire data set at once, -but rather operates on individual data items on-the-fly as they are consumed. -When used correctly, this allows Infinibatch to have a low start-up time and low memory overhead. -For more detail on this, please consult the section on performance considerations below. - - -### Checkpointing - -The main features that sets Infinibatch iterators apart from standard Python iterators is that they support __checkpointing__. -A checkpoint encapsulates the internal state of an entire pipeline of iterators at a specific point while iterating through a data set. -Once you retrieve a checkpoint, you can later use it to reset the pipeline of iterators to the exact state it was in -when the checkpoint was created. -Checkpoints can easily be serialized and stored to disk using [Pythons `pickle` module](https://docs.python.org/3.5/library/pickle.html). -Infinibatch's checkpointing feature is particularly useful when you're training large deep neural network models over days or weeks, -and you want to make sure that, in case your training is interrupted for any reason, __you can pick up your training exactly where you left off__. - -The checkpointing interface consists of two functions `getstate` and `setstate` that are defined in `CheckpointableIterator`, -the common base class of all iterators in this module. -As the names suggest `getstate` returns a checkpoint object that represents the state of a pipeline at the time the function is called, -and 'setstate' receives a checkpoint object to reset the state of a pipeline. -`setstate` also accepts `None`, which resets a pipeline to the __beginning__ of the iteration, -i.e. the state of the pipeline immediately after its construction. - -It is important to realize that __a checkpoint represents the state of a complete pipeline of iterators__. -If you have a pipeline consisting of a sequence of iterators, you only have to call `getstate` on the __last__ iterator in the sequence -to capture the state of the entire pipeline. -Internally, this is achieved by recursive calls that traverse the entire data loading pipeline to collect the state of every iterator in it. 
-Similarly, when you want to reset a pipeline to a previous state, you only have to call `setstate` on the __last__ iterator in the pipeline. - - -To demonstrate this, we recreate the pipeline from the previous section. ->>> dataset = list(range(6)) # 0, 1, 2, 3, 4, 5 ->>> it = NativeCheckpointableIterator(dataset) # 0, 1, 2, 3, 4, 5 ->>> it = MapIterator(it, lambda n: 2 * n) # 0, 2, 4, 6, 8, 10 ->>> it = FixedBatchIterator(it, batch_size=2) # [0, 2], [4, 6], [8, 10] ->>> it = MapIterator(it, lambda l: l[1]) # 2, 6, 10 - -Since `it` behaves just like a standard Python iterator, we can call `next` to retrieve its first element. ->>> next(it) -2 - -We can now call `getstate` on `it` (which is the last `MapIterator` in the pipeline) -to get a checkpoint of the internal state of the entire data loading pipeline. ->>> checkpoint = it.getstate() - -Note that the checkpoint represents the internal state of the pipeline after the data item `2` has been retrieved. -Using the checkpoint, we can always return to this __exact__ point in the data set. -To show this, let's exhaust the iterator by casting it to a list. ->>> list(it) -[6, 10] - -Since the iterator is now exhausted, calling `next` raises a `StopIteration` exception. -``` ->>> next(it) -Traceback (most recent call last): - ... -StopIteration - -``` - -We can now reset the pipeline to the checkpoint using `setstate`. ->>> it.setstate(checkpoint) - -This recovers the state of the pipeline after the data item `2` has been retrieved. -Thereby, we expect the next element to be `6`. ->>> next(it) -6 - - -## Types of Iterators - -This section provides a brief overview of the different types of iterators in Infinibatch. - - -### Classes and Factory Functions - -Most iterators in this module are implemented as classes that inherit from the abstract base class `CheckpointableIterator`. -However, some iterators (such as the `BlockwiseShuffleIterator`) are simple combinations of other iterators. -These iterators are implemented as __factory functions__ that construct a pipeline of iterators -and return the last iterator in the pipeline. -For consistency with class-based iterators, -we name these factory function using CamelCase instead of the more pythonic use_of_underscores. - -.. todo:: - We currently also have one factory function that actually looks like one: `create_source_iterator`. - Provide a comment on this describing why that is. - - -### Source Iterators - -There are three iterators that are intended to go at the __beginning__ of a data loading pipeline: - -- `InfinitePermutationSourceIterator`: -This iterator accepts a list, shuffles it, and yields its elements. -It repeats this infinitely, shuffling the list after each pass. -Thereby, __this iterator is infinte and cannot be exhausted__. -This iterator is meant to be used as the first iterator in a training scenario -and supports splitting the data for multi-GPU training. -- `ChunkedSourceIterator`: -This iterator accepts a list and yields its elements. -It is meant to be used as the first iterator in an inference or validation scenario -and supports splitting the data for mult-GPU inference. -- `NativeCheckpointableIterator`: -This iterator wraps a Python iterable and makes it checkpointable. -It is mainly intended for demonstration and debugging purposes. - - -### Shuffling - -.. todo:: Describe `BufferedShuffleIterator` and `BlockwiseShuffleIterator`. - - -### Batching, SelectMany, and Windowing - -.. todo:: Describe `FixedBatchIterator`, `SelectManyIterator`, and `WindowedIterator`. 
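Pending those descriptions, here is a small illustrative sketch (added here for orientation, not part of the original module text); the outputs follow the class docstrings further below, where `FixedBatchIterator` groups consecutive items into lists, `SelectManyIterator` flattens nested sequences, and `WindowedIterator` yields sliding windows of a given width.

>>> it = NativeCheckpointableIterator([1, 2, 3, 4, 5, 6])
>>> it = FixedBatchIterator(it, batch_size=4)   # [1, 2, 3, 4], [5, 6]
>>> it = SelectManyIterator(it)                 # flattened back: 1, 2, 3, 4, 5, 6
>>> it = WindowedIterator(it, width=3)
>>> list(it)
[(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]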
- - -### Mapping - -.. todo:: Describe `MapIterator`, `ParallelMapIterator`, `RecurrentIterator`, and `SamplingRandomMapIterator`. - - -### Other Iterators - -.. todo:: Describe `ZipIterator`, `PrefetchIterator`, and `BucketedReadaheadBatchIterator`. - - -## Complete Example - -.. todo:: - Give a more realistic example following, in broad strokes, the ChunkedDataset including: - - - use gzip chunks - - training pipeline example - - inference pipeline example - - pipeline that can do both - - etc. - -## Performance Considerations - -.. todo:: - Describe what parameters influence performance measures such as memory usage and start-up time. -""" - -from abc import abstractmethod -import collections -import copy -import gzip -from itertools import cycle, islice -import math -from multiprocessing import Pool -import os -from queue import Full, Queue -from random import Random -from threading import Thread -from typing import ( - Any, - Callable, - Dict, - Generator, - Iterable, - Iterator, - List, - Optional, - Tuple, - Union, -) - - -from .closablequeue import ClosableQueue, ClosedException - - -# TODO for next release: -# - benchmark the accuracy when using BlockwiseShuffleIterator vs. the BufferedShuffleIterator -# - change all convenience functions back to true classes, using a wrapper class - -# TODO later: -# - make iterator pipeline work for streaming data - - -def _advance_iterator(iterator: Iterator, n: int): - """Little helper to advance an iterator by n items""" - for _ in range(n): - next(iterator) - return n - - -class CheckpointableIterator(collections.abc.Iterator): - """ - Abstract base class that defines the interface for checkpointing. - - The interface (getstate, setstate) is inspired by Python's random package. - """ - - def __iter__(self): - return self - - @abstractmethod - def getstate(self) -> Dict: - """ - Get checkpoint of current state of iterator - - In a pipeline of iterators, this function __recursively__ calls itself on the preceeding iterator - and includes the gathered information in the returned checkpoint. - Thereby, to obtain a checkpoint of the state of an entire pipeline of iterators - you only have to call this function on the __last__ iterator in the pipeline. - A checkpoint is represented as a `dict`, - but the caller should treat a checkpoint as an opaque object - and not make any assumptions about the existence or meaning of the `dict` entries. - """ - pass - - @abstractmethod - def setstate(self, checkpoint: Optional[Dict]): - """ - Set state of iterator to given checkpoint - - In a pipeline of iterators, this function __recursively__ calls itself on the preceeding iterator. - Thereby, to set the state of an entire pipeline of iterators to a given checkpoint - you only have to call this function on the __last__ iterator in the pipeline. - - Args: - checkpoint: Checkpoint that should be used to reset the state of the iterator (or pipeline). - If this is __None__, the state of the iterator (or pipeline) is reset to the initial - state immediately after construction. 
- """ - pass - - def __getstate__(self) -> Dict: # implementation of pickle Protocol - return self.getstate() - - def __setstate__(self, checkpoint: Optional[Dict]): - self.setstate(checkpoint) - - @abstractmethod - def __next__(self): - pass - - -class NativeCheckpointableIterator(CheckpointableIterator): - """ - Simple wrapper class that turns a Python Iterable into a CheckpointableIterator - - When calling setstate on this class, it simply replays the iterator all the way to the checkpoint one element at a time, - which makes it generally inefficient. - - Warning: This class cannot be used with Iterators (as opposed to Iterables), which have an `__iter__` function that simply returns self, but does not reset. - """ - - def __init__(self, iterable: Iterable): - # check whether iterable is iterable or iterator: - # if the variable iterable contains an iterator, the function __iter__ returns self - # if the variable iterable is an actual iterator, it should not return self - if iter(iterable) is iterable: - raise ValueError( - "It looks like you are passing an iterator instead of an iterable. This is not supported and can cause undefined behavior when used with checkpointing." - ) - self._input_iterable = iterable - self.setstate(None) - - def getstate(self) -> Dict: - return {"num_items_yielded": self._num_items_yielded} - - def setstate(self, checkpoint: Optional[Dict]): - self._iterator = iter(self._input_iterable) - self._num_items_yielded = ( - _advance_iterator(self._iterator, checkpoint["num_items_yielded"]) - if checkpoint is not None - else 0 - ) - - def __next__(self): - item = next( - self._iterator - ) # call this before increasing _num_items_yielded to correctly handle the case when a StopIteration exception is thrown - self._num_items_yielded += 1 - return item - - -def create_source_iterator( - source_items: List, - train: bool = True, - seed: Optional[int] = None, - shuffle: bool = True, - num_instances: int = 1, - instance_rank: int = 0, -): - if not train and shuffle: - raise ValueError("shuffling is not supported when train=False") - if train: - return InfinitePermutationSourceIterator( - source_items, - seed=seed, - shuffle=shuffle, - num_instances=num_instances, - instance_rank=instance_rank, - ) - else: - return ChunkedSourceIterator( - source_items, num_instances=num_instances, instance_rank=instance_rank - ) - - -def ChunkedSourceIterator( - source_items: List, num_instances: int = 1, instance_rank: int = 0 -): - """ - Cuts source list into chunks, one per instance, and serves out items in chunk corresponding to instance_rank - - This is a source iterator: - It is meant to be used at the beginning of a data loading pipeline. - As such, it takes a list as its source and not a CheckpointableIterator. - - Args: - source_items: input list, must not be empty and must be small enough to fit into RAM entirely, ownership of the list and the data goes to the iterator, do not modify it! - num_instances: number of instances of this iterator. Meant for use with multi-process data loading, e.g., in distributed training. - instance_rank: rank of this instance of the iterator. Meant for use with multi-process data loading, e.g., in distributed training. 
- """ - # heuristic: assuming blocks are all of the same size, math.ceil should give us the shortest makespan - chunk_size = math.ceil(len(source_items) / num_instances) - # this does not cause any out-of-bounds issues: - # a slice with a start-index beyong the end of the list is empty, - # and an end-index of a slice is capped at the end of the list - chunk = source_items[instance_rank * chunk_size : (instance_rank + 1) * chunk_size] - return NativeCheckpointableIterator(chunk) - - -class InfinitePermutationSourceIterator(CheckpointableIterator): - """ - Infinitely generates permutations of the items in the given list. - - This is a source iterator: - It is meant to be used at the beginning of a data loading pipeline. - As such, it takes a list as its source and not a CheckpointableIterator. - The given list is loaded completely into RAM. - - For example, this is used for randomizing the pathnames of data blocks read by ChunkedReadlinesIterator. - """ - - def __init__( - self, - source_items: List, - seed: Optional[int] = None, - shuffle: bool = True, - num_instances: int = 1, - instance_rank: int = 0, - ): - """ - Args: - source_items: input list, must not be empty and must be small enough to fit into RAM entirely, ownership of the list and the data goes to the iterator, do not modify it! - seed: random seed used for shuffling (or None) - shuffle: set False to bypass the shuffling. Then this is just a checkpointed version of itertools.cycle(). (Default: True) - num_instances: number of instances of this iterator. Meant for use with multi-process data loading, e.g., in distributed training. - instance_rank: rank of this instance of the iterator. Meant for use with multi-process data loading, e.g., in distributed training. - """ - self._source_items = source_items - if not self._source_items: - raise ValueError("InfinitePermutationIterator: source must not be empty") - self._shuffle = shuffle - self._seed = seed - self._num_instances = num_instances - self._instance_rank = instance_rank - self.setstate(None) - - def getstate(self) -> Dict: - return { - "random_state": self._random_state, # state of random generator before generating the current shuffling of the sequence - "num_items_yielded": self._num_items_yielded, - } # how many items have already been iterated over in the current shuffling - - def setstate(self, checkpoint: Optional[Dict]): - # set iteration state. Do this outside the generator below in case getstate() is called before ever iterating - self._random_state = checkpoint["random_state"] if checkpoint else None - self._num_items_yielded = checkpoint["num_items_yielded"] if checkpoint else 0 - # We define the iteration itself as a generator for ease of implementation. - # We could as well just have used an explicit state machine represented by class members. 
- def _generate() -> Iterator: - # create and reset random generator - random = Random(self._seed) - if self._random_state is not None: # restore the random generator's state - random.setstate(self._random_state) - skip_to_checkpoint = ( - self._num_items_yielded - ) # items to skip in order to advance to checkpoint - # main outer loop for infinite passes over items (reshuffle before each pass) - while True: - # (re-)shuffle all items - self._random_state = ( - random.getstate() - ) # remember random state before shuffling - self._num_items_yielded = 0 - shuffled_items = self._source_items[ - : - ] # note: if underlying iterator is checkpointable, use setstate(checkpoint['nested_state']) on it - if self._shuffle: - random.shuffle(shuffled_items) - shuffled_iterator = iter(shuffled_items) - # skip initial items when restarting from checkpoint - if ( - skip_to_checkpoint - ): # @TODO: find a way to abstract this more, so that we can plug it into the 'for' statement directly - self._num_items_yielded += _advance_iterator( - shuffled_iterator, skip_to_checkpoint - ) - skip_to_checkpoint = 0 # done skipping - # main inner loop over items - for item in shuffled_iterator: - self._num_items_yielded += 1 # record how many items we have iterated over in this pass over the items - if ( - self._num_items_yielded - 1 - ) % self._num_instances == self._instance_rank: # build-in islice facility - yield item - - self._iterator = _generate() - - def __next__(self): - return next(self._iterator) - - -class SelectManyIterator(CheckpointableIterator): - """ - Projects each element of a source sequence to a sequence and flattens the resulting sequences into one sequence. - """ - - def __init__( - self, - source_iterator: CheckpointableIterator, - collection_selector: Optional[Callable[[Any], Iterator]] = None, - ): - """ - Args: - source_iterator: iterator over the items to pass to collection_selector() - collection_selector: user callback that maps an item into an Iterable, whose items will be yielded. - The returned Iterator is used only once. Hence, it is also allowed to - return self-iterables, such as iterators and generator expressions. - If None is given, no callback is applied. 
- """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator # type: CheckpointableIterator - self._collection_selector = ( - collection_selector - ) # type: Callable[[Any], Iterator] - self.setstate(None) - - def getstate(self) -> Dict: - return { - "source_state": self._source_state, - "flattened_items_yielded": self._flattened_items_yielded, - } - - def setstate(self, checkpoint: Optional[Dict]): - self._source_state = checkpoint["source_state"] if checkpoint else None - self._flattened_items_yielded = ( - checkpoint["flattened_items_yielded"] if checkpoint else 0 - ) - self._source_iterator.setstate(self._source_state) - - def _generate(): - skip_to_checkpoint = self._flattened_items_yielded - # main loop over source source_items - for source_item in self._source_iterator: - if self._collection_selector is not None: - data = iter(self._collection_selector(source_item)) - else: - data = iter(source_item) - self._flattened_items_yielded = 0 - if skip_to_checkpoint: - # print("Skipping to index", skip_to_checkpoint, file=sys.stderr) - self._flattened_items_yielded += _advance_iterator( - data, skip_to_checkpoint - ) - skip_to_checkpoint = 0 - # main loop over lines - for item in data: - self._flattened_items_yielded += 1 - yield item - self._source_state = self._source_iterator.getstate() - - self._iterator = _generate() - - def __next__(self): - return next(self._iterator) - - -class BufferedShuffleIterator(CheckpointableIterator): - """ - Shuffles given iterable using a limited buffer. - """ - - def __init__( - self, source_iterator: CheckpointableIterator, buffer_size: int, seed: int = 0 - ): - """ - Args: - source_iterator: checkpointable iterator or restartable iterable over input items to shuffle - buffer_size: size of the buffer in number of items used for shuffling - seed: random seed used for shuffling (or None) - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator - self._buffer = [ - None for _ in range(buffer_size) - ] # maybe do this lazily? --Yes, since user may set state immediately, then this is not needed here - self._random = Random(seed) - self.setstate(None) - - def getstate(self) -> Dict: - return { - "source_state": self._source_iterator.getstate(), - "buffer": copy.deepcopy(self._buffer), - "random_state": self._random.getstate(), - } - - def setstate(self, checkpoint: Optional[Dict]): - if checkpoint: - self._source_iterator.setstate(checkpoint["source_state"]) - self._buffer = checkpoint["buffer"] - self._random.setstate(checkpoint["random_state"]) - # @TODO: Can we add a comment how the flush part is handled? 
- else: - self._source_iterator.setstate(None) - self._iterator = self._generate() - - def _generate(self) -> Iterator: - # shuffle data with a buffer: - # this is similar to what the Fisher-Yates shuffle does, - # but modified to run with a constant-size buffer - # see https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle - # this was inspired by an algorithm implemented in Kaldi - # see https://kaldi-asr.org/doc/nnet-shuffle-egs_8cc.html - for item in self._source_iterator: - index = self._random.randrange(0, len(self._buffer)) - result = None - if self._buffer[index] is not None: - result = self._buffer[index] - self._buffer[index] = item - # only yield value once buffer is updated to allow for correct checkpointing! - if result is not None: - yield result - - # flush buffer - while self._buffer: - item = self._buffer.pop() - if item is not None: - yield item - - def __next__(self): - return next(self._iterator) - - -class MapIterator(CheckpointableIterator): - """ - Applies given tranform to each data item - """ - - def __init__( - self, source_iterator: CheckpointableIterator, transform: Callable[[str], Any] - ): - """ - Args: - source_iterator: checkpointable iterator - transform: function to be applied to each data item - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator - self._transform = transform - - def getstate(self) -> Dict: - return self._source_iterator.getstate() - - def setstate(self, checkpoint: Optional[Dict]): - self._source_iterator.setstate(checkpoint) - - def __next__(self): - return self._transform(next(self._source_iterator)) - - -def ParallelMapIterator( - source_iterator: CheckpointableIterator, - transform: Callable[[str], Any], - num_processes: int, - num_items_per_process: int, -): - """ - Applies given transform to each data item - - Behaves the same as MapIterator, but applies transform in parallel using multiple processes in a parallel map operation. - - Warning: - The transform function has to be pickleable because it is sent across process boundaries. - To achieve this, transform should be a top-level function. - - Args: - source_iterator: checkpointable iterator - transform: function to be applied to each data item, has to be pickleable, see above - num_processes: number of processes to use for parallel map - num_items_per_process: number of data items each process operates on - """ - # divide stream of data items into batches - batched_samples = FixedBatchIterator( - source_iterator, num_processes * num_items_per_process - ) - # create process pool and capture it in closure that performs parallel map - p = Pool(num_processes) - - def parallel_map_transform(buffer): - return p.map(transform, buffer) - - # apply transform in parallel to data items in a batch - batched_transformed_samples = MapIterator(batched_samples, parallel_map_transform) - # unpack batches to go back to stream of (now transformed) data items - transformed_samples = SelectManyIterator(batched_transformed_samples) - return transformed_samples - - -class ZipIterator(CheckpointableIterator): - """ - Zips items from all given iterators, like the Python standard function zip(). - - Like Python's build-in zip(), the iteration stops when the shortest input iterable is exhausted. 
- """ - - def __init__(self, *source_iterators: CheckpointableIterator): - """ - Args: - source_iterators: list of iterators to zip, item by item - """ - for source_iterator in source_iterators: - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError( - "all iterators in source_iterators have to be CheckpointableIterator" - ) - self._source_iterators = source_iterators # type: List[CheckpointableIterator] - - def getstate(self) -> Dict: - return { - "input_states": tuple( - iterator.getstate() for iterator in self._source_iterators - ) - } - - def setstate(self, checkpoint: Optional[Dict]): - if checkpoint is None: - for iterator in self._source_iterators: - iterator.setstate(None) - else: - for iterator, state in zip( - self._source_iterators, checkpoint["input_states"] - ): - iterator.setstate(state) - - def __next__(self): - res = ( - [] - ) # (note: can't use a generator expression, as it gets confused when a next() call raises StopIteration) - for iterator in self._source_iterators: - res.append(next(iterator)) - return tuple(res) - - -# @TODO: The yield makes a (shallow) copy of the window, which has complexity O(width * length). In some cases, -# we don't actually need to consume all items in the window. Hence, to make this faster, we should use -# double-buffering and return a slice view (which we'd have to write). -class WindowedIterator(CheckpointableIterator): - """ - Yields 'width' consecutive items in a sliding window. - - E.g. [1, 2, 3, 4, 5, 6] with width = 3 will yield - [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6]] - """ - - def __init__(self, source_iterator: CheckpointableIterator, width: int): - """ - Args: - source_iterator: checkpointable input iterators - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator # type: CheckpointableIterator - self._width = width # type: int - self.setstate(None) - - def getstate(self) -> Dict: - return { - "source_state": self._source_state, # state for first item in FIFO - "item_index": self._item_index, - } # index of next item to serve - - def setstate(self, checkpoint: Optional[Dict]): - self._source_state = checkpoint["source_state"] if checkpoint else None - self._item_index = checkpoint["item_index"] if checkpoint else 0 - self._source_iterator.setstate(self._source_state) - self._iterator = self._generate() - - def _fifo_slice(self, i): # returns a window into the FIFO beginning at i - # @TODO: for efficiency, make this a slice view - return tuple(self._fifo[i : i + self._width]) - - def _generate(self) -> Iterator: - self._source_state = self._source_iterator.getstate() - self._fifo = list(islice(self._source_iterator, self._width)) - # we do this in overlapping blocks of length 2*width, for easier checkpointing and potential efficiency - while len(self._fifo) == self._width: - # we got 'width' items; append another 'width' (or less if at end) - next_input_state = self._source_iterator.getstate() - self._fifo.extend(islice(self._source_iterator, self._width)) - # now serve all positions in first half (last = width - 1). If at end, then limit accordingly. 
- last = min(self._width - 1, len(self._fifo) - self._width) - while self._item_index <= last: - window = self._fifo_slice(self._item_index) - self._item_index += 1 - yield window - # drop all we just served; if < width left, we have hit the end - self._fifo = self._fifo[ - last + 1 : - ] # Note: This must be a new list, since the old might still be in a slice view. - self._source_state = ( - next_input_state # this reflects now the first element in the FIFO - ) - self._item_index = 0 - - def __next__(self): - return next(self._iterator) - - -# @TODO: research on whether this operation has a well-known name -class FixedBatchIterator(CheckpointableIterator): - """ - Batches N consecutive items into a single item that is a list of these items. - - E.g. [1, 2, 3 4, 5, 6, 7, 8] with batch_size = 3 will yield - [(1, 2, 3), (4, 5, 6), (7, 8)] - """ - - def __init__(self, source_iterator: CheckpointableIterator, batch_size: int): - """ - Args: - source_iterator: checkpointable input iterators - batch_size: number of items per batch - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator # type: CheckpointableIterator - self._batch_size = batch_size # type: int - self.setstate(None) - - def getstate(self) -> Dict: - return { - "source_state": self._source_iterator.getstate() - } # state for first item in next batch - - def setstate(self, checkpoint: Optional[Dict]): - self._source_state = checkpoint["source_state"] if checkpoint else None - self._source_iterator.setstate(self._source_state) - self._iterator = self._generate() - - def _generate(self) -> Iterator: - while True: - batch = list(islice(self._source_iterator, self._batch_size)) - if not batch: - break - yield batch - - def __next__(self): - return next(self._iterator) - - -class RandomIterator(CheckpointableIterator): - """ - Iterator to generate uniformly distributed random numbers in the interval [0,1). - Very similar to Random.random(), except that random numbers are - obtained via next(). - """ - - def __init__(self, seed: Optional[int] = None): - """ - Args: - seed: Random seed. - """ - self._random = Random() # type: Random - if seed is not None: - self._random.seed(seed) - - def getstate(self) -> Dict: - return {"random_state": self._random.getstate()} - - def setstate(self, checkpoint: Optional[Dict]): - self._random.setstate(checkpoint["random_state"] if checkpoint else None) - - def __next__(self): - return self._random.random() - - -class RecurrentIterator(CheckpointableIterator): - """ - Iterates statefully over a step function. The step function accepts a state and a new item, - and returns a new state and an output item, which is yielded. 
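For example (an added illustration, consistent with the implementation below): with step_function = lambda state, item: (state + item, state + item) and initial_state=0, the inputs 1, 2, 3 come out as the running sums 1, 3, 6.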
- """ - - def __init__( - self, - source_iterator: CheckpointableIterator, - step_function: Callable[[Any, Any], Tuple[Any, Any]], - initial_state: Any = None, - ): - """ - Args: - source_iterator: checkpointable iterator to recur over - step_function: user-supplied function with signature step_function(state, item) -> (new_state, output) - initial_state: initial state to be passed to the step_function upon first invocation - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator # type: CheckpointableIterator - self._step_function = step_function # type: Callable[[Any,Any], Tuple[Any,Any]] - self._initial_state = initial_state # type: Any - self.setstate(None) - - def getstate(self): - return { - "recurrent_state": self._recurrent_state, - "source_state": self._source_iterator.getstate(), - } - - def setstate(self, checkpoint): - self._recurrent_state = ( - checkpoint["recurrent_state"] if checkpoint else self._initial_state - ) - self._source_iterator.setstate( - checkpoint["source_state"] if checkpoint else None - ) - - def _generate(): - for item in self._source_iterator: - self._recurrent_state, output = self._step_function( - self._recurrent_state, item - ) - yield output - - self._iterator = _generate() - - def __next__(self): - return next(self._iterator) - - -def SamplingRandomMapIterator( - source_iterator: CheckpointableIterator, - transform: Callable[[Random, Any], Any], - seed: Optional[int] = None, -): - """ - An iterator that calls a transform function on each item, while also passing a checkpointed - random generator. - - Args: - source_iterator: checkpointable iterator to recur over - step_function: user-supplied function with signature step_function(random, item) -> result_item - seed: random seed - """ - _random = Random() - if seed is not None: - _random.seed(seed) - - def _step_function(state, item): - _random.setstate(state) - output = transform(_random, item) - return _random.getstate(), output - - return RecurrentIterator( - source_iterator, _step_function, initial_state=_random.getstate() - ) - - -def BlockwiseShuffleIterator( - source_iterator: CheckpointableIterator, block_size: int, seed: int = 0 -): - """ - Shuffles a sequence of items by grouping consecutive items in blocks of fixed size, shuffling - each block, and yielding the shuffled items of all blocks as a flat sequence. - - E.g. [1, 2, 3, 4, 5, 6, 7, 8] with block_size = 3 may yield [3, 1, 2, 4, 6, 5, 8, 7]. - - Args: - source_iterator: checkpointable iterator or restartable iterable over input items to shuffle - block_size: size of the buffer in number of items used for shuffling - seed: random seed used for shuffling (or None) - """ - # This is implemented as a pipeline: - # - group N consecutive items together - # - shuffle them - # - flatten the result - blocks = FixedBatchIterator(source_iterator, batch_size=block_size) - - def shuffle_block_fn(random: Random, block: List): - random.shuffle(block) - return block - - shuffled_blocks = SamplingRandomMapIterator( - blocks, transform=shuffle_block_fn, seed=seed - ) - samples = SelectManyIterator( - shuffled_blocks, collection_selector=lambda shuffled_block: iter(shuffled_block) - ) - return samples - - -class PrefetchIterator(CheckpointableIterator): - """ - An iterator prefetching data into a buffer on a seperate thread to smooth out IO latency. 
- - Args: - source_iterator: checkpointable iterator to recur over - buffer_size: size of the queue between the threads - """ - - def __init__( - self, source_iterator: CheckpointableIterator, buffer_size: int = 1000 - ): - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - self._source_iterator = source_iterator # type:CheckpointableIterator - self._buffer_size = buffer_size # type: int - self._queue = None # type: Optional[ClosableQueue] - self._thread = None # type: Optional[Thread] - self.setstate(None) - - def getstate(self) -> Dict: - return {"source_state": self._source_state, "item_offset": self._item_offset} - - def setstate(self, checkpoint: Optional[Dict]): - if ( - self._thread is not None - ): # if there is a prefetching thread running, close the queue and wait for the thread to terminate - assert self._queue is not None - self._queue.close() - self._thread.join() - - self._source_state = ( - checkpoint["source_state"] if checkpoint is not None else None - ) - self._item_offset = checkpoint["item_offset"] if checkpoint is not None else 0 - - self._source_iterator.setstate(self._source_state) - - self._queue = ClosableQueue(maxsize=self._buffer_size) # clear queue - # make thread daemonic so it is killed when the main program terminates - self._thread = Thread( - target=self._prefetch_thread_fn, - args=( - self._source_iterator, - self._item_offset, - self._buffer_size, - self._queue, - ), - daemon=True, - ) - self._thread.start() - - @staticmethod - def _prefetch_thread_fn( - source, item_offset, buffer_size, queue - ): # behavior of the prefetching thread, only call from that thread! - _advance_iterator(source, item_offset) # skip to checkpoint - - while True: - try: - item = next(source) - except StopIteration: - queue.close() - return - - if ( - item_offset == buffer_size - 1 - ): # send a new source state a the END of each window of length _buffer_size - source_state = ( - source.getstate() - ) # this is the state for retrieving the NEXT element, i.e. the first element of the next buffer - item_offset = 0 - else: - source_state = None - item_offset += 1 - msg = (item, source_state) - - try: - queue.put(msg) - except ClosedException: - return - - def __next__(self): - try: - msg = self._queue.get() - except ClosedException: - raise StopIteration - - item, prefetch_source_state = msg - if prefetch_source_state is not None: - assert ( - self._item_offset == self._buffer_size - 1 - ) # we expect a new source state at then END of each window of length _buffer_size - self._source_state = prefetch_source_state - self._item_offset = 0 - else: - self._item_offset = self._item_offset + 1 - assert self._item_offset < self._buffer_size - return item # for debugging, its useful to return msg instead of item - - def __del__( - self, - ): # note: this is often not called. If you really need it, gc.collect() will do the trick. - if self._thread is not None: - assert self._queue is not None - self._queue.close() - try: - self._thread.join() - except: - pass - - -class BucketedReadaheadBatchIterator(CheckpointableIterator): - """ - Iterates over items from a checkpointable iterator and groups items of similar length into batches. - - The algorithm reads a head a certain number of lines (e.g. 10 million), sorts them by - length, and them groups them into batches from start to end. The sort is stable, such - that prior randomization is not undone (except for the length grouping). 
The batch size - is dynamic, and determined by a user-provided callback. - - This is based on Marian NMT's BatchGenerator. - """ - - def __init__( - self, - source_iterator: CheckpointableIterator, - read_ahead: int, - key: Callable[[Any], Any], - batch_size: Union[int, Callable[[Any], int]], - shuffle: bool = True, - seed: Optional[int] = None, - ): - """ - Args: - source_iterator: The data set that is read from. Typically this is an infinite source. - read_ahead: Number of items to fetch ahead for grouping purposes. - key: User-provided callback to define how data is sorted for purpose of batching. - batch_size: Batch size in number of items. Either an integer or a callback to determine batch size for a given first batch item. - shuffle: Pass False to not randomize the batches. (default: True) - seed: Random seed for batch shuffling. - """ - if not isinstance(source_iterator, CheckpointableIterator): - raise ValueError("source_iterator has to be a CheckpointableIterator") - # keep arguments - self._key = key # type: Callable[[Any], Any] - self._batch_size = batch_size # type: Union[int,Callable[[Any], int]] - self._read_ahead = read_ahead # type: int - # initialize state - self._random = None - if shuffle: - self._random = Random() # type: Random - if seed is not None: - self._random.seed(seed) - self._source_iterator = iter(source_iterator) # type: CheckpointableIterator - self.setstate(None) - - def getstate(self): - return { - "source_state": self._source_state, - "random_state": self._random_state, - "num_served": self._num_batches_yielded, - } - - def setstate(self, checkpoint: Optional[Dict]): - self._source_state = ( - checkpoint["source_state"] if checkpoint else None - ) # type: Dict -- state of input before reading the current set of batches - self._random_state = ( - checkpoint["random_state"] if checkpoint else None - ) # type: Any -- state of random generator at _source_state - self._num_batches_yielded = ( - checkpoint["num_served"] if checkpoint else 0 - ) # type: int -- number of batches served from the current set of batches - # checkpointing: restore to start of current set of batches - self._source_iterator.setstate(self._source_state) - if self._random_state: - self._random.setstate(self._random_state) - self._source_exhausted = ( - False - ) # type: bool -- set to True once we hit StopIteration on source - - def _generate(): - skip_to_checkpoint = self._num_batches_yielded - source_exhausted = False - while not source_exhausted: - # prefetch the readahead buffer - self._source_state = self._source_iterator.getstate() - self._random_state = self._random.getstate() if self._random else None - items = list(islice(self._source_iterator, self._read_ahead)) - source_exhausted = len(items) < self._read_ahead - # create batches - batches = self._create_batches(items) - # shuffle the batches - if self._random: - self._random.shuffle(batches) - # on first loop iteration, restore iterator inside batches from checkpoint - batches = iter(batches) - self._num_batches_yielded = _advance_iterator( - batches, skip_to_checkpoint - ) - skip_to_checkpoint = 0 - # main loop over batches in current read-ahead section - for batch in batches: - self._num_batches_yielded += 1 - yield batch - - self._iterator = ( - _generate() - ) # type: Iterator -- iterator into current set of batches - - def _create_batches( - self, items: List[Any] - ) -> List[List[Any]]: # helper to form batches from a list of items - # sort by length, longest first - if self._key: - items.sort( - key=self._key, 
reverse=True - ) # note: sort() is stable, so we won't undo any randomization besides the bucketing - # group into batches - cur_batch = None - batches = [] - for item in items: - if not cur_batch: - batch_size = ( - self._batch_size - if isinstance(self._batch_size, int) - else self._batch_size(item) - ) - cur_batch = [] - cur_batch.append(item) - if len(cur_batch) >= batch_size: # this batch is full - batches.append(cur_batch) - cur_batch = None - if cur_batch: - batches.append(cur_batch) - return batches - - def __next__(self): - return next(self._iterator) diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/local/data_download.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/local/data_download.sh deleted file mode 100644 index 881fe5fe40bd8e53aa40c8439fab01eb8662eb9c..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/libritts/voc1/local/data_download.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -# Copyright 2020 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -download_dir=$1 - -# check arguments -if [ $# != 1 ]; then - echo "Usage: $0 " - exit 1 -fi - -set -euo pipefail - -base_url=http://www.openslr.org/resources/60 -parts="dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500" - -cwd=$(pwd) -if [ ! -e "${download_dir}/LibriTTS/.done" ]; then - mkdir -p "${download_dir}" - cd "${download_dir}" || exit 1; - for part in ${parts}; do - if [ -e "./LibriTTS/.${part}_done" ]; then - echo "Download of ${part} is already finished. skipped." - continue - fi - wget --no-check-certificate "${base_url}/${part}.tar.gz" - tar xvzf "${part}.tar.gz" - touch "./LibriTTS/.${part}_done" - done - touch ./LibriTTS/.done - cd "${cwd}" || exit 1; - echo "Successfully downloaded data." -else - echo "Already exists. Skipped." -fi - -if [ ! -e "${download_dir}/LibriTTSLabel/.done" ]; then - cd "${download_dir}" || exit 1; - rm -rf LibriTTSLabel - git clone https://github.com/kan-bayashi/LibriTTSLabel.git - cd LibriTTSLabel - cat lab.tar.gz-* > lab.tar.gz - tar xvzf lab.tar.gz - touch .done - cd "${cwd}" || exit 1; - echo "Successfully downloaded label data." -else - echo "Already exists. Skipped." -fi diff --git a/spaces/akhaliq/deeplab2/utils/panoptic_instances.py b/spaces/akhaliq/deeplab2/utils/panoptic_instances.py deleted file mode 100644 index 800b11238714094badcb43c16092e048010416da..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/utils/panoptic_instances.py +++ /dev/null @@ -1,303 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tensorflow code for working with object instances in segmentation.""" - -from typing import Iterable -from typing import Optional -from typing import Tuple -from typing import Union - -import tensorflow as tf - - -def instances_without_ignore_categories(panoptic_labels: tf.Tensor, - ignore_categories: Union[tf.Tensor, - Iterable[int]], - panoptic_divisor: Union[tf.Tensor, - int] = 256): - """Determines which instances to keep after ignoring a set of categories. - - Args: - panoptic_labels: An integer tensor of panoptic labels of shape `[height, - width]`. Each element will be `category * panoptic_divisor + instance` for - a pixel. - ignore_categories: An iterable or tensor of integer category labels. - Instances where the category portion of the label in `panoptic_labels` are - in the ignore set will not be included in the results. - panoptic_divisor: The divisor used to multiply the category label when - constructing panoptic labels, as in integer or integer scalar tensor. - - Returns: - A boolean tensor masking which of the input `panoptic_labels` corresponds - to an instance that will be kept, or equivalently *not* ignored. - """ - ignore_categories = tf.convert_to_tensor( - ignore_categories, dtype=panoptic_labels.dtype) - panoptic_divisor = tf.convert_to_tensor( - panoptic_divisor, dtype=panoptic_labels.dtype) - - instance_category = tf.math.floordiv(panoptic_labels, panoptic_divisor) - instance_is_ignored = tf.math.reduce_any( - tf.equal( - tf.expand_dims(instance_category, 1), - tf.expand_dims(ignore_categories, 0)), - axis=1) - instance_is_kept = tf.math.logical_not(instance_is_ignored) - - return instance_is_kept - - -def _broadcast_over_instances(t, num_instances): - out_shape = tf.concat([tf.shape(t), [num_instances]], axis=0) - return tf.broadcast_to(tf.expand_dims(t, -1), out_shape) - - -def instance_boxes_from_masks( - panoptic_labels: tf.Tensor, - ignore_categories: Optional[Union[tf.Tensor, Iterable[int]]] = None, - panoptic_divisor: Union[tf.Tensor, int] = 256): - """Finds the bounding boxes around instances, given a panoptic label map. - - Args: - panoptic_labels: An integer tensor of panoptic labelsof shape `[height, - width]`. Each element will be `category * panoptic_divisor + instance` for - a pixel. - ignore_categories: An iterable or tensor of integer category labels. - Instances where the category portion of the label in `panoptic_labels` are - in the ignore set will not be included in the results. - panoptic_divisor: The divisor used to multiply the category label when - constructing panoptic labels, as in integer or integer scalar tensor. - - Returns: - A tuple of arrays (unique_labels, box_coords). - unique_labels: An tensor of each possible non-ignored label value in - `panoptic_labels`, in the same order as the boxes. - box_coords: An tensor of shape `[num_labels, 4]`. Each row is one box as - `[ymin, xmin, ymax, xmax]`. 
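As a rough illustration of the expected output, here is a toy call with made-up values, assuming the function above is in scope and TensorFlow 2 eager execution; labels 257 and 258 encode class 1 with instance ids 1 and 2 under panoptic_divisor=256:

import tensorflow as tf

# 3x4 panoptic map: two instances of class 1 plus background pixels of class 0
panoptic = tf.constant([[257, 257,   0,   0],
                        [257, 257,   0, 258],
                        [  0,   0,   0, 258]])

labels, boxes = instance_boxes_from_masks(
    panoptic, ignore_categories=[0], panoptic_divisor=256)
# labels -> [257, 258]
# boxes  -> [[0., 0., 2., 2.],   # instance 257 spans rows 0-1, cols 0-1
#            [1., 3., 3., 4.]]   # instance 258 spans rows 1-2, col 3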
- """ - label_shape = tf.shape(panoptic_labels) - height = label_shape[0] - width = label_shape[1] - x_coord, y_coord = tf.meshgrid( - tf.range(width, dtype=tf.float32), tf.range(height, dtype=tf.float32)) - - unique_labels, flat_instance_index = tf.unique( - tf.reshape(panoptic_labels, [height * width])) - num_instances = tf.size(unique_labels) - instance_index = tf.reshape(flat_instance_index, [height, width]) - - y_coord_repeated = _broadcast_over_instances(y_coord, num_instances) - x_coord_repeated = _broadcast_over_instances(x_coord, num_instances) - instance_index_repeated = _broadcast_over_instances(instance_index, - num_instances) - - instance_index_matches = tf.math.equal( - instance_index_repeated, - tf.reshape(tf.range(num_instances), [1, 1, num_instances])) - - # In these tensors, each slice in the 3rd dimension corresponds to an - # instance. We replace the pixels that do _not_ belong to that instance with - # a +/- infinity in order that it not be included in the reduce_min/max below. - inf3d = tf.broadcast_to([[[float('Inf')]]], tf.shape(x_coord_repeated)) - y_or_inf = tf.where(instance_index_matches, y_coord_repeated, inf3d) - y_or_neg_inf = tf.where(instance_index_matches, y_coord_repeated, -inf3d) - x_or_inf = tf.where(instance_index_matches, x_coord_repeated, inf3d) - x_or_neg_inf = tf.where(instance_index_matches, x_coord_repeated, -inf3d) - - y_min = tf.reduce_min(y_or_inf, axis=[0, 1]) - x_min = tf.reduce_min(x_or_inf, axis=[0, 1]) - y_max = tf.reduce_max(y_or_neg_inf, axis=[0, 1]) + 1 - x_max = tf.reduce_max(x_or_neg_inf, axis=[0, 1]) + 1 - - box_coords = tf.stack([y_min, x_min, y_max, x_max], axis=1) - - if ignore_categories is not None: - # Filter out the boxes that correspond to instances in the "ignore" - # categories. - instance_is_kept = instances_without_ignore_categories( - unique_labels, ignore_categories, panoptic_divisor) - - unique_labels = tf.boolean_mask(unique_labels, instance_is_kept) - box_coords = tf.boolean_mask(box_coords, instance_is_kept) - - return unique_labels, box_coords - - -def per_instance_masks(panoptic_labels: tf.Tensor, - instance_panoptic_labels: tf.Tensor, - out_dtype: tf.dtypes.DType = tf.bool) -> tf.Tensor: - """3D tensor where each slice in 3rd dimensions is an instance's mask.""" - num_instances = tf.size(instance_panoptic_labels) - matches = tf.equal( - tf.expand_dims(panoptic_labels, 0), - tf.reshape(instance_panoptic_labels, [num_instances, 1, 1])) - return tf.cast(matches, out_dtype) - - -def _average_per_instance(map_tensor: tf.Tensor, panoptic_labels: tf.Tensor, - instance_panoptic_labels: tf.Tensor, - instance_area: tf.Tensor) -> tf.Tensor: - """Finds the average of the values in map_tensor over each instance.""" - - # For each instance (in the 3rd dim), generate a map that has, per-pixel: - # - The input value if that pixel belongs to the instance. - # - Zero otherwise. - pixel_in_instance = per_instance_masks(panoptic_labels, - instance_panoptic_labels) - - map_dtype = map_tensor.dtype - num_instances = tf.size(instance_panoptic_labels) - map_or_zero = tf.where(pixel_in_instance, tf.expand_dims(map_tensor, 0), - tf.zeros([num_instances, 1, 1], dtype=map_dtype)) - - # Average the semantic probabilities over each instance. 
- instance_total_prob = tf.math.reduce_sum(map_or_zero, axis=[1, 2]) - instance_avg_prob = tf.divide(instance_total_prob, - tf.cast(instance_area, map_dtype)) - - return instance_avg_prob - - -# pyformat: disable -def per_instance_semantic_probabilities( - panoptic_labels: tf.Tensor, - instance_panoptic_labels: tf.Tensor, - instance_area: tf.Tensor, - semantic_probability: tf.Tensor, - panoptic_divisor: Union[tf.Tensor, int], - ignore_label: Union[tf.Tensor, int]) -> tf.Tensor: - """Mean probability for the semantic label of each unique instance.""" - # pyformat: enable - # Get the probability associated with the semantic label encoded in the - # panoptic_labels at each pixel. - panoptic_divisor = tf.convert_to_tensor(panoptic_divisor, dtype=tf.int32) - ignore_label = tf.convert_to_tensor(ignore_label, dtype=tf.int32) - semantic_label_map = tf.math.floordiv(panoptic_labels, panoptic_divisor) - - map_shape = tf.shape(semantic_label_map) - height = map_shape[0] - width = map_shape[1] - num_pixels = height * width - - semantic_index = tf.reshape(semantic_label_map, [num_pixels]) - # Use 0 as the index for a pixel with the "ignore" label, since that semantic - # label may not be a valid index into the class axis of the - # semantic_probability tensor. - semantic_index = tf.where(semantic_index == ignore_label, 0, semantic_index) - - x, y = tf.meshgrid(tf.range(width), tf.range(height)) - probability_index = tf.stack([ - tf.reshape(y, [num_pixels]), - tf.reshape(x, [num_pixels]), - semantic_index, - ], - axis=1) - - pixel_semantic_probability = tf.reshape( - tf.gather_nd(semantic_probability, probability_index), [height, width]) - # Set the probability for the "ignore" pixels to 0. - pixel_semantic_probability = tf.where(semantic_label_map == ignore_label, 0.0, - pixel_semantic_probability) - - instance_avg_prob = _average_per_instance(pixel_semantic_probability, - panoptic_labels, - instance_panoptic_labels, - instance_area) - - return instance_avg_prob - - -def combined_instance_scores( - panoptic_labels: tf.Tensor, semantic_probability: tf.Tensor, - instance_score_map: tf.Tensor, panoptic_divisor: Union[tf.Tensor, int], - ignore_label: Union[tf.Tensor, int]) -> Tuple[tf.Tensor, tf.Tensor]: - """Combines (with a product) predicted semantic and instance probabilities. - - Args: - panoptic_labels: A 2D integer tensor of panoptic format labels (each pixel - entry is `semantic_label * panoptic_divisor + instance_label`). - semantic_probability: A 3D float tensor, where the 3rd dimension is over - semantic labels, and each spatial location will have the discrete - distribution of the probabilities of the semantic classes. - instance_score_map: A 2D float tensor, where the pixels for an instance will - have the probability of that being an instance. - panoptic_divisor: Integer scalar divisor/multiplier used to construct the - panoptic labels. - ignore_label: Integer scalar, for the "ignore" semantic label in the - panoptic labels. - - Returns: - A tuple of instance labels and the combined scores for those instances, each - as a 1D tensor. 
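A minimal sketch of this combination on a 2x2 toy input, assuming the helpers above are in scope and TensorFlow 2 eager execution; all values are invented for illustration:

import tensorflow as tf

# top row belongs to one instance (class 1, id 1 -> 257); bottom row is the ignore class 0
panoptic = tf.constant([[257, 257],
                        [  0,   0]])
# per-pixel probabilities over 2 semantic classes, shape [height, width, num_classes]
semantic_prob = tf.constant([[[0.2, 0.8], [0.4, 0.6]],
                             [[1.0, 0.0], [1.0, 0.0]]])
# per-pixel instance ("objectness") score
instance_score = tf.constant([[0.9, 0.7],
                              [0.0, 0.0]])

labels, scores = combined_instance_scores(
    panoptic, semantic_prob, instance_score,
    panoptic_divisor=256, ignore_label=0)
# labels -> [257]
# scores -> mean(0.8, 0.6) * mean(0.9, 0.7) = 0.7 * 0.8 = 0.56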
- """ - panoptic_divisor = tf.convert_to_tensor(panoptic_divisor, dtype=tf.int32) - ignore_label = tf.convert_to_tensor(ignore_label, dtype=tf.int32) - - num_pixels = tf.size(panoptic_labels) - instance_panoptic_labels, _, instance_area = tf.unique_with_counts( - tf.reshape(panoptic_labels, [num_pixels])) - - instance_semantic_labels = tf.math.floordiv(instance_panoptic_labels, - panoptic_divisor) - instance_panoptic_labels = tf.boolean_mask( - instance_panoptic_labels, instance_semantic_labels != ignore_label) - instance_area = tf.boolean_mask(instance_area, - instance_semantic_labels != ignore_label) - - instance_semantic_probabilities = per_instance_semantic_probabilities( - panoptic_labels, instance_panoptic_labels, instance_area, - semantic_probability, panoptic_divisor, ignore_label) - - instance_scores = _average_per_instance(instance_score_map, panoptic_labels, - instance_panoptic_labels, - instance_area) - - combined_scores = instance_semantic_probabilities * instance_scores - return instance_panoptic_labels, combined_scores - - -def per_instance_is_crowd(is_crowd_map: tf.Tensor, id_map: tf.Tensor, - output_ids: tf.Tensor) -> tf.Tensor: - """Determines the per-instance is_crowd value from a boolian is_crowd map. - - Args: - is_crowd_map: A 2D boolean tensor. Where it is True, the instance in that - region is a "crowd" instance. It is assumed that all pixels in an instance - will have the same value in this map. - id_map: A 2D integer tensor, with the instance id label at each pixel. - output_ids: A 1D integer vector tensor, the per-instance ids for which to - output the is_crowd values. - - Returns: - A 1D boolean vector tensor, with the per-instance is_crowd value. The ith - element of the return value will be the is_crowd result for the segment - with the ith element of the output_ids argument. - """ - flat_is_crowd_map = tf.reshape(is_crowd_map, [-1]) - flat_id_map = tf.reshape(id_map, [-1]) - - # Get an is_crowd value from the map for each id. - # Only need an arbtitrary value due to assumption that the is_crowd map does - # not vary over an instance. - unique_ids, unique_index = tf.unique(flat_id_map) - unique_is_crowd = tf.scatter_nd( - tf.expand_dims(unique_index, 1), flat_is_crowd_map, tf.shape(unique_ids)) - - # Map from the order/set in unique_ids to that in output_ids - matching_ids = tf.math.equal( - tf.expand_dims(output_ids, 1), tf.expand_dims(unique_ids, 0)) - matching_index = tf.where(matching_ids)[:, 1] - return tf.gather(unique_is_crowd, matching_index) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py deleted file mode 100644 index be5962f98007b9220fb8eae3184d330772fba9ba..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py +++ /dev/null @@ -1,20 +0,0 @@ -from pip._internal.distributions.base import AbstractDistribution -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata import BaseDistribution - - -class InstalledDistribution(AbstractDistribution): - """Represents an installed package. - - This does not need any preparation as the required information has already - been computed. 
- """ - - def get_metadata_distribution(self) -> BaseDistribution: - assert self.req.satisfied_by is not None, "not actually installed" - return self.req.satisfied_by - - def prepare_distribution_metadata( - self, finder: PackageFinder, build_isolation: bool - ) -> None: - pass diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py deleted file mode 100644 index 92060d45db8888500a94669a02af76b220b7a242..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py +++ /dev/null @@ -1,220 +0,0 @@ -""" PEP 610 """ -import json -import re -import urllib.parse -from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union - -__all__ = [ - "DirectUrl", - "DirectUrlValidationError", - "DirInfo", - "ArchiveInfo", - "VcsInfo", -] - -T = TypeVar("T") - -DIRECT_URL_METADATA_NAME = "direct_url.json" -ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$") - - -class DirectUrlValidationError(Exception): - pass - - -def _get( - d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None -) -> Optional[T]: - """Get value from dictionary and verify expected type.""" - if key not in d: - return default - value = d[key] - if not isinstance(value, expected_type): - raise DirectUrlValidationError( - "{!r} has unexpected type for {} (expected {})".format( - value, key, expected_type - ) - ) - return value - - -def _get_required( - d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None -) -> T: - value = _get(d, expected_type, key, default) - if value is None: - raise DirectUrlValidationError(f"{key} must have a value") - return value - - -def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType": - infos = [info for info in infos if info is not None] - if not infos: - raise DirectUrlValidationError( - "missing one of archive_info, dir_info, vcs_info" - ) - if len(infos) > 1: - raise DirectUrlValidationError( - "more than one of archive_info, dir_info, vcs_info" - ) - assert infos[0] is not None - return infos[0] - - -def _filter_none(**kwargs: Any) -> Dict[str, Any]: - """Make dict excluding None values.""" - return {k: v for k, v in kwargs.items() if v is not None} - - -class VcsInfo: - name = "vcs_info" - - def __init__( - self, - vcs: str, - commit_id: str, - requested_revision: Optional[str] = None, - resolved_revision: Optional[str] = None, - resolved_revision_type: Optional[str] = None, - ) -> None: - self.vcs = vcs - self.requested_revision = requested_revision - self.commit_id = commit_id - self.resolved_revision = resolved_revision - self.resolved_revision_type = resolved_revision_type - - @classmethod - def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]: - if d is None: - return None - return cls( - vcs=_get_required(d, str, "vcs"), - commit_id=_get_required(d, str, "commit_id"), - requested_revision=_get(d, str, "requested_revision"), - resolved_revision=_get(d, str, "resolved_revision"), - resolved_revision_type=_get(d, str, "resolved_revision_type"), - ) - - def _to_dict(self) -> Dict[str, Any]: - return _filter_none( - vcs=self.vcs, - requested_revision=self.requested_revision, - commit_id=self.commit_id, - resolved_revision=self.resolved_revision, - resolved_revision_type=self.resolved_revision_type, - ) - - -class ArchiveInfo: - name = "archive_info" 
- - def __init__( - self, - hash: Optional[str] = None, - ) -> None: - self.hash = hash - - @classmethod - def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]: - if d is None: - return None - return cls(hash=_get(d, str, "hash")) - - def _to_dict(self) -> Dict[str, Any]: - return _filter_none(hash=self.hash) - - -class DirInfo: - name = "dir_info" - - def __init__( - self, - editable: bool = False, - ) -> None: - self.editable = editable - - @classmethod - def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]: - if d is None: - return None - return cls(editable=_get_required(d, bool, "editable", default=False)) - - def _to_dict(self) -> Dict[str, Any]: - return _filter_none(editable=self.editable or None) - - -InfoType = Union[ArchiveInfo, DirInfo, VcsInfo] - - -class DirectUrl: - def __init__( - self, - url: str, - info: InfoType, - subdirectory: Optional[str] = None, - ) -> None: - self.url = url - self.info = info - self.subdirectory = subdirectory - - def _remove_auth_from_netloc(self, netloc: str) -> str: - if "@" not in netloc: - return netloc - user_pass, netloc_no_user_pass = netloc.split("@", 1) - if ( - isinstance(self.info, VcsInfo) - and self.info.vcs == "git" - and user_pass == "git" - ): - return netloc - if ENV_VAR_RE.match(user_pass): - return netloc - return netloc_no_user_pass - - @property - def redacted_url(self) -> str: - """url with user:password part removed unless it is formed with - environment variables as specified in PEP 610, or it is ``git`` - in the case of a git URL. - """ - purl = urllib.parse.urlsplit(self.url) - netloc = self._remove_auth_from_netloc(purl.netloc) - surl = urllib.parse.urlunsplit( - (purl.scheme, netloc, purl.path, purl.query, purl.fragment) - ) - return surl - - def validate(self) -> None: - self.from_dict(self.to_dict()) - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl": - return DirectUrl( - url=_get_required(d, str, "url"), - subdirectory=_get(d, str, "subdirectory"), - info=_exactly_one_of( - [ - ArchiveInfo._from_dict(_get(d, dict, "archive_info")), - DirInfo._from_dict(_get(d, dict, "dir_info")), - VcsInfo._from_dict(_get(d, dict, "vcs_info")), - ] - ), - ) - - def to_dict(self) -> Dict[str, Any]: - res = _filter_none( - url=self.redacted_url, - subdirectory=self.subdirectory, - ) - res[self.info.name] = self.info._to_dict() - return res - - @classmethod - def from_json(cls, s: str) -> "DirectUrl": - return cls.from_dict(json.loads(s)) - - def to_json(self) -> str: - return json.dumps(self.to_dict(), sort_keys=True) - - def is_local_editable(self) -> bool: - return isinstance(self.info, DirInfo) and self.info.editable diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py deleted file mode 100644 index c70493f2b131b32378612044f30173eabbfbc3f4..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/escprober.py +++ /dev/null @@ -1,101 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetprober import CharSetProber -from .codingstatemachine import CodingStateMachine -from .enums import LanguageFilter, ProbingState, MachineState -from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL, - ISO2022KR_SM_MODEL) - - -class EscCharSetProber(CharSetProber): - """ - This CharSetProber uses a "code scheme" approach for detecting encodings, - whereby easily recognizable escape or shift sequences are relied on to - identify these encodings. - """ - - def __init__(self, lang_filter=None): - super(EscCharSetProber, self).__init__(lang_filter=lang_filter) - self.coding_sm = [] - if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: - self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) - self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL)) - if self.lang_filter & LanguageFilter.JAPANESE: - self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) - if self.lang_filter & LanguageFilter.KOREAN: - self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) - self.active_sm_count = None - self._detected_charset = None - self._detected_language = None - self._state = None - self.reset() - - def reset(self): - super(EscCharSetProber, self).reset() - for coding_sm in self.coding_sm: - if not coding_sm: - continue - coding_sm.active = True - coding_sm.reset() - self.active_sm_count = len(self.coding_sm) - self._detected_charset = None - self._detected_language = None - - @property - def charset_name(self): - return self._detected_charset - - @property - def language(self): - return self._detected_language - - def get_confidence(self): - if self._detected_charset: - return 0.99 - else: - return 0.00 - - def feed(self, byte_str): - for c in byte_str: - for coding_sm in self.coding_sm: - if not coding_sm or not coding_sm.active: - continue - coding_state = coding_sm.next_state(c) - if coding_state == MachineState.ERROR: - coding_sm.active = False - self.active_sm_count -= 1 - if self.active_sm_count <= 0: - self._state = ProbingState.NOT_ME - return self.state - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - self._detected_charset = coding_sm.get_coding_state_machine() - self._detected_language = coding_sm.language - return self.state - - return self.state diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py deleted file mode 100644 index f4ccea5a25653dd9e4c1bcf1047f407184562a1b..0000000000000000000000000000000000000000 --- 
a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/html5lib/treeadapters/sax.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.sax.xmlreader import AttributesNSImpl - -from ..constants import adjustForeignAttributes, unadjustForeignAttributes - -prefix_mapping = {} -for prefix, localName, namespace in adjustForeignAttributes.values(): - if prefix is not None: - prefix_mapping[prefix] = namespace - - -def to_sax(walker, handler): - """Call SAX-like content handler based on treewalker walker - - :arg walker: the treewalker to use to walk the tree to convert it - - :arg handler: SAX handler to use - - """ - handler.startDocument() - for prefix, namespace in prefix_mapping.items(): - handler.startPrefixMapping(prefix, namespace) - - for token in walker: - type = token["type"] - if type == "Doctype": - continue - elif type in ("StartTag", "EmptyTag"): - attrs = AttributesNSImpl(token["data"], - unadjustForeignAttributes) - handler.startElementNS((token["namespace"], token["name"]), - token["name"], - attrs) - if type == "EmptyTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type == "EndTag": - handler.endElementNS((token["namespace"], token["name"]), - token["name"]) - elif type in ("Characters", "SpaceCharacters"): - handler.characters(token["data"]) - elif type == "Comment": - pass - else: - assert False, "Unknown token type" - - for prefix, namespace in prefix_mapping.items(): - handler.endPrefixMapping(prefix) - handler.endDocument() diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py deleted file mode 100644 index 92e4772eddfb5320dadadaf233bb5a89c53dc5e8..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/ansi.py +++ /dev/null @@ -1,228 +0,0 @@ -from contextlib import suppress -import re -from typing import Iterable, NamedTuple - -from .color import Color -from .style import Style -from .text import Text - -re_ansi = re.compile(r"(?:\x1b\[(.*?)m)|(?:\x1b\](.*?)\x1b\\)") -re_csi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -class _AnsiToken(NamedTuple): - """Result of ansi tokenized string.""" - - plain: str = "" - sgr: str = "" - osc: str = "" - - -def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]: - """Tokenize a string in to plain text and ANSI codes. - - Args: - ansi_text (str): A String containing ANSI codes. 
- - Yields: - AnsiToken: A named tuple of (plain, sgr, osc) - """ - - def remove_csi(ansi_text: str) -> str: - """Remove unknown CSI sequences.""" - return re_csi.sub("", ansi_text) - - position = 0 - for match in re_ansi.finditer(ansi_text): - start, end = match.span(0) - sgr, osc = match.groups() - if start > position: - yield _AnsiToken(remove_csi(ansi_text[position:start])) - yield _AnsiToken("", sgr, osc) - position = end - if position < len(ansi_text): - yield _AnsiToken(remove_csi(ansi_text[position:])) - - -SGR_STYLE_MAP = { - 1: "bold", - 2: "dim", - 3: "italic", - 4: "underline", - 5: "blink", - 6: "blink2", - 7: "reverse", - 8: "conceal", - 9: "strike", - 21: "underline2", - 22: "not dim not bold", - 23: "not italic", - 24: "not underline", - 25: "not blink", - 26: "not blink2", - 27: "not reverse", - 28: "not conceal", - 29: "not strike", - 30: "color(0)", - 31: "color(1)", - 32: "color(2)", - 33: "color(3)", - 34: "color(4)", - 35: "color(5)", - 36: "color(6)", - 37: "color(7)", - 39: "default", - 40: "on color(0)", - 41: "on color(1)", - 42: "on color(2)", - 43: "on color(3)", - 44: "on color(4)", - 45: "on color(5)", - 46: "on color(6)", - 47: "on color(7)", - 49: "on default", - 51: "frame", - 52: "encircle", - 53: "overline", - 54: "not frame not encircle", - 55: "not overline", - 90: "color(8)", - 91: "color(9)", - 92: "color(10)", - 93: "color(11)", - 94: "color(12)", - 95: "color(13)", - 96: "color(14)", - 97: "color(15)", - 100: "on color(8)", - 101: "on color(9)", - 102: "on color(10)", - 103: "on color(11)", - 104: "on color(12)", - 105: "on color(13)", - 106: "on color(14)", - 107: "on color(15)", -} - - -class AnsiDecoder: - """Translate ANSI code in to styled Text.""" - - def __init__(self) -> None: - self.style = Style.null() - - def decode(self, terminal_text: str) -> Iterable[Text]: - """Decode ANSI codes in an interable of lines. - - Args: - lines (Iterable[str]): An iterable of lines of terminal output. - - Yields: - Text: Marked up Text. - """ - for line in terminal_text.splitlines(): - yield self.decode_line(line) - - def decode_line(self, line: str) -> Text: - """Decode a line containing ansi codes. - - Args: - line (str): A line of terminal output. - - Returns: - Text: A Text instance marked up according to ansi codes. 
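For example, a small usage sketch, assuming this class is importable as rich.ansi.AnsiDecoder (its usual public location):

from rich.ansi import AnsiDecoder

decoder = AnsiDecoder()
# "\x1b[1;31m" switches on bold red; "\x1b[0m" resets the style
text = decoder.decode_line("\x1b[1;31merror:\x1b[0m disk full")
print(text.plain)  # "error: disk full", with the bold-red span kept in text.spans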
- """ - from_ansi = Color.from_ansi - from_rgb = Color.from_rgb - _Style = Style - text = Text() - append = text.append - line = line.rsplit("\r", 1)[-1] - for token in _ansi_tokenize(line): - plain_text, sgr, osc = token - if plain_text: - append(plain_text, self.style or None) - elif osc: - if osc.startswith("8;"): - _params, semicolon, link = osc[2:].partition(";") - if semicolon: - self.style = self.style.update_link(link or None) - elif sgr: - # Translate in to semi-colon separated codes - # Ignore invalid codes, because we want to be lenient - codes = [ - min(255, int(_code)) for _code in sgr.split(";") if _code.isdigit() - ] - iter_codes = iter(codes) - for code in iter_codes: - if code == 0: - # reset - self.style = _Style.null() - elif code in SGR_STYLE_MAP: - # styles - self.style += _Style.parse(SGR_STYLE_MAP[code]) - elif code == 38: - #  Foreground - with suppress(StopIteration): - color_type = next(iter_codes) - if color_type == 5: - self.style += _Style.from_color( - from_ansi(next(iter_codes)) - ) - elif color_type == 2: - self.style += _Style.from_color( - from_rgb( - next(iter_codes), - next(iter_codes), - next(iter_codes), - ) - ) - elif code == 48: - # Background - with suppress(StopIteration): - color_type = next(iter_codes) - if color_type == 5: - self.style += _Style.from_color( - None, from_ansi(next(iter_codes)) - ) - elif color_type == 2: - self.style += _Style.from_color( - None, - from_rgb( - next(iter_codes), - next(iter_codes), - next(iter_codes), - ), - ) - - return text - - -if __name__ == "__main__": # pragma: no cover - import pty - import io - import os - import sys - - decoder = AnsiDecoder() - - stdout = io.BytesIO() - - def read(fd: int) -> bytes: - data = os.read(fd, 1024) - stdout.write(data) - return data - - pty.spawn(sys.argv[1:], read) - - from .console import Console - - console = Console(record=True) - - stdout_result = stdout.getvalue().decode("utf-8") - print(stdout_result) - - for line in decoder.decode(stdout_result): - console.print(line) - - console.save_html("stdout.html") diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/console.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/console.py deleted file mode 100644 index 27e722760ffa94f03f21fa1b0a715dc2ca5ccbe3..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/console.py +++ /dev/null @@ -1,2211 +0,0 @@ -import inspect -import os -import platform -import sys -import threading -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from datetime import datetime -from functools import wraps -from getpass import getpass -from html import escape -from inspect import isclass -from itertools import islice -from threading import RLock -from time import monotonic -from types import FrameType, ModuleType, TracebackType -from typing import ( - IO, - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterable, - List, - Mapping, - NamedTuple, - Optional, - TextIO, - Tuple, - Type, - Union, - cast, -) - -if sys.version_info >= (3, 8): - from typing import Literal, Protocol, runtime_checkable -else: - from pip._vendor.typing_extensions import ( - Literal, - Protocol, - runtime_checkable, - ) # pragma: no cover - -from . 
import errors, themes -from ._emoji_replace import _emoji_replace -from ._log_render import FormatTimeCallable, LogRender -from .align import Align, AlignMethod -from .color import ColorSystem -from .control import Control -from .emoji import EmojiVariant -from .highlighter import NullHighlighter, ReprHighlighter -from .markup import render as render_markup -from .measure import Measurement, measure_renderables -from .pager import Pager, SystemPager -from .pretty import Pretty, is_expandable -from .protocol import rich_cast -from .region import Region -from .scope import render_scope -from .screen import Screen -from .segment import Segment -from .style import Style, StyleType -from .styled import Styled -from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme -from .text import Text, TextType -from .theme import Theme, ThemeStack - -if TYPE_CHECKING: - from ._windows import WindowsConsoleFeatures - from .live import Live - from .status import Status - -WINDOWS = platform.system() == "Windows" - -HighlighterType = Callable[[Union[str, "Text"]], "Text"] -JustifyMethod = Literal["default", "left", "center", "right", "full"] -OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"] - - -class NoChange: - pass - - -NO_CHANGE = NoChange() - - -CONSOLE_HTML_FORMAT = """\ - - - - - - - - -
    {code}
    -
    - - -""" - -_TERM_COLORS = {"256color": ColorSystem.EIGHT_BIT, "16color": ColorSystem.STANDARD} - - -class ConsoleDimensions(NamedTuple): - """Size of the terminal.""" - - width: int - """The width of the console in 'cells'.""" - height: int - """The height of the console in lines.""" - - -@dataclass -class ConsoleOptions: - """Options for __rich_console__ method.""" - - size: ConsoleDimensions - """Size of console.""" - legacy_windows: bool - """legacy_windows: flag for legacy windows.""" - min_width: int - """Minimum width of renderable.""" - max_width: int - """Maximum width of renderable.""" - is_terminal: bool - """True if the target is a terminal, otherwise False.""" - encoding: str - """Encoding of terminal.""" - max_height: int - """Height of container (starts as terminal)""" - justify: Optional[JustifyMethod] = None - """Justify value override for renderable.""" - overflow: Optional[OverflowMethod] = None - """Overflow value override for renderable.""" - no_wrap: Optional[bool] = False - """Disable wrapping for text.""" - highlight: Optional[bool] = None - """Highlight override for render_str.""" - markup: Optional[bool] = None - """Enable markup when rendering strings.""" - height: Optional[int] = None - - @property - def ascii_only(self) -> bool: - """Check if renderables should use ascii only.""" - return not self.encoding.startswith("utf") - - def copy(self) -> "ConsoleOptions": - """Return a copy of the options. - - Returns: - ConsoleOptions: a copy of self. - """ - options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions) - options.__dict__ = self.__dict__.copy() - return options - - def update( - self, - *, - width: Union[int, NoChange] = NO_CHANGE, - min_width: Union[int, NoChange] = NO_CHANGE, - max_width: Union[int, NoChange] = NO_CHANGE, - justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE, - overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE, - no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE, - highlight: Union[Optional[bool], NoChange] = NO_CHANGE, - markup: Union[Optional[bool], NoChange] = NO_CHANGE, - height: Union[Optional[int], NoChange] = NO_CHANGE, - ) -> "ConsoleOptions": - """Update values, return a copy.""" - options = self.copy() - if not isinstance(width, NoChange): - options.min_width = options.max_width = max(0, width) - if not isinstance(min_width, NoChange): - options.min_width = min_width - if not isinstance(max_width, NoChange): - options.max_width = max_width - if not isinstance(justify, NoChange): - options.justify = justify - if not isinstance(overflow, NoChange): - options.overflow = overflow - if not isinstance(no_wrap, NoChange): - options.no_wrap = no_wrap - if not isinstance(highlight, NoChange): - options.highlight = highlight - if not isinstance(markup, NoChange): - options.markup = markup - if not isinstance(height, NoChange): - if height is not None: - options.max_height = height - options.height = None if height is None else max(0, height) - return options - - def update_width(self, width: int) -> "ConsoleOptions": - """Update just the width, return a copy. - - Args: - width (int): New width (sets both min_width and max_width) - - Returns: - ~ConsoleOptions: New console options instance. - """ - options = self.copy() - options.min_width = options.max_width = max(0, width) - return options - - def update_height(self, height: int) -> "ConsoleOptions": - """Update the height, and return a copy. - - Args: - height (int): New height - - Returns: - ~ConsoleOptions: New Console options instance. 
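A rough usage sketch, assuming a Console instance as defined later in this module (importable as rich.console.Console):

from rich.console import Console

console = Console()
options = console.options          # ConsoleOptions derived from the current terminal
narrow = options.update_width(40)  # a copy constrained to 40 cells
# the original options object is unchanged; the update_* methods always return copies
assert narrow.min_width == 40 and narrow.max_width == 40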
- """ - options = self.copy() - options.max_height = options.height = height - return options - - def update_dimensions(self, width: int, height: int) -> "ConsoleOptions": - """Update the width and height, and return a copy. - - Args: - width (int): New width (sets both min_width and max_width). - height (int): New height. - - Returns: - ~ConsoleOptions: New console options instance. - """ - options = self.copy() - options.min_width = options.max_width = max(0, width) - options.height = options.max_height = height - return options - - -@runtime_checkable -class RichCast(Protocol): - """An object that may be 'cast' to a console renderable.""" - - def __rich__(self) -> Union["ConsoleRenderable", str]: # pragma: no cover - ... - - -@runtime_checkable -class ConsoleRenderable(Protocol): - """An object that supports the console protocol.""" - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": # pragma: no cover - ... - - -# A type that may be rendered by Console. -RenderableType = Union[ConsoleRenderable, RichCast, str] - - -# The result of calling a __rich_console__ method. -RenderResult = Iterable[Union[RenderableType, Segment]] - - -_null_highlighter = NullHighlighter() - - -class CaptureError(Exception): - """An error in the Capture context manager.""" - - -class NewLine: - """A renderable to generate new line(s)""" - - def __init__(self, count: int = 1) -> None: - self.count = count - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> Iterable[Segment]: - yield Segment("\n" * self.count) - - -class ScreenUpdate: - """Render a list of lines at a given offset.""" - - def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None: - self._lines = lines - self.x = x - self.y = y - - def __rich_console__( - self, console: "Console", options: ConsoleOptions - ) -> RenderResult: - x = self.x - move_to = Control.move_to - for offset, line in enumerate(self._lines, self.y): - yield move_to(x, offset) - yield from line - - -class Capture: - """Context manager to capture the result of printing to the console. - See :meth:`~rich.console.Console.capture` for how to use. - - Args: - console (Console): A console instance to capture output. - """ - - def __init__(self, console: "Console") -> None: - self._console = console - self._result: Optional[str] = None - - def __enter__(self) -> "Capture": - self._console.begin_capture() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self._result = self._console.end_capture() - - def get(self) -> str: - """Get the result of the capture.""" - if self._result is None: - raise CaptureError( - "Capture result is not available until context manager exits." - ) - return self._result - - -class ThemeContext: - """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage.""" - - def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None: - self.console = console - self.theme = theme - self.inherit = inherit - - def __enter__(self) -> "ThemeContext": - self.console.push_theme(self.theme) - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.console.pop_theme() - - -class PagerContext: - """A context manager that 'pages' content. 
See :meth:`~rich.console.Console.pager` for usage.""" - - def __init__( - self, - console: "Console", - pager: Optional[Pager] = None, - styles: bool = False, - links: bool = False, - ) -> None: - self._console = console - self.pager = SystemPager() if pager is None else pager - self.styles = styles - self.links = links - - def __enter__(self) -> "PagerContext": - self._console._enter_buffer() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - if exc_type is None: - with self._console._lock: - buffer: List[Segment] = self._console._buffer[:] - del self._console._buffer[:] - segments: Iterable[Segment] = buffer - if not self.styles: - segments = Segment.strip_styles(segments) - elif not self.links: - segments = Segment.strip_links(segments) - content = self._console._render_buffer(segments) - self.pager.show(content) - self._console._exit_buffer() - - -class ScreenContext: - """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage.""" - - def __init__( - self, console: "Console", hide_cursor: bool, style: StyleType = "" - ) -> None: - self.console = console - self.hide_cursor = hide_cursor - self.screen = Screen(style=style) - self._changed = False - - def update( - self, *renderables: RenderableType, style: Optional[StyleType] = None - ) -> None: - """Update the screen. - - Args: - renderable (RenderableType, optional): Optional renderable to replace current renderable, - or None for no change. Defaults to None. - style: (Style, optional): Replacement style, or None for no change. Defaults to None. - """ - if renderables: - self.screen.renderable = ( - Group(*renderables) if len(renderables) > 1 else renderables[0] - ) - if style is not None: - self.screen.style = style - self.console.print(self.screen, end="") - - def __enter__(self) -> "ScreenContext": - self._changed = self.console.set_alt_screen(True) - if self._changed and self.hide_cursor: - self.console.show_cursor(False) - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - if self._changed: - self.console.set_alt_screen(False) - if self.hide_cursor: - self.console.show_cursor(True) - - -class Group: - """Takes a group of renderables and returns a renderable object that renders the group. - - Args: - renderables (Iterable[RenderableType]): An iterable of renderable objects. - fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. - """ - - def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None: - self._renderables = renderables - self.fit = fit - self._render: Optional[List[RenderableType]] = None - - @property - def renderables(self) -> List["RenderableType"]: - if self._render is None: - self._render = list(self._renderables) - return self._render - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - if self.fit: - return measure_renderables(console, options, self.renderables) - else: - return Measurement(options.max_width, options.max_width) - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> RenderResult: - yield from self.renderables - - -def group(fit: bool = True) -> Callable[..., Callable[..., Group]]: - """A decorator that turns an iterable of renderables in to a group. 
- - Args: - fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True. - """ - - def decorator( - method: Callable[..., Iterable[RenderableType]] - ) -> Callable[..., Group]: - """Convert a method that returns an iterable of renderables in to a Group.""" - - @wraps(method) - def _replace(*args: Any, **kwargs: Any) -> Group: - renderables = method(*args, **kwargs) - return Group(*renderables, fit=fit) - - return _replace - - return decorator - - -def _is_jupyter() -> bool: # pragma: no cover - """Check if we're running in a Jupyter notebook.""" - try: - get_ipython # type: ignore - except NameError: - return False - ipython = get_ipython() # type: ignore - shell = ipython.__class__.__name__ - if "google.colab" in str(ipython.__class__) or shell == "ZMQInteractiveShell": - return True # Jupyter notebook or qtconsole - elif shell == "TerminalInteractiveShell": - return False # Terminal running IPython - else: - return False # Other type (?) - - -COLOR_SYSTEMS = { - "standard": ColorSystem.STANDARD, - "256": ColorSystem.EIGHT_BIT, - "truecolor": ColorSystem.TRUECOLOR, - "windows": ColorSystem.WINDOWS, -} - - -_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()} - - -@dataclass -class ConsoleThreadLocals(threading.local): - """Thread local values for Console context.""" - - theme_stack: ThemeStack - buffer: List[Segment] = field(default_factory=list) - buffer_index: int = 0 - - -class RenderHook(ABC): - """Provides hooks in to the render process.""" - - @abstractmethod - def process_renderables( - self, renderables: List[ConsoleRenderable] - ) -> List[ConsoleRenderable]: - """Called with a list of objects to render. - - This method can return a new list of renderables, or modify and return the same list. - - Args: - renderables (List[ConsoleRenderable]): A number of renderable objects. - - Returns: - List[ConsoleRenderable]: A replacement list of renderables. - """ - - -_windows_console_features: Optional["WindowsConsoleFeatures"] = None - - -def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover - global _windows_console_features - if _windows_console_features is not None: - return _windows_console_features - from ._windows import get_windows_console_features - - _windows_console_features = get_windows_console_features() - return _windows_console_features - - -def detect_legacy_windows() -> bool: - """Detect legacy Windows.""" - return WINDOWS and not get_windows_console_features().vt - - -if detect_legacy_windows(): # pragma: no cover - from pip._vendor.colorama import init - - init(strip=False) - - -class Console: - """A high level console interface. - - Args: - color_system (str, optional): The color system supported by your terminal, - either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect. - force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None. - force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None. - force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None. - soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False. - theme (Theme, optional): An optional style theme object, or ``None`` for default theme. - stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False. 
- file (IO, optional): A file object where the console should write to. Defaults to stdout. - quiet (bool, Optional): Boolean to suppress all output. Defaults to False. - width (int, optional): The width of the terminal. Leave as default to auto-detect width. - height (int, optional): The height of the terminal. Leave as default to auto-detect height. - style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None. - no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. - tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. - record (bool, optional): Boolean to enable recording of terminal output, - required to call :meth:`export_html` and :meth:`export_text`. Defaults to False. - markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. - emoji (bool, optional): Enable emoji code. Defaults to True. - emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. - highlight (bool, optional): Enable automatic highlighting. Defaults to True. - log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True. - log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True. - log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ". - highlighter (HighlighterType, optional): Default highlighter. - legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``. - safe_box (bool, optional): Restrict box options that don't render on legacy Windows. - get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log), - or None for datetime.now. - get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic. 
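A small usage sketch, assuming this class is used through its public name rich.console.Console:

from rich.console import Console

console = Console(width=60, record=True, log_path=False)
console.print("[bold magenta]Hello[/] world")  # console markup is enabled by default
console.log("finished step 1")                 # log() prepends the time column configured above
html = console.export_html()                   # possible because record=True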
- """ - - _environ: Mapping[str, str] = os.environ - - def __init__( - self, - *, - color_system: Optional[ - Literal["auto", "standard", "256", "truecolor", "windows"] - ] = "auto", - force_terminal: Optional[bool] = None, - force_jupyter: Optional[bool] = None, - force_interactive: Optional[bool] = None, - soft_wrap: bool = False, - theme: Optional[Theme] = None, - stderr: bool = False, - file: Optional[IO[str]] = None, - quiet: bool = False, - width: Optional[int] = None, - height: Optional[int] = None, - style: Optional[StyleType] = None, - no_color: Optional[bool] = None, - tab_size: int = 8, - record: bool = False, - markup: bool = True, - emoji: bool = True, - emoji_variant: Optional[EmojiVariant] = None, - highlight: bool = True, - log_time: bool = True, - log_path: bool = True, - log_time_format: Union[str, FormatTimeCallable] = "[%X]", - highlighter: Optional["HighlighterType"] = ReprHighlighter(), - legacy_windows: Optional[bool] = None, - safe_box: bool = True, - get_datetime: Optional[Callable[[], datetime]] = None, - get_time: Optional[Callable[[], float]] = None, - _environ: Optional[Mapping[str, str]] = None, - ): - # Copy of os.environ allows us to replace it for testing - if _environ is not None: - self._environ = _environ - - self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter - if self.is_jupyter: - width = width or 93 - height = height or 100 - - self.soft_wrap = soft_wrap - self._width = width - self._height = height - self.tab_size = tab_size - self.record = record - self._markup = markup - self._emoji = emoji - self._emoji_variant: Optional[EmojiVariant] = emoji_variant - self._highlight = highlight - self.legacy_windows: bool = ( - (detect_legacy_windows() and not self.is_jupyter) - if legacy_windows is None - else legacy_windows - ) - if width is None: - columns = self._environ.get("COLUMNS") - if columns is not None and columns.isdigit(): - width = int(columns) - self.legacy_windows - if height is None: - lines = self._environ.get("LINES") - if lines is not None and lines.isdigit(): - height = int(lines) - - self.soft_wrap = soft_wrap - self._width = width - self._height = height - - self._color_system: Optional[ColorSystem] - self._force_terminal = force_terminal - self._file = file - self.quiet = quiet - self.stderr = stderr - - if color_system is None: - self._color_system = None - elif color_system == "auto": - self._color_system = self._detect_color_system() - else: - self._color_system = COLOR_SYSTEMS[color_system] - - self._lock = threading.RLock() - self._log_render = LogRender( - show_time=log_time, - show_path=log_path, - time_format=log_time_format, - ) - self.highlighter: HighlighterType = highlighter or _null_highlighter - self.safe_box = safe_box - self.get_datetime = get_datetime or datetime.now - self.get_time = get_time or monotonic - self.style = style - self.no_color = ( - no_color if no_color is not None else "NO_COLOR" in self._environ - ) - self.is_interactive = ( - (self.is_terminal and not self.is_dumb_terminal) - if force_interactive is None - else force_interactive - ) - - self._record_buffer_lock = threading.RLock() - self._thread_locals = ConsoleThreadLocals( - theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme) - ) - self._record_buffer: List[Segment] = [] - self._render_hooks: List[RenderHook] = [] - self._live: Optional["Live"] = None - self._is_alt_screen = False - - def __repr__(self) -> str: - return f"" - - @property - def file(self) -> IO[str]: - """Get the file object to write to.""" - 
file = self._file or (sys.stderr if self.stderr else sys.stdout) - file = getattr(file, "rich_proxied_file", file) - return file - - @file.setter - def file(self, new_file: IO[str]) -> None: - """Set a new file object.""" - self._file = new_file - - @property - def _buffer(self) -> List[Segment]: - """Get a thread local buffer.""" - return self._thread_locals.buffer - - @property - def _buffer_index(self) -> int: - """Get a thread local buffer.""" - return self._thread_locals.buffer_index - - @_buffer_index.setter - def _buffer_index(self, value: int) -> None: - self._thread_locals.buffer_index = value - - @property - def _theme_stack(self) -> ThemeStack: - """Get the thread local theme stack.""" - return self._thread_locals.theme_stack - - def _detect_color_system(self) -> Optional[ColorSystem]: - """Detect color system from env vars.""" - if self.is_jupyter: - return ColorSystem.TRUECOLOR - if not self.is_terminal or self.is_dumb_terminal: - return None - if WINDOWS: # pragma: no cover - if self.legacy_windows: # pragma: no cover - return ColorSystem.WINDOWS - windows_console_features = get_windows_console_features() - return ( - ColorSystem.TRUECOLOR - if windows_console_features.truecolor - else ColorSystem.EIGHT_BIT - ) - else: - color_term = self._environ.get("COLORTERM", "").strip().lower() - if color_term in ("truecolor", "24bit"): - return ColorSystem.TRUECOLOR - term = self._environ.get("TERM", "").strip().lower() - _term_name, _hyphen, colors = term.rpartition("-") - color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD) - return color_system - - def _enter_buffer(self) -> None: - """Enter in to a buffer context, and buffer all output.""" - self._buffer_index += 1 - - def _exit_buffer(self) -> None: - """Leave buffer context, and render content if required.""" - self._buffer_index -= 1 - self._check_buffer() - - def set_live(self, live: "Live") -> None: - """Set Live instance. Used by Live context manager. - - Args: - live (Live): Live instance using this Console. - - Raises: - errors.LiveError: If this Console has a Live context currently active. - """ - with self._lock: - if self._live is not None: - raise errors.LiveError("Only one live display may be active at once") - self._live = live - - def clear_live(self) -> None: - """Clear the Live instance.""" - with self._lock: - self._live = None - - def push_render_hook(self, hook: RenderHook) -> None: - """Add a new render hook to the stack. - - Args: - hook (RenderHook): Render hook instance. - """ - with self._lock: - self._render_hooks.append(hook) - - def pop_render_hook(self) -> None: - """Pop the last renderhook from the stack.""" - with self._lock: - self._render_hooks.pop() - - def __enter__(self) -> "Console": - """Own context manager to enter buffer context.""" - self._enter_buffer() - return self - - def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: - """Exit buffer context.""" - self._exit_buffer() - - def begin_capture(self) -> None: - """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output.""" - self._enter_buffer() - - def end_capture(self) -> str: - """End capture mode and return captured string. - - Returns: - str: Console output. - """ - render_result = self._render_buffer(self._buffer) - del self._buffer[:] - self._exit_buffer() - return render_result - - def push_theme(self, theme: Theme, *, inherit: bool = True) -> None: - """Push a new theme on to the top of the stack, replacing the styles from the previous theme. 
- Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather - than calling this method directly. - - Args: - theme (Theme): A theme instance. - inherit (bool, optional): Inherit existing styles. Defaults to True. - """ - self._theme_stack.push_theme(theme, inherit=inherit) - - def pop_theme(self) -> None: - """Remove theme from top of stack, restoring previous theme.""" - self._theme_stack.pop_theme() - - def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext: - """Use a different theme for the duration of the context manager. - - Args: - theme (Theme): Theme instance to user. - inherit (bool, optional): Inherit existing console styles. Defaults to True. - - Returns: - ThemeContext: [description] - """ - return ThemeContext(self, theme, inherit) - - @property - def color_system(self) -> Optional[str]: - """Get color system string. - - Returns: - Optional[str]: "standard", "256" or "truecolor". - """ - - if self._color_system is not None: - return _COLOR_SYSTEMS_NAMES[self._color_system] - else: - return None - - @property - def encoding(self) -> str: - """Get the encoding of the console file, e.g. ``"utf-8"``. - - Returns: - str: A standard encoding string. - """ - return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower() - - @property - def is_terminal(self) -> bool: - """Check if the console is writing to a terminal. - - Returns: - bool: True if the console writing to a device capable of - understanding terminal codes, otherwise False. - """ - if self._force_terminal is not None: - return self._force_terminal - isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) - try: - return False if isatty is None else isatty() - except ValueError: - # in some situation (at the end of a pytest run for example) isatty() can raise - # ValueError: I/O operation on closed file - # return False because we aren't in a terminal anymore - return False - - @property - def is_dumb_terminal(self) -> bool: - """Detect dumb terminal. - - Returns: - bool: True if writing to a dumb terminal, otherwise False. - - """ - _term = self._environ.get("TERM", "") - is_dumb = _term.lower() in ("dumb", "unknown") - return self.is_terminal and is_dumb - - @property - def options(self) -> ConsoleOptions: - """Get default console options.""" - return ConsoleOptions( - max_height=self.size.height, - size=self.size, - legacy_windows=self.legacy_windows, - min_width=1, - max_width=self.width, - encoding=self.encoding, - is_terminal=self.is_terminal, - ) - - @property - def size(self) -> ConsoleDimensions: - """Get the size of the console. - - Returns: - ConsoleDimensions: A named tuple containing the dimensions. 
- """ - - if self._width is not None and self._height is not None: - return ConsoleDimensions(self._width - self.legacy_windows, self._height) - - if self.is_dumb_terminal: - return ConsoleDimensions(80, 25) - - width: Optional[int] = None - height: Optional[int] = None - - if WINDOWS: # pragma: no cover - try: - width, height = os.get_terminal_size() - except OSError: # Probably not a terminal - pass - else: - try: - width, height = os.get_terminal_size(sys.__stdin__.fileno()) - except (AttributeError, ValueError, OSError): - try: - width, height = os.get_terminal_size(sys.__stdout__.fileno()) - except (AttributeError, ValueError, OSError): - pass - - columns = self._environ.get("COLUMNS") - if columns is not None and columns.isdigit(): - width = int(columns) - lines = self._environ.get("LINES") - if lines is not None and lines.isdigit(): - height = int(lines) - - # get_terminal_size can report 0, 0 if run from pseudo-terminal - width = width or 80 - height = height or 25 - return ConsoleDimensions( - width - self.legacy_windows if self._width is None else self._width, - height if self._height is None else self._height, - ) - - @size.setter - def size(self, new_size: Tuple[int, int]) -> None: - """Set a new size for the terminal. - - Args: - new_size (Tuple[int, int]): New width and height. - """ - width, height = new_size - self._width = width - self._height = height - - @property - def width(self) -> int: - """Get the width of the console. - - Returns: - int: The width (in characters) of the console. - """ - return self.size.width - - @width.setter - def width(self, width: int) -> None: - """Set width. - - Args: - width (int): New width. - """ - self._width = width - - @property - def height(self) -> int: - """Get the height of the console. - - Returns: - int: The height (in lines) of the console. - """ - return self.size.height - - @height.setter - def height(self, height: int) -> None: - """Set height. - - Args: - height (int): new height. - """ - self._height = height - - def bell(self) -> None: - """Play a 'bell' sound (if supported by the terminal).""" - self.control(Control.bell()) - - def capture(self) -> Capture: - """A context manager to *capture* the result of print() or log() in a string, - rather than writing it to the console. - - Example: - >>> from rich.console import Console - >>> console = Console() - >>> with console.capture() as capture: - ... console.print("[bold magenta]Hello World[/]") - >>> print(capture.get()) - - Returns: - Capture: Context manager with disables writing to the terminal. - """ - capture = Capture(self) - return capture - - def pager( - self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False - ) -> PagerContext: - """A context manager to display anything printed within a "pager". The pager application - is defined by the system and will typically support at least pressing a key to scroll. - - Args: - pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None. - styles (bool, optional): Show styles in pager. Defaults to False. - links (bool, optional): Show links in pager. Defaults to False. - - Example: - >>> from rich.console import Console - >>> from rich.__main__ import make_test_card - >>> console = Console() - >>> with console.pager(): - console.print(make_test_card()) - - Returns: - PagerContext: A context manager. - """ - return PagerContext(self, pager=pager, styles=styles, links=links) - - def line(self, count: int = 1) -> None: - """Write new line(s). 
- - Args: - count (int, optional): Number of new lines. Defaults to 1. - """ - - assert count >= 0, "count must be >= 0" - self.print(NewLine(count)) - - def clear(self, home: bool = True) -> None: - """Clear the screen. - - Args: - home (bool, optional): Also move the cursor to 'home' position. Defaults to True. - """ - if home: - self.control(Control.clear(), Control.home()) - else: - self.control(Control.clear()) - - def status( - self, - status: RenderableType, - *, - spinner: str = "dots", - spinner_style: str = "status.spinner", - speed: float = 1.0, - refresh_per_second: float = 12.5, - ) -> "Status": - """Display a status and spinner. - - Args: - status (RenderableType): A status renderable (str or Text typically). - spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". - spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". - speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. - refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. - - Returns: - Status: A Status object that may be used as a context manager. - """ - from .status import Status - - status_renderable = Status( - status, - console=self, - spinner=spinner, - spinner_style=spinner_style, - speed=speed, - refresh_per_second=refresh_per_second, - ) - return status_renderable - - def show_cursor(self, show: bool = True) -> bool: - """Show or hide the cursor. - - Args: - show (bool, optional): Set visibility of the cursor. - """ - if self.is_terminal and not self.legacy_windows: - self.control(Control.show_cursor(show)) - return True - return False - - def set_alt_screen(self, enable: bool = True) -> bool: - """Enables alternative screen mode. - - Note, if you enable this mode, you should ensure that is disabled before - the application exits. See :meth:`~rich.Console.screen` for a context manager - that handles this for you. - - Args: - enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True. - - Returns: - bool: True if the control codes were written. - - """ - changed = False - if self.is_terminal and not self.legacy_windows: - self.control(Control.alt_screen(enable)) - changed = True - self._is_alt_screen = enable - return changed - - @property - def is_alt_screen(self) -> bool: - """Check if the alt screen was enabled. - - Returns: - bool: True if the alt screen was enabled, otherwise False. - """ - return self._is_alt_screen - - def screen( - self, hide_cursor: bool = True, style: Optional[StyleType] = None - ) -> "ScreenContext": - """Context manager to enable and disable 'alternative screen' mode. - - Args: - hide_cursor (bool, optional): Also hide the cursor. Defaults to False. - style (Style, optional): Optional style for screen. Defaults to None. - - Returns: - ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit. - """ - return ScreenContext(self, hide_cursor=hide_cursor, style=style or "") - - def measure( - self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None - ) -> Measurement: - """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains - information regarding the number of characters required to print the renderable. - - Args: - renderable (RenderableType): Any renderable or string. - options (Optional[ConsoleOptions], optional): Options to use when measuring, or None - to use default options. Defaults to None. 
- - Returns: - Measurement: A measurement of the renderable. - """ - measurement = Measurement.get(self, options or self.options, renderable) - return measurement - - def render( - self, renderable: RenderableType, options: Optional[ConsoleOptions] = None - ) -> Iterable[Segment]: - """Render an object in to an iterable of `Segment` instances. - - This method contains the logic for rendering objects with the console protocol. - You are unlikely to need to use it directly, unless you are extending the library. - - Args: - renderable (RenderableType): An object supporting the console protocol, or - an object that may be converted to a string. - options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None. - - Returns: - Iterable[Segment]: An iterable of segments that may be rendered. - """ - - _options = options or self.options - if _options.max_width < 1: - # No space to render anything. This prevents potential recursion errors. - return - render_iterable: RenderResult - - renderable = rich_cast(renderable) - if hasattr(renderable, "__rich_console__") and not isclass(renderable): - render_iterable = renderable.__rich_console__(self, _options) # type: ignore - elif isinstance(renderable, str): - text_renderable = self.render_str( - renderable, highlight=_options.highlight, markup=_options.markup - ) - render_iterable = text_renderable.__rich_console__(self, _options) - else: - raise errors.NotRenderableError( - f"Unable to render {renderable!r}; " - "A str, Segment or object with __rich_console__ method is required" - ) - - try: - iter_render = iter(render_iterable) - except TypeError: - raise errors.NotRenderableError( - f"object {render_iterable!r} is not renderable" - ) - _Segment = Segment - for render_output in iter_render: - if isinstance(render_output, _Segment): - yield render_output - else: - yield from self.render(render_output, _options) - - def render_lines( - self, - renderable: RenderableType, - options: Optional[ConsoleOptions] = None, - *, - style: Optional[Style] = None, - pad: bool = True, - new_lines: bool = False, - ) -> List[List[Segment]]: - """Render objects in to a list of lines. - - The output of render_lines is useful when further formatting of rendered console text - is required, such as the Panel class which draws a border around any renderable object. - - Args: - renderable (RenderableType): Any object renderable in the console. - options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``. - style (Style, optional): Optional style to apply to renderables. Defaults to ``None``. - pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``. - new_lines (bool, optional): Include "\n" characters at end of lines. - - Returns: - List[List[Segment]]: A list of lines, where a line is a list of Segment objects. 
- """ - with self._lock: - render_options = options or self.options - _rendered = self.render(renderable, render_options) - if style: - _rendered = Segment.apply_style(_rendered, style) - lines = list( - islice( - Segment.split_and_crop_lines( - _rendered, - render_options.max_width, - include_new_lines=new_lines, - pad=pad, - ), - None, - render_options.height, - ) - ) - if render_options.height is not None: - extra_lines = render_options.height - len(lines) - if extra_lines > 0: - pad_line = [ - [Segment(" " * render_options.max_width, style), Segment("\n")] - if new_lines - else [Segment(" " * render_options.max_width, style)] - ] - lines.extend(pad_line * extra_lines) - - return lines - - def render_str( - self, - text: str, - *, - style: Union[str, Style] = "", - justify: Optional[JustifyMethod] = None, - overflow: Optional[OverflowMethod] = None, - emoji: Optional[bool] = None, - markup: Optional[bool] = None, - highlight: Optional[bool] = None, - highlighter: Optional[HighlighterType] = None, - ) -> "Text": - """Convert a string to a Text instance. This is is called automatically if - you print or log a string. - - Args: - text (str): Text to render. - style (Union[str, Style], optional): Style to apply to rendered text. - justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``. - overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``. - emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default. - markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default. - highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default. - highlighter (HighlighterType, optional): Optional highlighter to apply. - Returns: - ConsoleRenderable: Renderable object. - - """ - emoji_enabled = emoji or (emoji is None and self._emoji) - markup_enabled = markup or (markup is None and self._markup) - highlight_enabled = highlight or (highlight is None and self._highlight) - - if markup_enabled: - rich_text = render_markup( - text, - style=style, - emoji=emoji_enabled, - emoji_variant=self._emoji_variant, - ) - rich_text.justify = justify - rich_text.overflow = overflow - else: - rich_text = Text( - _emoji_replace(text, default_variant=self._emoji_variant) - if emoji_enabled - else text, - justify=justify, - overflow=overflow, - style=style, - ) - - _highlighter = (highlighter or self.highlighter) if highlight_enabled else None - if _highlighter is not None: - highlight_text = _highlighter(str(rich_text)) - highlight_text.copy_styles(rich_text) - return highlight_text - - return rich_text - - def get_style( - self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None - ) -> Style: - """Get a Style instance by it's theme name or parse a definition. - - Args: - name (str): The name of a style or a style definition. - - Returns: - Style: A Style object. - - Raises: - MissingStyle: If no style could be parsed from name. 
- - """ - if isinstance(name, Style): - return name - - try: - style = self._theme_stack.get(name) - if style is None: - style = Style.parse(name) - return style.copy() if style.link else style - except errors.StyleSyntaxError as error: - if default is not None: - return self.get_style(default) - raise errors.MissingStyle( - f"Failed to get style {name!r}; {error}" - ) from None - - def _collect_renderables( - self, - objects: Iterable[Any], - sep: str, - end: str, - *, - justify: Optional[JustifyMethod] = None, - emoji: Optional[bool] = None, - markup: Optional[bool] = None, - highlight: Optional[bool] = None, - ) -> List[ConsoleRenderable]: - """Combine a number of renderables and text into one renderable. - - Args: - objects (Iterable[Any]): Anything that Rich can render. - sep (str): String to write between print data. - end (str): String to write at end of print data. - justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. - emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. - markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. - highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. - - Returns: - List[ConsoleRenderable]: A list of things to render. - """ - renderables: List[ConsoleRenderable] = [] - _append = renderables.append - text: List[Text] = [] - append_text = text.append - - append = _append - if justify in ("left", "center", "right"): - - def align_append(renderable: RenderableType) -> None: - _append(Align(renderable, cast(AlignMethod, justify))) - - append = align_append - - _highlighter: HighlighterType = _null_highlighter - if highlight or (highlight is None and self._highlight): - _highlighter = self.highlighter - - def check_text() -> None: - if text: - sep_text = Text(sep, justify=justify, end=end) - append(sep_text.join(text)) - del text[:] - - for renderable in objects: - renderable = rich_cast(renderable) - if isinstance(renderable, str): - append_text( - self.render_str( - renderable, emoji=emoji, markup=markup, highlighter=_highlighter - ) - ) - elif isinstance(renderable, Text): - append_text(renderable) - elif isinstance(renderable, ConsoleRenderable): - check_text() - append(renderable) - elif is_expandable(renderable): - check_text() - append(Pretty(renderable, highlighter=_highlighter)) - else: - append_text(_highlighter(str(renderable))) - - check_text() - - if self.style is not None: - style = self.get_style(self.style) - renderables = [Styled(renderable, style) for renderable in renderables] - - return renderables - - def rule( - self, - title: TextType = "", - *, - characters: str = "─", - style: Union[str, Style] = "rule.line", - align: AlignMethod = "center", - ) -> None: - """Draw a line with optional centered title. - - Args: - title (str, optional): Text to render over the rule. Defaults to "". - characters (str, optional): Character(s) to form the line. Defaults to "─". - style (str, optional): Style of line. Defaults to "rule.line". - align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". - """ - from .rule import Rule - - rule = Rule(title=title, characters=characters, style=style, align=align) - self.print(rule) - - def control(self, *control: Control) -> None: - """Insert non-printing control codes. - - Args: - control_codes (str): Control codes, such as those that may move the cursor. 
- """ - if not self.is_dumb_terminal: - with self: - self._buffer.extend(_control.segment for _control in control) - - def out( - self, - *objects: Any, - sep: str = " ", - end: str = "\n", - style: Optional[Union[str, Style]] = None, - highlight: Optional[bool] = None, - ) -> None: - """Output to the terminal. This is a low-level way of writing to the terminal which unlike - :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will - optionally apply highlighting and a basic style. - - Args: - sep (str, optional): String to write between print data. Defaults to " ". - end (str, optional): String to write at end of print data. Defaults to "\\\\n". - style (Union[str, Style], optional): A style to apply to output. Defaults to None. - highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use - console default. Defaults to ``None``. - """ - raw_output: str = sep.join(str(_object) for _object in objects) - self.print( - raw_output, - style=style, - highlight=highlight, - emoji=False, - markup=False, - no_wrap=True, - overflow="ignore", - crop=False, - end=end, - ) - - def print( - self, - *objects: Any, - sep: str = " ", - end: str = "\n", - style: Optional[Union[str, Style]] = None, - justify: Optional[JustifyMethod] = None, - overflow: Optional[OverflowMethod] = None, - no_wrap: Optional[bool] = None, - emoji: Optional[bool] = None, - markup: Optional[bool] = None, - highlight: Optional[bool] = None, - width: Optional[int] = None, - height: Optional[int] = None, - crop: bool = True, - soft_wrap: Optional[bool] = None, - new_line_start: bool = False, - ) -> None: - """Print to the console. - - Args: - objects (positional args): Objects to log to the terminal. - sep (str, optional): String to write between print data. Defaults to " ". - end (str, optional): String to write at end of print data. Defaults to "\\\\n". - style (Union[str, Style], optional): A style to apply to output. Defaults to None. - justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``. - overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None. - no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None. - emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``. - markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``. - highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. - width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``. - crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True. - soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for - Console default. Defaults to ``None``. - new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``. 
- """ - if not objects: - objects = (NewLine(),) - - if soft_wrap is None: - soft_wrap = self.soft_wrap - if soft_wrap: - if no_wrap is None: - no_wrap = True - if overflow is None: - overflow = "ignore" - crop = False - render_hooks = self._render_hooks[:] - with self: - renderables = self._collect_renderables( - objects, - sep, - end, - justify=justify, - emoji=emoji, - markup=markup, - highlight=highlight, - ) - for hook in render_hooks: - renderables = hook.process_renderables(renderables) - render_options = self.options.update( - justify=justify, - overflow=overflow, - width=min(width, self.width) if width is not None else NO_CHANGE, - height=height, - no_wrap=no_wrap, - markup=markup, - highlight=highlight, - ) - - new_segments: List[Segment] = [] - extend = new_segments.extend - render = self.render - if style is None: - for renderable in renderables: - extend(render(renderable, render_options)) - else: - for renderable in renderables: - extend( - Segment.apply_style( - render(renderable, render_options), self.get_style(style) - ) - ) - if new_line_start: - if ( - len("".join(segment.text for segment in new_segments).splitlines()) - > 1 - ): - new_segments.insert(0, Segment.line()) - if crop: - buffer_extend = self._buffer.extend - for line in Segment.split_and_crop_lines( - new_segments, self.width, pad=False - ): - buffer_extend(line) - else: - self._buffer.extend(new_segments) - - def print_json( - self, - json: Optional[str] = None, - *, - data: Any = None, - indent: Union[None, int, str] = 2, - highlight: bool = True, - skip_keys: bool = False, - ensure_ascii: bool = True, - check_circular: bool = True, - allow_nan: bool = True, - default: Optional[Callable[[Any], Any]] = None, - sort_keys: bool = False, - ) -> None: - """Pretty prints JSON. Output will be valid JSON. - - Args: - json (Optional[str]): A string containing JSON. - data (Any): If json is not supplied, then encode this data. - indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2. - highlight (bool, optional): Enable highlighting of output: Defaults to True. - skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. - ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. - check_circular (bool, optional): Check for circular references. Defaults to True. - allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. - default (Callable, optional): A callable that converts values that can not be encoded - in to something that can be JSON encoded. Defaults to None. - sort_keys (bool, optional): Sort dictionary keys. Defaults to False. - """ - from pip._vendor.rich.json import JSON - - if json is None: - json_renderable = JSON.from_data( - data, - indent=indent, - highlight=highlight, - skip_keys=skip_keys, - ensure_ascii=ensure_ascii, - check_circular=check_circular, - allow_nan=allow_nan, - default=default, - sort_keys=sort_keys, - ) - else: - if not isinstance(json, str): - raise TypeError( - f"json must be str. Did you mean print_json(data={json!r}) ?" - ) - json_renderable = JSON( - json, - indent=indent, - highlight=highlight, - skip_keys=skip_keys, - ensure_ascii=ensure_ascii, - check_circular=check_circular, - allow_nan=allow_nan, - default=default, - sort_keys=sort_keys, - ) - self.print(json_renderable, soft_wrap=True) - - def update_screen( - self, - renderable: RenderableType, - *, - region: Optional[Region] = None, - options: Optional[ConsoleOptions] = None, - ) -> None: - """Update the screen at a given offset. 
- - Args: - renderable (RenderableType): A Rich renderable. - region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None. - x (int, optional): x offset. Defaults to 0. - y (int, optional): y offset. Defaults to 0. - - Raises: - errors.NoAltScreen: If the Console isn't in alt screen mode. - - """ - if not self.is_alt_screen: - raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") - render_options = options or self.options - if region is None: - x = y = 0 - render_options = render_options.update_dimensions( - render_options.max_width, render_options.height or self.height - ) - else: - x, y, width, height = region - render_options = render_options.update_dimensions(width, height) - - lines = self.render_lines(renderable, options=render_options) - self.update_screen_lines(lines, x, y) - - def update_screen_lines( - self, lines: List[List[Segment]], x: int = 0, y: int = 0 - ) -> None: - """Update lines of the screen at a given offset. - - Args: - lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`). - x (int, optional): x offset (column no). Defaults to 0. - y (int, optional): y offset (column no). Defaults to 0. - - Raises: - errors.NoAltScreen: If the Console isn't in alt screen mode. - """ - if not self.is_alt_screen: - raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") - screen_update = ScreenUpdate(lines, x, y) - segments = self.render(screen_update) - self._buffer.extend(segments) - self._check_buffer() - - def print_exception( - self, - *, - width: Optional[int] = 100, - extra_lines: int = 3, - theme: Optional[str] = None, - word_wrap: bool = False, - show_locals: bool = False, - suppress: Iterable[Union[str, ModuleType]] = (), - max_frames: int = 100, - ) -> None: - """Prints a rich render of the last exception and traceback. - - Args: - width (Optional[int], optional): Number of characters used to render code. Defaults to 88. - extra_lines (int, optional): Additional lines of code to render. Defaults to 3. - theme (str, optional): Override pygments theme used in traceback - word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. - show_locals (bool, optional): Enable display of local variables. Defaults to False. - suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. - max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. - """ - from .traceback import Traceback - - traceback = Traceback( - width=width, - extra_lines=extra_lines, - theme=theme, - word_wrap=word_wrap, - show_locals=show_locals, - suppress=suppress, - max_frames=max_frames, - ) - self.print(traceback) - - @staticmethod - def _caller_frame_info( - offset: int, - currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe, - ) -> Tuple[str, int, Dict[str, Any]]: - """Get caller frame information. - - Args: - offset (int): the caller offset within the current frame stack. - currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to - retrieve the current frame. Defaults to ``inspect.currentframe``. - - Returns: - Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and - the dictionary of local variables associated with the caller frame. - - Raises: - RuntimeError: If the stack offset is invalid. 
- """ - # Ignore the frame of this local helper - offset += 1 - - frame = currentframe() - if frame is not None: - # Use the faster currentframe where implemented - while offset and frame: - frame = frame.f_back - offset -= 1 - assert frame is not None - return frame.f_code.co_filename, frame.f_lineno, frame.f_locals - else: - # Fallback to the slower stack - frame_info = inspect.stack()[offset] - return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals - - def log( - self, - *objects: Any, - sep: str = " ", - end: str = "\n", - style: Optional[Union[str, Style]] = None, - justify: Optional[JustifyMethod] = None, - emoji: Optional[bool] = None, - markup: Optional[bool] = None, - highlight: Optional[bool] = None, - log_locals: bool = False, - _stack_offset: int = 1, - ) -> None: - """Log rich content to the terminal. - - Args: - objects (positional args): Objects to log to the terminal. - sep (str, optional): String to write between print data. Defaults to " ". - end (str, optional): String to write at end of print data. Defaults to "\\\\n". - style (Union[str, Style], optional): A style to apply to output. Defaults to None. - justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. - overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None. - emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None. - markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None. - highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None. - log_locals (bool, optional): Boolean to enable logging of locals where ``log()`` - was called. Defaults to False. - _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1. 
- """ - if not objects: - objects = (NewLine(),) - - render_hooks = self._render_hooks[:] - - with self: - renderables = self._collect_renderables( - objects, - sep, - end, - justify=justify, - emoji=emoji, - markup=markup, - highlight=highlight, - ) - if style is not None: - renderables = [Styled(renderable, style) for renderable in renderables] - - filename, line_no, locals = self._caller_frame_info(_stack_offset) - link_path = None if filename.startswith("<") else os.path.abspath(filename) - path = filename.rpartition(os.sep)[-1] - if log_locals: - locals_map = { - key: value - for key, value in locals.items() - if not key.startswith("__") - } - renderables.append(render_scope(locals_map, title="[i]locals")) - - renderables = [ - self._log_render( - self, - renderables, - log_time=self.get_datetime(), - path=path, - line_no=line_no, - link_path=link_path, - ) - ] - for hook in render_hooks: - renderables = hook.process_renderables(renderables) - new_segments: List[Segment] = [] - extend = new_segments.extend - render = self.render - render_options = self.options - for renderable in renderables: - extend(render(renderable, render_options)) - buffer_extend = self._buffer.extend - for line in Segment.split_and_crop_lines( - new_segments, self.width, pad=False - ): - buffer_extend(line) - - def _check_buffer(self) -> None: - """Check if the buffer may be rendered.""" - if self.quiet: - del self._buffer[:] - return - with self._lock: - if self._buffer_index == 0: - if self.is_jupyter: # pragma: no cover - from .jupyter import display - - display(self._buffer, self._render_buffer(self._buffer[:])) - del self._buffer[:] - else: - text = self._render_buffer(self._buffer[:]) - del self._buffer[:] - if text: - try: - if WINDOWS: # pragma: no cover - # https://bugs.python.org/issue37871 - write = self.file.write - for line in text.splitlines(True): - write(line) - else: - self.file.write(text) - self.file.flush() - except UnicodeEncodeError as error: - error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" - raise - - def _render_buffer(self, buffer: Iterable[Segment]) -> str: - """Render buffered output, and clear buffer.""" - output: List[str] = [] - append = output.append - color_system = self._color_system - legacy_windows = self.legacy_windows - if self.record: - with self._record_buffer_lock: - self._record_buffer.extend(buffer) - not_terminal = not self.is_terminal - if self.no_color and color_system: - buffer = Segment.remove_color(buffer) - for text, style, control in buffer: - if style: - append( - style.render( - text, - color_system=color_system, - legacy_windows=legacy_windows, - ) - ) - elif not (not_terminal and control): - append(text) - - rendered = "".join(output) - return rendered - - def input( - self, - prompt: TextType = "", - *, - markup: bool = True, - emoji: bool = True, - password: bool = False, - stream: Optional[TextIO] = None, - ) -> str: - """Displays a prompt and waits for input from the user. The prompt may contain color / style. - - It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded. - - Args: - prompt (Union[str, Text]): Text to render in the prompt. - markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True. - emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True. - password: (bool, optional): Hide typed text. 
Defaults to False. - stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None. - - Returns: - str: Text read from stdin. - """ - prompt_str = "" - if prompt: - with self.capture() as capture: - self.print(prompt, markup=markup, emoji=emoji, end="") - prompt_str = capture.get() - if self.legacy_windows: - # Legacy windows doesn't like ANSI codes in getpass or input (colorama bug)? - self.file.write(prompt_str) - prompt_str = "" - if password: - result = getpass(prompt_str, stream=stream) - else: - if stream: - self.file.write(prompt_str) - result = stream.readline() - else: - result = input(prompt_str) - return result - - def export_text(self, *, clear: bool = True, styles: bool = False) -> str: - """Generate text from console contents (requires record=True argument in constructor). - - Args: - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text. - Defaults to ``False``. - - Returns: - str: String containing console contents. - - """ - assert ( - self.record - ), "To export console contents set record=True in the constructor or instance" - - with self._record_buffer_lock: - if styles: - text = "".join( - (style.render(text) if style else text) - for text, style, _ in self._record_buffer - ) - else: - text = "".join( - segment.text - for segment in self._record_buffer - if not segment.control - ) - if clear: - del self._record_buffer[:] - return text - - def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None: - """Generate text from console and save to a given location (requires record=True argument in constructor). - - Args: - path (str): Path to write text files. - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text. - Defaults to ``False``. - - """ - text = self.export_text(clear=clear, styles=styles) - with open(path, "wt", encoding="utf-8") as write_file: - write_file.write(text) - - def export_html( - self, - *, - theme: Optional[TerminalTheme] = None, - clear: bool = True, - code_format: Optional[str] = None, - inline_styles: bool = False, - ) -> str: - """Generate HTML from console contents (requires record=True argument in constructor). - - Args: - theme (TerminalTheme, optional): TerminalTheme object containing console colors. - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - code_format (str, optional): Format string to render HTML, should contain {foreground} - {background} and {code}. - inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files - larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. - Defaults to False. - - Returns: - str: String containing console contents as HTML. 
- """ - assert ( - self.record - ), "To export console contents set record=True in the constructor or instance" - fragments: List[str] = [] - append = fragments.append - _theme = theme or DEFAULT_TERMINAL_THEME - stylesheet = "" - - render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format - - with self._record_buffer_lock: - if inline_styles: - for text, style, _ in Segment.filter_control( - Segment.simplify(self._record_buffer) - ): - text = escape(text) - if style: - rule = style.get_html_style(_theme) - if style.link: - text = f'{text}' - text = f'{text}' if rule else text - append(text) - else: - styles: Dict[str, int] = {} - for text, style, _ in Segment.filter_control( - Segment.simplify(self._record_buffer) - ): - text = escape(text) - if style: - rule = style.get_html_style(_theme) - style_number = styles.setdefault(rule, len(styles) + 1) - if style.link: - text = f'{text}' - else: - text = f'{text}' - append(text) - stylesheet_rules: List[str] = [] - stylesheet_append = stylesheet_rules.append - for style_rule, style_number in styles.items(): - if style_rule: - stylesheet_append(f".r{style_number} {{{style_rule}}}") - stylesheet = "\n".join(stylesheet_rules) - - rendered_code = render_code_format.format( - code="".join(fragments), - stylesheet=stylesheet, - foreground=_theme.foreground_color.hex, - background=_theme.background_color.hex, - ) - if clear: - del self._record_buffer[:] - return rendered_code - - def save_html( - self, - path: str, - *, - theme: Optional[TerminalTheme] = None, - clear: bool = True, - code_format: str = CONSOLE_HTML_FORMAT, - inline_styles: bool = False, - ) -> None: - """Generate HTML from console contents and write to a file (requires record=True argument in constructor). - - Args: - path (str): Path to write html file. - theme (TerminalTheme, optional): TerminalTheme object containing console colors. - clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. - code_format (str, optional): Format string to render HTML, should contain {foreground} - {background} and {code}. - inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files - larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. - Defaults to False. 
- - """ - html = self.export_html( - theme=theme, - clear=clear, - code_format=code_format, - inline_styles=inline_styles, - ) - with open(path, "wt", encoding="utf-8") as write_file: - write_file.write(html) - - -if __name__ == "__main__": # pragma: no cover - console = Console() - - console.log( - "JSONRPC [i]request[/i]", - 5, - 1.3, - True, - False, - None, - { - "jsonrpc": "2.0", - "method": "subtract", - "params": {"minuend": 42, "subtrahend": 23}, - "id": 3, - }, - ) - - console.log("Hello, World!", "{'a': 1}", repr(console)) - - console.print( - { - "name": None, - "empty": [], - "quiz": { - "sport": { - "answered": True, - "q1": { - "question": "Which one is correct team name in NBA?", - "options": [ - "New York Bulls", - "Los Angeles Kings", - "Golden State Warriors", - "Huston Rocket", - ], - "answer": "Huston Rocket", - }, - }, - "maths": { - "answered": False, - "q1": { - "question": "5 + 7 = ?", - "options": [10, 11, 12, 13], - "answer": 12, - }, - "q2": { - "question": "12 - 8 = ?", - "options": [1, 2, 3, 4], - "answer": 4, - }, - }, - }, - } - ) - console.log("foo") diff --git a/spaces/ali-ghamdan/deoldify/fastai/imports/__init__.py b/spaces/ali-ghamdan/deoldify/fastai/imports/__init__.py deleted file mode 100644 index d61cb768acb79ed49c20afb7d0957110a8d8769f..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/imports/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .core import * -from .torch import * diff --git a/spaces/alistairmcleay/cambridge-masters-project/app.py b/spaces/alistairmcleay/cambridge-masters-project/app.py deleted file mode 100644 index 562923cc5ec9f65a430d9a68328df8c1583c0fac..0000000000000000000000000000000000000000 --- a/spaces/alistairmcleay/cambridge-masters-project/app.py +++ /dev/null @@ -1,204 +0,0 @@ -import random -import gradio as gr -import sys -import traceback -import pandas as pd -import gradio as gr -import json - -import yaml - -# from tqdm import tqdm -from scripts.UBAR_code.interaction import UBAR_interact -from scripts.user_model_code.interaction import multiwoz_interact -from scripts.UBAR_code.interaction.UBAR_interact import bcolors - -# Initialise agents -UBAR_checkpoint_path = "epoch50_trloss0.59_gpt2" -user_model_checkpoint_path = "MultiWOZ-full_checkpoint_step340k" - -sys_model = self_play_sys_model = UBAR_interact.UbarSystemModel( - "UBAR_sys_model", UBAR_checkpoint_path, "scripts/UBAR_code/interaction/config.yaml" -) -user_model = self_play_user_model = multiwoz_interact.NeuralAgent( - "user", user_model_checkpoint_path, "scripts/user_model_code/interaction/config.yaml" -) - - -# Get goals -n_goals = 100 -goals_path = "data/raw/UBAR/multi-woz/data.json" -print("Loading goals...") -goals = multiwoz_interact.read_multiWOZ_20_goals(goals_path, n_goals) - -# Initialise agent with first goal (can be incrememnted by user) for user simulator tab -curr_goal_idx = random.randint(0, n_goals - 1) -current_goal = goals[curr_goal_idx] -user_model.init_session(ini_goal=current_goal) - -# Do the same initialisation but for the self-play tab -curr_sp_goal_idx = random.randint(0, n_goals - 1) -current_sp_goal = goals[curr_sp_goal_idx] -self_play_user_model.init_session(ini_goal=current_sp_goal) - -# Get the responses for each agent and track conversation history -ds_history = [] -us_history = [] -self_play_history = [] - - -def reset_ds_state(): - ds_history.clear() - sys_model.init_session() - return ds_history - - -def reset_us_state(): - us_history.clear() - user_model.init_session(ini_goal=current_goal) - 
return us_history - - -def reset_self_play_state(): - self_play_history.clear() - self_play_sys_model.init_session() - self_play_user_model.init_session(ini_goal=current_sp_goal) - return self_play_history - - -def change_goal(): - global curr_goal_idx - global current_goal - curr_goal_idx = random.randint(0, n_goals - 1) - current_goal = goals[curr_goal_idx] - us_history = reset_us_state() - current_goal_yaml = yaml.dump(current_goal, default_flow_style=False) - return current_goal_yaml, us_history - - -def change_sp_goal(): - global curr_sp_goal_idx - global current_sp_goal - curr_sp_goal_idx = random.randint(0, n_goals - 1) - current_sp_goal = goals[curr_sp_goal_idx] - self_play_history = reset_self_play_state() - current_sp_goal_yaml = yaml.dump(current_sp_goal, default_flow_style=False) - return current_sp_goal_yaml, self_play_history - - -def ds_chatbot(user_utt): - turn_id = len(ds_history) - sys_response = sys_model.response(user_utt, turn_id) - sys_response = sys_response[0].upper() + sys_response[1:] - ds_history.append((user_utt, sys_response)) - return ds_history - - -def us_chatbot(sys_response): - user_utt = user_model.response(sys_response) - us_history.append((sys_response, user_utt)) - if user_model.is_terminated(): - change_goal() - return us_history - - -def self_play(): - if len(self_play_history) == 0: - sys_response = "" - else: - sys_response = self_play_history[-1][1] - - user_utt = self_play_user_model.response(sys_response) - - turn_id = len(self_play_history) - sys_response = self_play_sys_model.response(user_utt, turn_id) - sys_response = sys_response[0].upper() + sys_response[1:] - - self_play_history.append((user_utt, sys_response)) - - if user_model.is_terminated(): - change_goal() - - return self_play_history - - -# Reset state upon client-side refresh -reset_ds_state() -reset_us_state() -reset_self_play_state() - - -# Initialise demo render -block = gr.Blocks() - -with block: - gr.Markdown("# 💬 Jointly Optimized Task-Oriented Dialogue System And User Simulator 💬") - gr.Markdown( - "Created by [Alistair McLeay](https://alistairmcleay.com) for the [Masters in Machine Learning & Machine Intelligence at Cambridge University](https://www.mlmi.eng.cam.ac.uk/).
    \ - Thank you to [Professor Bill Byrne](https://sites.google.com/view/bill-byrne/home) for his supervision and guidance.
    \ - Thank you to [Andy Tseng](https://github.com/andy194673) and [Alex Coca](https://github.com/alexcoca) who provided code and guidance." - ) - gr.Markdown( - "Both Systems are trained on the [MultiWOZ dataset](https://github.com/budzianowski/multiwoz).
    \ - Supported domains are:
    \ - 1. 🚆 Train, 2. 🏨 Hotel, 3. 🚕 Taxi, 4. 🚓 Police, 5. 🏣 Restaurant, 6. 🗿 Attraction, 7. 🏥 Hospital." - ) - gr.Markdown( - "**Please note:**
    \ - 1. These systems are in development and are full of funny little bugs, as is this app.
    \ - 2. If you refresh this page, the conversation state will persist. To reset a conversation, you need to click 'Reset Conversation' below." - ) - with gr.Tabs(): - with gr.TabItem("Dialogue System"): - gr.Markdown( - "This bot is a Task-Oriented Dialogue System.
    \ - You are the user. Go ahead and try to book a train, or a hotel etc." - ) - with gr.Row(): - ds_input_text = gr.inputs.Textbox( - label="User Message", placeholder="I'd like to book a train from Cambridge to London" - ) - ds_response = gr.outputs.Chatbot(label="Dialogue System Response") - ds_button = gr.Button("Submit Message") - reset_ds_button = gr.Button("Reset Conversation") - - with gr.TabItem("User Simulator"): - gr.Markdown( - "This bot is a User Simulator.
    \ - You are the Task-Oriented Dialogue System. Your job is to help the user with their requests.
    \ - If you want the User Simulator to have a different goal, press 'Generate New Goal'." - ) - with gr.Row(): - us_input_text = gr.inputs.Textbox( - label="Dialogue System Message", placeholder="How can I help you today?" - ) - us_response = gr.outputs.Chatbot(label="User Simulator Response") - us_button = gr.Button("Submit Message") - reset_us_button = gr.Button("Reset Conversation") - new_goal_button = gr.Button("Generate New Goal") - current_goal_yaml = gr.outputs.Textbox(label="New Goal (YAML)") - - with gr.TabItem("Self-Play"): - gr.Markdown( - "In this case both the User Simulator and the Task-Oriented Dialogue System are agents.
    \ - Get them to interact by pressing 'Run Next Step'.
    \ - If you want the User Simulator to have a different goal press 'Generate New Goal'." - ) - self_play_response = gr.outputs.Chatbot(label="Self-Play Output") - self_play_button = gr.Button("Run Next Step") - reset_self_play_button = gr.Button("Reset Conversation") - new_sp_goal_button = gr.Button("Generate New Goal") - current_sp_goal_yaml = gr.outputs.Textbox(label="New Goal (YAML)") - - gr.Markdown("Want to get in touch? [Email me](mailto:am@alistairmcleay.com)") - - ds_button.click(ds_chatbot, ds_input_text, ds_response) - us_button.click(us_chatbot, us_input_text, us_response) - self_play_button.click(self_play, None, self_play_response) - new_goal_button.click(change_goal, None, [current_goal_yaml, us_response]) - new_sp_goal_button.click(change_sp_goal, None, [current_sp_goal_yaml, self_play_response]) - reset_ds_button.click(reset_ds_state, None, ds_response) - reset_us_button.click(reset_us_state, None, us_response) - reset_self_play_button.click(reset_self_play_state, None, self_play_response) - -block.launch() diff --git a/spaces/alphunt/diffdock-alphunt-demo/datasets/pdbbind_lm_embedding_preparation.py b/spaces/alphunt/diffdock-alphunt-demo/datasets/pdbbind_lm_embedding_preparation.py deleted file mode 100644 index cabc81c489649cf3b8fe564421fdadc24bec6823..0000000000000000000000000000000000000000 --- a/spaces/alphunt/diffdock-alphunt-demo/datasets/pdbbind_lm_embedding_preparation.py +++ /dev/null @@ -1,94 +0,0 @@ -import os -from argparse import FileType, ArgumentParser - -import numpy as np -from Bio.PDB import PDBParser -from Bio.Seq import Seq -from Bio.SeqRecord import SeqRecord -from tqdm import tqdm - -parser = ArgumentParser() -parser.add_argument('--data_dir', type=str, default='data/PDBBind_processed', help='') -parser.add_argument('--chain_cutoff', type=int, default=10, help='') -parser.add_argument('--out_file', type=str, default="data/pdbbind_sequences.fasta") -args = parser.parse_args() - -cutoff = args.chain_cutoff -data_dir = args.data_dir -names = os.listdir(data_dir) -#%% -from Bio import SeqIO -biopython_parser = PDBParser() - -three_to_one = {'ALA': 'A', -'ARG': 'R', -'ASN': 'N', -'ASP': 'D', -'CYS': 'C', -'GLN': 'Q', -'GLU': 'E', -'GLY': 'G', -'HIS': 'H', -'ILE': 'I', -'LEU': 'L', -'LYS': 'K', -'MET': 'M', -'MSE': 'M', # this is almost the same AA as MET. 
The sulfur is just replaced by Selen -'PHE': 'F', -'PRO': 'P', -'PYL': 'O', -'SER': 'S', -'SEC': 'U', -'THR': 'T', -'TRP': 'W', -'TYR': 'Y', -'VAL': 'V', -'ASX': 'B', -'GLX': 'Z', -'XAA': 'X', -'XLE': 'J'} - -sequences = [] -ids = [] -for name in tqdm(names): - if name == '.DS_Store': continue - if os.path.exists(os.path.join(data_dir, name, f'{name}_protein_processed.pdb')): - rec_path = os.path.join(data_dir, name, f'{name}_protein_processed.pdb') - else: - rec_path = os.path.join(data_dir, name, f'{name}_protein.pdb') - if cutoff > 10: - rec_path = os.path.join(data_dir, name, f'{name}_protein_obabel_reduce.pdb') - if not os.path.exists(rec_path): - rec_path = os.path.join(data_dir, name, f'{name}_protein.pdb') - structure = biopython_parser.get_structure('random_id', rec_path) - structure = structure[0] - for i, chain in enumerate(structure): - seq = '' - for res_idx, residue in enumerate(chain): - if residue.get_resname() == 'HOH': - continue - residue_coords = [] - c_alpha, n, c = None, None, None - for atom in residue: - if atom.name == 'CA': - c_alpha = list(atom.get_vector()) - if atom.name == 'N': - n = list(atom.get_vector()) - if atom.name == 'C': - c = list(atom.get_vector()) - if c_alpha != None and n != None and c != None: # only append residue if it is an amino acid and not - try: - seq += three_to_one[residue.get_resname()] - except Exception as e: - seq += '-' - print("encountered unknown AA: ", residue.get_resname(), ' in the complex ', name, '. Replacing it with a dash - .') - sequences.append(seq) - ids.append(f'{name}_chain_{i}') -records = [] -for (index, seq) in zip(ids,sequences): - record = SeqRecord(Seq(seq), str(index)) - record.description = '' - records.append(record) -SeqIO.write(records, args.out_file, "fasta") - - diff --git a/spaces/alvanlii/FROMAGe/fromage/utils.py b/spaces/alvanlii/FROMAGe/fromage/utils.py deleted file mode 100644 index ad6ba695643897f97f7c9aaf2179f44d26f02eb6..0000000000000000000000000000000000000000 --- a/spaces/alvanlii/FROMAGe/fromage/utils.py +++ /dev/null @@ -1,250 +0,0 @@ -from enum import Enum -import subprocess -import sys -import shutil -import torch -import torch.distributed as dist -from torchvision.transforms import functional as F -from torchvision import transforms as T -from transformers import AutoFeatureExtractor -from PIL import Image, ImageDraw, ImageFont, ImageOps -import requests -from io import BytesIO - -import random - - -def dump_git_status(out_file=sys.stdout, exclude_file_patterns=['*.ipynb', '*.th', '*.sh', '*.txt', '*.json']): - """Logs git status to stdout.""" - subprocess.call('git rev-parse HEAD', shell=True, stdout=out_file) - subprocess.call('echo', shell=True, stdout=out_file) - exclude_string = '' - subprocess.call('git --no-pager diff -- . 
{}'.format(exclude_string), shell=True, stdout=out_file) - - -def get_image_from_url(url: str): - response = requests.get(url) - img = Image.open(BytesIO(response.content)) - img = img.resize((224, 224)) - img = img.convert('RGB') - return img - - -def truncate_caption(caption: str) -> str: - """Truncate captions at periods and newlines.""" - trunc_index = caption.find('\n') + 1 - if trunc_index <= 0: - trunc_index = caption.find('.') + 1 - caption = caption[:trunc_index] - return caption - - -def pad_to_size(x, size=256): - delta_w = size - x.size[0] - delta_h = size - x.size[1] - padding = ( - delta_w // 2, - delta_h // 2, - delta_w - (delta_w // 2), - delta_h - (delta_h // 2), - ) - new_im = ImageOps.expand(x, padding) - return new_im - - -class RandCropResize(object): - - """ - Randomly crops, then randomly resizes, then randomly crops again, an image. Mirroring the augmentations from https://arxiv.org/abs/2102.12092 - """ - - def __init__(self, target_size): - self.target_size = target_size - - def __call__(self, img): - img = pad_to_size(img, self.target_size) - d_min = min(img.size) - img = T.RandomCrop(size=d_min)(img) - t_min = min(d_min, round(9 / 8 * self.target_size)) - t_max = min(d_min, round(12 / 8 * self.target_size)) - t = random.randint(t_min, t_max + 1) - img = T.Resize(t)(img) - if min(img.size) < 256: - img = T.Resize(256)(img) - return T.RandomCrop(size=self.target_size)(img) - - -class SquarePad(object): - """Pads image to square. - From https://discuss.pytorch.org/t/how-to-resize-and-pad-in-a-torchvision-transforms-compose/71850/9 - """ - def __call__(self, image): - max_wh = max(image.size) - p_left, p_top = [(max_wh - s) // 2 for s in image.size] - p_right, p_bottom = [max_wh - (s+pad) for s, pad in zip(image.size, [p_left, p_top])] - padding = (p_left, p_top, p_right, p_bottom) - return F.pad(image, padding, 0, 'constant') - - -def create_image_of_text(text: str, width: int = 224, nrows: int = 2, color=(255, 255, 255), font=None) -> torch.Tensor: - """Creates a (3, nrows * 14, width) image of text. - - Returns: - cap_img: (3, 14 * nrows, width) image of wrapped text. - """ - height = 12 - padding = 5 - effective_width = width - 2 * padding - # Create a black image to draw text on. - cap_img = Image.new('RGB', (effective_width * nrows, height), color = (0, 0, 0)) - draw = ImageDraw.Draw(cap_img) - draw.text((0, 0), text, color, font=font or ImageFont.load_default()) - cap_img = F.convert_image_dtype(F.pil_to_tensor(cap_img), torch.float32) # (3, height, W * nrows) - cap_img = torch.split(cap_img, effective_width, dim=-1) # List of nrow elements of shape (3, height, W) - cap_img = torch.cat(cap_img, dim=1) # (3, height * nrows, W) - # Add zero padding. - cap_img = torch.nn.functional.pad(cap_img, [padding, padding, 0, padding]) - return cap_img - - -def get_feature_extractor_for_model(model_name: str, image_size: int = 224, train: bool = True): - print(f'Using HuggingFace AutoFeatureExtractor for {model_name}.') - feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) - return feature_extractor - - -def get_pixel_values_for_model(feature_extractor, img): - pixel_values = feature_extractor( - img.convert('RGB'), - return_tensors="pt").pixel_values[0, ...] 
# (3, H, W) - return pixel_values - - -def save_checkpoint(state, is_best, filename='checkpoint'): - torch.save(state, filename + '.pth.tar') - if is_best: - shutil.copyfile(filename + '.pth.tar', filename + '_best.pth.tar') - - -def accuracy(output, target, padding, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - with torch.no_grad(): - maxk = max(topk) - if output.shape[-1] < maxk: - print(f"[WARNING] Less than {maxk} predictions available. Using {output.shape[-1]} for topk.") - - maxk = min(maxk, output.shape[-1]) - batch_size = target.size(0) - - # Take topk along the last dimension. - _, pred = output.topk(maxk, -1, True, True) # (N, T, topk) - - mask = (target != padding).type(target.dtype) - target_expand = target[..., None].expand_as(pred) - correct = pred.eq(target_expand) - correct = correct * mask[..., None].expand_as(correct) - - res = [] - for k in topk: - correct_k = correct[..., :k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / mask.sum())) - return res - - -def get_params_count(model, max_name_len: int = 60): - params = [(name[:max_name_len], p.numel(), str(tuple(p.shape)), p.requires_grad) for name, p in model.named_parameters()] - total_trainable_params = sum([x[1] for x in params if x[-1]]) - total_nontrainable_params = sum([x[1] for x in params if not x[-1]]) - return params, total_trainable_params, total_nontrainable_params - - -def get_params_count_str(model, max_name_len: int = 60): - padding = 70 # Hardcoded depending on desired amount of padding and separators. - params, total_trainable_params, total_nontrainable_params = get_params_count(model, max_name_len) - param_counts_text = '' - param_counts_text += '=' * (max_name_len + padding) + '\n' - param_counts_text += f'| {"Module":<{max_name_len}} | {"Trainable":<10} | {"Shape":>15} | {"Param Count":>12} |\n' - param_counts_text += '-' * (max_name_len + padding) + '\n' - for name, param_count, shape, trainable in params: - param_counts_text += f'| {name:<{max_name_len}} | {"True" if trainable else "False":<10} | {shape:>15} | {param_count:>12,} |\n' - param_counts_text += '-' * (max_name_len + padding) + '\n' - param_counts_text += f'| {"Total trainable params":<{max_name_len}} | {"":<10} | {"":<15} | {total_trainable_params:>12,} |\n' - param_counts_text += f'| {"Total non-trainable params":<{max_name_len}} | {"":<10} | {"":<15} | {total_nontrainable_params:>12,} |\n' - param_counts_text += '=' * (max_name_len + padding) + '\n' - return param_counts_text - - -class Summary(Enum): - NONE = 0 - AVERAGE = 1 - SUM = 2 - COUNT = 3 - - -class ProgressMeter(object): - def __init__(self, num_batches, meters, prefix=""): - self.batch_fmtstr = self._get_batch_fmtstr(num_batches) - self.meters = meters - self.prefix = prefix - - def display(self, batch): - entries = [self.prefix + self.batch_fmtstr.format(batch)] - entries += [str(meter) for meter in self.meters] - print('\t'.join(entries)) - - def display_summary(self): - entries = [" *"] - entries += [meter.summary() for meter in self.meters] - print(' '.join(entries)) - - def _get_batch_fmtstr(self, num_batches): - num_digits = len(str(num_batches // 1)) - fmt = '{:' + str(num_digits) + 'd}' - return '[' + fmt + '/' + fmt.format(num_batches) + ']' - - -class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE): - self.name = name - self.fmt = fmt - self.summary_type = summary_type - self.reset() - 
- def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count - - def all_reduce(self): - device = "cuda" if torch.cuda.is_available() else "cpu" - total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device) - dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) - self.sum, self.count = total.tolist() - self.avg = self.sum / self.count - - def __str__(self): - fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' - return fmtstr.format(**self.__dict__) - - def summary(self): - fmtstr = '' - if self.summary_type is Summary.NONE: - fmtstr = '' - elif self.summary_type is Summary.AVERAGE: - fmtstr = '{name} {avg:.3f}' - elif self.summary_type is Summary.SUM: - fmtstr = '{name} {sum:.3f}' - elif self.summary_type is Summary.COUNT: - fmtstr = '{name} {count:.3f}' - else: - raise ValueError('invalid summary type %r' % self.summary_type) - - return fmtstr.format(**self.__dict__) diff --git a/spaces/amanatid/ArxivGPT_Streamlit/sidebar.py b/spaces/amanatid/ArxivGPT_Streamlit/sidebar.py deleted file mode 100644 index a56eb92850ff368732454b070e5bc605b54de67e..0000000000000000000000000000000000000000 --- a/spaces/amanatid/ArxivGPT_Streamlit/sidebar.py +++ /dev/null @@ -1,49 +0,0 @@ -import streamlit as st - -from faq import faq - - -#def set_openai_api_key(api_key: str): -# st.session_state["OPENAI_API_KEY"] = api_key - - -def sidebar(): - with st.sidebar: - st.markdown( - "## How to use\n" - "1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) below🔑\n" # noqa: E501 - "2. Choose the Scientific Topic to dicuss🚩\n" - "3. Load the number of papers you want to investigate. \n" - "4. Choose a criterion.\n" - "5. Wait for the message 'Arxiv papers are loaded based on the criteria' to be appeared.\n" - ) - - ''' - api_key_input = st.text_input( - "OpenAI API Key", - type="password", - placeholder="Paste your OpenAI API key here (sk-...)", - help="You can get your API key from https://platform.openai.com/account/api-keys.", - value=st.session_state.get("OPENAI_API_KEY", ""), - ) - - if api_key_input: - set_openai_api_key(api_key_input) - ''' - st.markdown("---") - st.markdown("# About") - st.markdown( - "📚ArxivGPT allows you to commit a scientific dialogue based on" - " a specific question/criterion and the amount of data that are loaded from" - "[arxiv.org](https://arxiv.org/). " - ) - st.markdown( - "This is a work in progress. " - "You can contribute to the project on [GitHub](https://github.com/amanatid/ArxivChatBot_StreamlitApp) " - "with your feedback and suggestions💡. Due to reqular updates from the llama/streamlit team, the app might " - "crash. I try to maintain it up. In any case, please report any problem in the email below." 
- ) - st.markdown("Made by [amanatid](amanatid@gmail.com)") - st.markdown("---") - - faq() \ No newline at end of file diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_low_level_latency_params.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_low_level_latency_params.c deleted file mode 100644 index d583e694f922b11031eff99c71277aaaecddfad9..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_low_level_latency_params.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * $Id: $ - * Portable Audio I/O Library - * Windows DirectSound low level buffer parameters test - * - * Copyright (c) 2011 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include - -#include "portaudio.h" -#include "pa_win_ds.h" - -#define NUM_SECONDS (6) -#define SAMPLE_RATE (44100) - -#define DSOUND_FRAMES_PER_HOST_BUFFER (256*2) //(440*10) - -#define FRAMES_PER_BUFFER 256 - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (2048) - -#define CHANNEL_COUNT (2) - - -typedef struct -{ - float sine[TABLE_SIZE]; - double phase; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i,j; - - (void) timeInfo; /* Prevent unused variable warnings. 
*/ - (void) statusFlags; - (void) inputBuffer; - - for( i=0; isine[(int)data->phase]; - data->phase += 20; - if( data->phase >= TABLE_SIZE ){ - data->phase -= TABLE_SIZE; - } - - for( j = 0; j < CHANNEL_COUNT; ++j ){ - *out++ = x; - } - } - - return paContinue; -} - -/*******************************************************************/ -int main(int argc, char* argv[]) -{ - PaStreamParameters outputParameters; - PaWinDirectSoundStreamInfo dsoundStreamInfo; - PaStream *stream; - PaError err; - paTestData data; - int i; - int deviceIndex; - - printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT); - - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paDirectSound ) )->defaultOutputDevice; - if( argc == 2 ){ - sscanf( argv[1], "%d", &deviceIndex ); - } - - printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name ); - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency;*/ - outputParameters.hostApiSpecificStreamInfo = NULL; - - dsoundStreamInfo.size = sizeof(PaWinDirectSoundStreamInfo); - dsoundStreamInfo.hostApiType = paDirectSound; - dsoundStreamInfo.version = 2; - dsoundStreamInfo.flags = paWinDirectSoundUseLowLevelLatencyParameters; - dsoundStreamInfo.framesPerBuffer = DSOUND_FRAMES_PER_HOST_BUFFER; - outputParameters.hostApiSpecificStreamInfo = &dsoundStreamInfo; - - - if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){ - printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT ); - }else{ - printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT ); - } - - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/andreassteiner/robo-call/app.py b/spaces/andreassteiner/robo-call/app.py deleted file mode 100644 index edb056c5c5b7231ee98e28fa843709baeb10f018..0000000000000000000000000000000000000000 --- a/spaces/andreassteiner/robo-call/app.py +++ /dev/null @@ -1,110 +0,0 @@ -import os - -import gradio as gr -import openai -from elevenlabs import generate, save, set_api_key, voices - -openai.api_key = os.environ["OPENAI_KEY"] -set_api_key(os.environ["ELEVEN_KEY"]) -voice = voices()[1] - -default_messages = [ - { - "role": "system", - "content": "You are an SDR called Johan Velez working for Valutico performing a Cold Call. The lead name is Andreas Steiner who is the CEO of Steiner Valuation Solutions out of Redmond, WA. Your goal is to sign them up for a demo of our valuation platform called Valutico, with one of our Account Executives. 
Please keep your responses brief and under 50 words, if possible.", - }, - { - "role": "assistant", - "content": "What can I do with Valutico? Valutico allows you to perform comprehensive business valuations within a fraction of the time usually required. What used to take days and was operationally inefficient can now be created and sent to clients and colleagues with a few clicks of your mouse.", - }, - { - "role": "assistant", - "content": "How does Valutico work? It's an old saying that “valuation is more art than science”. We here at Valutico agree and provide the technology and science to support your artistry. Our platform combines powerful algorithms and world-class financial databases to enhance the human experience in the valuation process. Valutico starts by asking you about key drivers of the business you are valuing, and helps you build a full valuation based on comps, transaction multiples, and a financial forecast.", - }, - { - "role": "assistant", - "content": "Why should I use Valutico? Business valuations are used for a variety of reasons: If you are an investor, you will appreciate the opportunity to value the companies you invest in based on your own unbiased assumptions. As an advisor, Valutico allows you to offer state-of-the-art valuation analyses to your clients without the need for subscribing to various different financial databases and managing a myriad of different spreadsheet models.", - }, - { - "role": "assistant", - "content": "Do I need to be an expoert in Finance to use Valutico? Our users tend to come from a variety of backgrounds and are namely comprised of Accountants, Auditors, Business Consultants and Tax Advisory firms, as well as Investment Management, Corporate Finance and M&A practitioners.", - }, - { - "role": "assistant", - "content": "Is Valutico available in languages other than English? Valutico is currently available in English, French, German, Spanish and Italian. We anticipate adding further languages on an ongoing basis.", - }, - { - "role": "assistant", - "content": "What are the technical requirements? We do the “heavy lifting” on our servers, so a computer, browser and Internet connection are the only requirements. In terms of browser software, please note that we recommend using Safari, Chrome or Firefox. The platform is currently not optimized for Internet Explorer.", - }, - { - "role": "assistant", - "content": "Is Valutico available as downloadable software? Valutico has been designed from the outset purely as a web-based solution. That means you can use Valutico on any computer and even on-the-go with your tablet. It also means that we take the stress out of costly installations and the responsibility for regular updates away, allowing you to focus on what you're best at whilst we support you.", - }, - { - "role": "assistant", - "content": "Who can use Valutico? Valutico has been designed from the ground up with ease-of-use and simplicity in mind. While Valutico's ‘inner workings' are fairly complex, its handling is kept deliberately simple, enabling us to provide valuation professionals with a platform of substantial analytical depth whilst ensuring user friendliness.", - }, - { - "role": "assistant", - "content": "What's the source of Valutico's financial and company data? Our systems aggregate financial information from a variety of public and non-public sources. 
The quality of our partners – some of the world's leading financial databases – together with Valutico's own diligent research efforts, guarantee that you get the best available information.", - }, - { - "role": "assistant", - "content": "How often is financial data in the background updated? Depending on the type of data, Valutico's update intervals vary between daily and weekly. Market data is typically updated daily, while financial forecasts and macroeconomic assumptions are updated on a weekly basis. EBITA Multiples, EBIT multiples, Revenue multiples, for both current and forward year are refreshed daily. Betas are calculated (levered or unlevered) on a rolling basis, and other data points from our company databases (such as analysts consensus estimates) or market sources (such as risk free rates, tax rates, etc.) are refreshed as frequently as our providers allow (often daily). Multiples by industry are calculated daily from the underlying datasets.", - }, - { - "role": "assistant", - "content": "How much does Valutico cost? That highly depends on your needs. We offer a variety of different packages, ranging from a free trial to a full enterprise solution. Generally, pricing starts ar 10000 EUR per year.", - }, - { - "role": "assistant", - "content": "Do you offer educational discounts? At Valutico we recognize our responsibility in training the next generation of Finance professionals. As such we do provide licenses for academic institutions. If you would like to use our platform in an academic setting, ask your faculty professor to contact us at partnerships@valutico.com.", - }, - { - "role": "assistant", - "content": "Does Valutico guarantee the accuracy of the data and results? The valuations derived by Valutico are indicative, may be based on historical data and should not be substituted for information derived from real time market data. They may vary significantly from indicative valuations available from other sources. There may be certain factors that have not been assessed for the purposes of these valuations that may substantially affect a stated valuation. While Valutico has obtained the information on which these valuations are based from sources that it believes to be reliable, Valutico makes no representations or warranties with respect to any indicative valuations. Past performance cannot be a guide to, or an indication of, future performance.", - }, - { - "role": "assistant", - "content": "Can I use Valutico to support my investment decisions? All information on Valutico is intended to assist professional investors. The information does not constitute investment advice and is subject to correction, completion and amendment without notice. It is not our intention to state, indicate or imply in any manner that current or past results are indicative of future results or expectations. As with all investments, there are associated risks and you could lose money investing. 
Prior to making any investment, a prospective investor should consult with its own investment, accounting, legal and tax advisers to evaluate independently the risks, consequences and suitability of that investment.", - }, -] - - -def chatbot_audio(input: str, messages): - if not input: - return - - audio_file = open(input, "rb") - transcript = openai.Audio.transcribe("whisper-1", audio_file) - text = transcript.get("text") - - if not text: - return - - messages.append({"role": "user", "content": text}) - chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages) - message = chat.choices[0].message.get("content") - messages.append({"role": "assistant", "content": message}) - - audio = generate(message, voice=voice) - save(audio, "./tmp/test.mp3") - - return "./tmp/test.mp3", messages - - -inputs = gr.inputs.Audio(source="microphone", type="filepath", label="You got a call...") -outputs_audio = gr.outputs.Audio(label="Reply", type="filepath") -state = gr.inputs.State(default=default_messages) -state_output = gr.outputs.State() - -demo = gr.Interface( - fn=chatbot_audio, - inputs=[inputs, state], - outputs=[outputs_audio, state_output], - title="AI Callbot", - description="AI SDR", - theme="compact", -) -demo.launch() diff --git a/spaces/andreped/AeroPath/demo/README.md b/spaces/andreped/AeroPath/demo/README.md deleted file mode 100644 index f57aea2774dc0f3342c0deea548df3e01d4bbf92..0000000000000000000000000000000000000000 --- a/spaces/andreped/AeroPath/demo/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Hugging Face demo - through docker SDK - -Deploying simple models in a gradio-based web interface in Hugging Face spaces is easy. -For any other custom pipeline, with various dependencies and challenging behaviour, it -might be necessary to use Docker containers instead. - -For every new push to the main branch, continuous deployment to the Hugging Face -`AeroPath` space is performed through a GitHub Actions workflow. - -When the space is updated, the Docker image is rebuilt/updated (caching if possible). -Then when finished, the end users can test the app as they please. - -Right now, the functionality of the app is extremely limited, only offering a widget -for uploading a NIfTI file (`.nii` or `.nii.gz`) and visualizing the produced surface -of the predicted lung tumor volume when finished processing. - -Analysis process can be monitored from the `Logs` tab next to the `Running` button -in the Hugging Face `AeroPath` space. - -It is also possible to build the app as a docker image and deploy it. To do so follow these steps: - -``` -docker build -t AeroPath:latest .. -docker run -it -p 7860:7860 AeroPath:latest -``` - -Then open `http://localhost:7860` in your favourite internet browser to view the demo. - -TODOs: -- [X] Add gallery widget to enable scrolling through 2D slices -- [X] Render segmentation for individual 2D slices as overlays diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/Using-LoRAs.md b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/Using-LoRAs.md deleted file mode 100644 index fafd6cde2d87bfdf46d942ab841a74bf50facdb5..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/Using-LoRAs.md +++ /dev/null @@ -1,55 +0,0 @@ -Based on https://github.com/tloen/alpaca-lora - -## Instructions - -1. Download a LoRA, for instance: - -``` -python download-model.py tloen/alpaca-lora-7b -``` - -2. Load the LoRA. 
16-bit, 8-bit, and CPU modes work: - -``` -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --load-in-8bit -python server.py --model llama-7b-hf --lora tloen_alpaca-lora-7b --cpu -``` - -* For using LoRAs in 4-bit mode, follow [these special instructions](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode). - -* Instead of using the `--lora` command-line flag, you can also select the LoRA in the "Parameters" tab of the interface. - -## Prompt -For the Alpaca LoRA in particular, the prompt must be formatted like this: - -``` -Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: -Write a Python script that generates text using the transformers library. -### Response: -``` - -Sample output: - -``` -Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: -Write a Python script that generates text using the transformers library. -### Response: - -import transformers -from transformers import AutoTokenizer, AutoModelForCausalLM -tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") -model = AutoModelForCausalLM.from_pretrained("bert-base-uncased") -texts = ["Hello world", "How are you"] -for sentence in texts: -sentence = tokenizer(sentence) -print(f"Generated {len(sentence)} tokens from '{sentence}'") -output = model(sentences=sentence).predict() -print(f"Predicted {len(output)} tokens for '{sentence}':\n{output}") -``` - -## Training a LoRA - -You can train your own LoRAs from the `Training` tab. See [Training LoRAs](Training-LoRAs.md) for details. diff --git a/spaces/aphenx/bingo/src/components/theme-toggle.tsx b/spaces/aphenx/bingo/src/components/theme-toggle.tsx deleted file mode 100644 index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000 --- a/spaces/aphenx/bingo/src/components/theme-toggle.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import { useTheme } from 'next-themes' - -import { Button } from '@/components/ui/button' -import { IconMoon, IconSun } from '@/components/ui/icons' - -export function ThemeToggle() { - const { setTheme, theme } = useTheme() - const [_, startTransition] = React.useTransition() - - return ( - - ) -} diff --git a/spaces/aravinds1811/neural-style-transfer/app.py b/spaces/aravinds1811/neural-style-transfer/app.py deleted file mode 100644 index 60b053b6362b90ecaa68a224b0b28ff8d36353ad..0000000000000000000000000000000000000000 --- a/spaces/aravinds1811/neural-style-transfer/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import gradio as gr -import numpy as np -from PIL import Image -import tensorflow as tf -import tensorflow_hub as hub - -style_transfer_model = hub.load("https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2") - -def perform_style_transfer(content_image, style_image): - - content_image = tf.convert_to_tensor(content_image, np.float32)[tf.newaxis, ...] / 255. - style_image = tf.convert_to_tensor(style_image, np.float32)[tf.newaxis, ...] / 255. 
- - output = style_transfer_model(content_image, style_image) - stylized_image = output[0] - - return Image.fromarray(np.uint8(stylized_image[0] * 255)) - - -content_image_input = gr.inputs.Image(label="Content Image") -style_image_input = gr.inputs.Image(shape=(256, 256), label="Style Image") - -# Examples -golden_gate = ["examples/golden_gate_bridge.jpeg", "examples/the_great_wave.jpeg"] -joshua_tree = ["examples/joshua_tree.jpeg", "examples/starry_night.jpeg"] -glacier = ["examples/glacier_national_park.jpeg", "examples/the_scream.jpg"] - -app_interface = gr.Interface(fn=perform_style_transfer, - inputs=[content_image_input, style_image_input], - outputs="image", - title="Fast Neural Style Transfer", - description="Gradio demo for Fast Neural Style Transfer using a pretrained Image Stylization model from TensorFlow Hub. To use it, simply upload a content image and style image, or click one of the examples to load them. To learn more about the project, please find the references listed below.", - examples=[glacier, golden_gate, joshua_tree], - article="**References**\n\n" - "1. Tutorial to implement Fast Neural Style Transfer using the pretrained model from TensorFlow Hub \n" - "2. The idea to build a neural style transfer application was inspired from this Hugging Face Space ") -app_interface.launch() diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/server/templates/details.html b/spaces/artificialguybr/video-dubbing/TTS/TTS/server/templates/details.html deleted file mode 100644 index 51c9ed85a83ac0aab045623ee1e6c430fbe51b9d..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/server/templates/details.html +++ /dev/null @@ -1,131 +0,0 @@ - - - - - - - - - - - TTS engine - - - - - - - - - - Fork me on GitHub - - {% if show_details == true %} - -
- Model details
- CLI arguments:
- {% for key, value in args.items() %}
-     CLI key: {{ key }}    Value: {{ value }}
- {% endfor %}
- {% if model_config != None %}
- Model config:
- {% for key, value in model_config.items() %}
-     Key: {{ key }}    Value: {{ value }}
- {% endfor %}
- {% endif %}
- {% if vocoder_config != None %}
- Vocoder model config:
- {% for key, value in vocoder_config.items() %}
-     Key: {{ key }}    Value: {{ value }}
- {% endfor %}
- {% endif %}
- {% else %}
- Please start server with --show_details=true to see details.
    - - {% endif %} - - - - \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/speaker_encoder/hparams.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/speaker_encoder/hparams.py deleted file mode 100644 index 2c536ae16cf8134d66c83aaf978ed01fc396b680..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/speaker_encoder/hparams.py +++ /dev/null @@ -1,31 +0,0 @@ -## Mel-filterbank -mel_window_length = 25 # In milliseconds -mel_window_step = 10 # In milliseconds -mel_n_channels = 40 - - -## Audio -sampling_rate = 16000 -# Number of spectrogram frames in a partial utterance -partials_n_frames = 160 # 1600 ms - - -## Voice Activation Detection -# Window size of the VAD. Must be either 10, 20 or 30 milliseconds. -# This sets the granularity of the VAD. Should not need to be changed. -vad_window_length = 30 # In milliseconds -# Number of frames to average together when performing the moving average smoothing. -# The larger this value, the larger the VAD variations must be to not get smoothed out. -vad_moving_average_width = 8 -# Maximum number of consecutive silent frames a segment can have. -vad_max_silence_length = 6 - - -## Audio volume normalization -audio_norm_target_dBFS = -30 - - -## Model parameters -model_hidden_size = 256 -model_embedding_size = 256 -model_num_layers = 3 diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_OCB.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_OCB.py deleted file mode 100644 index 3a891220b1f85429f9c6b5f47bb8ea252ba5b0b6..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Cipher/test_OCB.py +++ /dev/null @@ -1,742 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-# =================================================================== - -import os -import re -import unittest -from binascii import hexlify, unhexlify - -from Crypto.Util.py3compat import b, tobytes, bchr -from Crypto.Util.strxor import strxor_c -from Crypto.Util.number import long_to_bytes -from Crypto.SelfTest.st_common import list_test_cases - -from Crypto.Cipher import AES -from Crypto.Hash import SHAKE128 - - -def get_tag_random(tag, length): - return SHAKE128.new(data=tobytes(tag)).read(length) - - -class OcbTests(unittest.TestCase): - - key_128 = get_tag_random("key_128", 16) - nonce_96 = get_tag_random("nonce_128", 12) - data = get_tag_random("data", 128) - - def test_loopback_128(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - pt = get_tag_random("plaintext", 16 * 100) - ct, mac = cipher.encrypt_and_digest(pt) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - pt2 = cipher.decrypt_and_verify(ct, mac) - self.assertEqual(pt, pt2) - - def test_nonce(self): - # Nonce is optional - AES.new(self.key_128, AES.MODE_OCB) - - cipher = AES.new(self.key_128, AES.MODE_OCB, self.nonce_96) - ct = cipher.encrypt(self.data) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertEqual(ct, cipher.encrypt(self.data)) - - def test_nonce_must_be_bytes(self): - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB, - nonce=u'test12345678') - - def test_nonce_length(self): - # nonce cannot be empty - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB, - nonce=b("")) - - # nonce can be up to 15 bytes long - for length in range(1, 16): - AES.new(self.key_128, AES.MODE_OCB, nonce=self.data[:length]) - - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB, - nonce=self.data) - - def test_block_size_128(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertEqual(cipher.block_size, AES.block_size) - - # By default, a 15 bytes long nonce is randomly generated - nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce - nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce - self.assertEqual(len(nonce1), 15) - self.assertNotEqual(nonce1, nonce2) - - def test_nonce_attribute(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertEqual(cipher.nonce, self.nonce_96) - - # By default, a 15 bytes long nonce is randomly generated - nonce1 = AES.new(self.key_128, AES.MODE_OCB).nonce - nonce2 = AES.new(self.key_128, AES.MODE_OCB).nonce - self.assertEqual(len(nonce1), 15) - self.assertNotEqual(nonce1, nonce2) - - def test_unknown_parameters(self): - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB, - self.nonce_96, 7) - self.assertRaises(TypeError, AES.new, self.key_128, AES.MODE_OCB, - nonce=self.nonce_96, unknown=7) - - # But some are only known by the base cipher - # (e.g. 
use_aesni consumed by the AES module) - AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96, - use_aesni=False) - - def test_null_encryption_decryption(self): - for func in "encrypt", "decrypt": - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - result = getattr(cipher, func)(b("")) - self.assertEqual(result, b("")) - - def test_either_encrypt_or_decrypt(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.encrypt(b("xyz")) - self.assertRaises(TypeError, cipher.decrypt, b("xyz")) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.decrypt(b("xyz")) - self.assertRaises(TypeError, cipher.encrypt, b("xyz")) - - def test_data_must_be_bytes(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.encrypt, u'test1234567890-*') - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertRaises(TypeError, cipher.decrypt, u'test1234567890-*') - - def test_mac_len(self): - # Invalid MAC length - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB, - nonce=self.nonce_96, mac_len=7) - self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_OCB, - nonce=self.nonce_96, mac_len=16+1) - - # Valid MAC length - for mac_len in range(8, 16 + 1): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96, - mac_len=mac_len) - _, mac = cipher.encrypt_and_digest(self.data) - self.assertEqual(len(mac), mac_len) - - # Default MAC length - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - _, mac = cipher.encrypt_and_digest(self.data) - self.assertEqual(len(mac), 16) - - def test_invalid_mac(self): - from Crypto.Util.strxor import strxor_c - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - ct, mac = cipher.encrypt_and_digest(self.data) - - invalid_mac = strxor_c(mac, 0x01) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - self.assertRaises(ValueError, cipher.decrypt_and_verify, ct, - invalid_mac) - - def test_hex_mac(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - mac_hex = cipher.hexdigest() - self.assertEqual(cipher.digest(), unhexlify(mac_hex)) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.hexverify(mac_hex) - - def test_message_chunks(self): - # Validate that both associated data and plaintext/ciphertext - # can be broken up in chunks of arbitrary length - - auth_data = get_tag_random("authenticated data", 127) - plaintext = get_tag_random("plaintext", 127) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.update(auth_data) - ciphertext, ref_mac = cipher.encrypt_and_digest(plaintext) - - def break_up(data, chunk_length): - return [data[i:i+chunk_length] for i in range(0, len(data), - chunk_length)] - - # Encryption - for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128: - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - - for chunk in break_up(auth_data, chunk_length): - cipher.update(chunk) - pt2 = b("") - for chunk in break_up(ciphertext, chunk_length): - pt2 += cipher.decrypt(chunk) - pt2 += cipher.decrypt() - self.assertEqual(plaintext, pt2) - cipher.verify(ref_mac) - - # Decryption - for chunk_length in 1, 2, 3, 7, 10, 13, 16, 40, 80, 128: - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - - for chunk in break_up(auth_data, chunk_length): - cipher.update(chunk) - ct2 = b("") - for chunk in break_up(plaintext, chunk_length): - ct2 
+= cipher.encrypt(chunk) - ct2 += cipher.encrypt() - self.assertEqual(ciphertext, ct2) - self.assertEqual(cipher.digest(), ref_mac) - - def test_bytearray(self): - - # Encrypt - key_ba = bytearray(self.key_128) - nonce_ba = bytearray(self.nonce_96) - header_ba = bytearray(self.data) - data_ba = bytearray(self.data) - - cipher1 = AES.new(self.key_128, - AES.MODE_OCB, - nonce=self.nonce_96) - cipher1.update(self.data) - ct = cipher1.encrypt(self.data) + cipher1.encrypt() - tag = cipher1.digest() - - cipher2 = AES.new(key_ba, - AES.MODE_OCB, - nonce=nonce_ba) - key_ba[:3] = b"\xFF\xFF\xFF" - nonce_ba[:3] = b"\xFF\xFF\xFF" - cipher2.update(header_ba) - header_ba[:3] = b"\xFF\xFF\xFF" - ct_test = cipher2.encrypt(data_ba) + cipher2.encrypt() - data_ba[:3] = b"\xFF\xFF\xFF" - tag_test = cipher2.digest() - - self.assertEqual(ct, ct_test) - self.assertEqual(tag, tag_test) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - # Decrypt - key_ba = bytearray(self.key_128) - nonce_ba = bytearray(self.nonce_96) - header_ba = bytearray(self.data) - del data_ba - - cipher4 = AES.new(key_ba, - AES.MODE_OCB, - nonce=nonce_ba) - key_ba[:3] = b"\xFF\xFF\xFF" - nonce_ba[:3] = b"\xFF\xFF\xFF" - cipher4.update(header_ba) - header_ba[:3] = b"\xFF\xFF\xFF" - pt_test = cipher4.decrypt_and_verify(bytearray(ct_test), bytearray(tag_test)) - - self.assertEqual(self.data, pt_test) - - def test_memoryview(self): - - # Encrypt - key_mv = memoryview(bytearray(self.key_128)) - nonce_mv = memoryview(bytearray(self.nonce_96)) - header_mv = memoryview(bytearray(self.data)) - data_mv = memoryview(bytearray(self.data)) - - cipher1 = AES.new(self.key_128, - AES.MODE_OCB, - nonce=self.nonce_96) - cipher1.update(self.data) - ct = cipher1.encrypt(self.data) + cipher1.encrypt() - tag = cipher1.digest() - - cipher2 = AES.new(key_mv, - AES.MODE_OCB, - nonce=nonce_mv) - key_mv[:3] = b"\xFF\xFF\xFF" - nonce_mv[:3] = b"\xFF\xFF\xFF" - cipher2.update(header_mv) - header_mv[:3] = b"\xFF\xFF\xFF" - ct_test = cipher2.encrypt(data_mv) + cipher2.encrypt() - data_mv[:3] = b"\xFF\xFF\xFF" - tag_test = cipher2.digest() - - self.assertEqual(ct, ct_test) - self.assertEqual(tag, tag_test) - self.assertEqual(cipher1.nonce, cipher2.nonce) - - # Decrypt - key_mv = memoryview(bytearray(self.key_128)) - nonce_mv = memoryview(bytearray(self.nonce_96)) - header_mv = memoryview(bytearray(self.data)) - del data_mv - - cipher4 = AES.new(key_mv, - AES.MODE_OCB, - nonce=nonce_mv) - key_mv[:3] = b"\xFF\xFF\xFF" - nonce_mv[:3] = b"\xFF\xFF\xFF" - cipher4.update(header_mv) - header_mv[:3] = b"\xFF\xFF\xFF" - pt_test = cipher4.decrypt_and_verify(memoryview(ct_test), memoryview(tag_test)) - - self.assertEqual(self.data, pt_test) - - -class OcbFSMTests(unittest.TestCase): - - key_128 = get_tag_random("key_128", 16) - nonce_96 = get_tag_random("nonce_128", 12) - data = get_tag_random("data", 128) - - def test_valid_init_encrypt_decrypt_digest_verify(self): - # No authenticated data, fixed plaintext - # Verify path INIT->ENCRYPT->ENCRYPT(NONE)->DIGEST - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - ct = cipher.encrypt(self.data) - ct += cipher.encrypt() - mac = cipher.digest() - - # Verify path INIT->DECRYPT->DECRYPT(NONCE)->VERIFY - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.decrypt(ct) - cipher.decrypt() - cipher.verify(mac) - - def test_invalid_init_encrypt_decrypt_digest_verify(self): - # No authenticated data, fixed plaintext - # Verify path INIT->ENCRYPT->DIGEST - cipher = AES.new(self.key_128, AES.MODE_OCB, 
- nonce=self.nonce_96) - ct = cipher.encrypt(self.data) - self.assertRaises(TypeError, cipher.digest) - - # Verify path INIT->DECRYPT->VERIFY - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.decrypt(ct) - self.assertRaises(TypeError, cipher.verify) - - def test_valid_init_update_digest_verify(self): - # No plaintext, fixed authenticated data - # Verify path INIT->UPDATE->DIGEST - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - mac = cipher.digest() - - # Verify path INIT->UPDATE->VERIFY - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - cipher.verify(mac) - - def test_valid_full_path(self): - # Fixed authenticated data, fixed plaintext - # Verify path INIT->UPDATE->ENCRYPT->ENCRYPT(NONE)->DIGEST - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - ct = cipher.encrypt(self.data) - ct += cipher.encrypt() - mac = cipher.digest() - - # Verify path INIT->UPDATE->DECRYPT->DECRYPT(NONE)->VERIFY - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - cipher.decrypt(ct) - cipher.decrypt() - cipher.verify(mac) - - def test_invalid_encrypt_after_final(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - cipher.encrypt(self.data) - cipher.encrypt() - self.assertRaises(TypeError, cipher.encrypt, self.data) - - def test_invalid_decrypt_after_final(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - cipher.update(self.data) - cipher.decrypt(self.data) - cipher.decrypt() - self.assertRaises(TypeError, cipher.decrypt, self.data) - - def test_valid_init_digest(self): - # Verify path INIT->DIGEST - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.digest() - - def test_valid_init_verify(self): - # Verify path INIT->VERIFY - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - mac = cipher.digest() - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.verify(mac) - - def test_valid_multiple_encrypt_or_decrypt(self): - for method_name in "encrypt", "decrypt": - for auth_data in (None, b("333"), self.data, - self.data + b("3")): - if auth_data is None: - assoc_len = None - else: - assoc_len = len(auth_data) - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - if auth_data is not None: - cipher.update(auth_data) - method = getattr(cipher, method_name) - method(self.data) - method(self.data) - method(self.data) - method(self.data) - method() - - def test_valid_multiple_digest_or_verify(self): - # Multiple calls to digest - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.update(self.data) - first_mac = cipher.digest() - for x in range(4): - self.assertEqual(first_mac, cipher.digest()) - - # Multiple calls to verify - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.update(self.data) - for x in range(5): - cipher.verify(first_mac) - - def test_valid_encrypt_and_digest_decrypt_and_verify(self): - # encrypt_and_digest - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.update(self.data) - ct, mac = cipher.encrypt_and_digest(self.data) - - # decrypt_and_verify - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.update(self.data) - pt = cipher.decrypt_and_verify(ct, mac) - self.assertEqual(self.data, pt) - - def 
test_invalid_mixing_encrypt_decrypt(self): - # Once per method, with or without assoc. data - for method1_name, method2_name in (("encrypt", "decrypt"), - ("decrypt", "encrypt")): - for assoc_data_present in (True, False): - cipher = AES.new(self.key_128, AES.MODE_OCB, - nonce=self.nonce_96) - if assoc_data_present: - cipher.update(self.data) - getattr(cipher, method1_name)(self.data) - self.assertRaises(TypeError, getattr(cipher, method2_name), - self.data) - - def test_invalid_encrypt_or_update_after_digest(self): - for method_name in "encrypt", "update": - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.encrypt(self.data) - cipher.encrypt() - cipher.digest() - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.encrypt_and_digest(self.data) - - def test_invalid_decrypt_or_update_after_verify(self): - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - ct = cipher.encrypt(self.data) - ct += cipher.encrypt() - mac = cipher.digest() - - for method_name in "decrypt", "update": - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.decrypt(ct) - cipher.decrypt() - cipher.verify(mac) - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data) - - cipher = AES.new(self.key_128, AES.MODE_OCB, nonce=self.nonce_96) - cipher.decrypt_and_verify(ct, mac) - self.assertRaises(TypeError, getattr(cipher, method_name), - self.data) - - -class OcbRfc7253Test(unittest.TestCase): - - # Tuple with - # - nonce - # - authenticated data - # - plaintext - # - ciphertext and 16 byte MAC tag - tv1_key = "000102030405060708090A0B0C0D0E0F" - tv1 = ( - ( - "BBAA99887766554433221100", - "", - "", - "785407BFFFC8AD9EDCC5520AC9111EE6" - ), - ( - "BBAA99887766554433221101", - "0001020304050607", - "0001020304050607", - "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009" - ), - ( - "BBAA99887766554433221102", - "0001020304050607", - "", - "81017F8203F081277152FADE694A0A00" - ), - ( - "BBAA99887766554433221103", - "", - "0001020304050607", - "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9" - ), - ( - "BBAA99887766554433221104", - "000102030405060708090A0B0C0D0E0F", - "000102030405060708090A0B0C0D0E0F", - "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5" - "701C1CCEC8FC3358" - ), - ( - "BBAA99887766554433221105", - "000102030405060708090A0B0C0D0E0F", - "", - "8CF761B6902EF764462AD86498CA6B97" - ), - ( - "BBAA99887766554433221106", - "", - "000102030405060708090A0B0C0D0E0F", - "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436B" - "DF06D8FA1ECA343D" - ), - ( - "BBAA99887766554433221107", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "1CA2207308C87C010756104D8840CE1952F09673A448A122" - "C92C62241051F57356D7F3C90BB0E07F" - ), - ( - "BBAA99887766554433221108", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "", - "6DC225A071FC1B9F7C69F93B0F1E10DE" - ), - ( - "BBAA99887766554433221109", - "", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3C" - "E725F32494B9F914D85C0B1EB38357FF" - ), - ( - "BBAA9988776655443322110A", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F", - "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DE" - "AFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240" - ), - ( - "BBAA9988776655443322110B", - 
"000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F", - "", - "FE80690BEE8A485D11F32965BC9D2A32" - ), - ( - "BBAA9988776655443322110C", - "", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F", - "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF4" - "6040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF" - ), - ( - "BBAA9988776655443322110D", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "D5CA91748410C1751FF8A2F618255B68A0A12E093FF45460" - "6E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483" - "A7035490C5769E60" - ), - ( - "BBAA9988776655443322110E", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "", - "C5CD9D1850C141E358649994EE701B68" - ), - ( - "BBAA9988776655443322110F", - "", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15" - "A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95" - "A98CA5F3000B1479" - ) - ) - - # Tuple with - # - key - # - nonce - # - authenticated data - # - plaintext - # - ciphertext and 12 byte MAC tag - tv2 = ( - "0F0E0D0C0B0A09080706050403020100", - "BBAA9988776655443322110D", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F1011121314151617" - "18191A1B1C1D1E1F2021222324252627", - "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1" - "A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FD" - "AC4F02AA" - ) - - # Tuple with - # - key length - # - MAC tag length - # - Expected output - tv3 = ( - (128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"), - (192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"), - (256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"), - (128, 96, "77A3D8E73589158D25D01209"), - (192, 96, "05D56EAD2752C86BE6932C5E"), - (256, 96, "5458359AC23B0CBA9E6330DD"), - (128, 64, "192C9B7BD90BA06A"), - (192, 64, "0066BC6E0EF34E24"), - (256, 64, "7D4EA5D445501CBE"), - ) - - def test1(self): - key = unhexlify(b(self.tv1_key)) - for tv in self.tv1: - nonce, aad, pt, ct = [ unhexlify(b(x)) for x in tv ] - ct, mac_tag = ct[:-16], ct[-16:] - - cipher = AES.new(key, AES.MODE_OCB, nonce=nonce) - cipher.update(aad) - ct2 = cipher.encrypt(pt) + cipher.encrypt() - self.assertEqual(ct, ct2) - self.assertEqual(mac_tag, cipher.digest()) - - cipher = AES.new(key, AES.MODE_OCB, nonce=nonce) - cipher.update(aad) - pt2 = cipher.decrypt(ct) + cipher.decrypt() - self.assertEqual(pt, pt2) - cipher.verify(mac_tag) - - def test2(self): - - key, nonce, aad, pt, ct = [ unhexlify(b(x)) for x in self.tv2 ] - ct, mac_tag = ct[:-12], ct[-12:] - - cipher = AES.new(key, AES.MODE_OCB, nonce=nonce, mac_len=12) - cipher.update(aad) - ct2 = cipher.encrypt(pt) + cipher.encrypt() - self.assertEqual(ct, ct2) - self.assertEqual(mac_tag, cipher.digest()) - - cipher = AES.new(key, AES.MODE_OCB, nonce=nonce, mac_len=12) - cipher.update(aad) - pt2 = cipher.decrypt(ct) + cipher.decrypt() - self.assertEqual(pt, pt2) - cipher.verify(mac_tag) - - def test3(self): - - for keylen, taglen, result in self.tv3: - - key = bchr(0) * (keylen // 8 - 1) + bchr(taglen) - C = b("") - - for i in range(128): - S = bchr(0) * i - - N = long_to_bytes(3 * i + 1, 12) - cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8) - cipher.update(S) - C += cipher.encrypt(S) + cipher.encrypt() + cipher.digest() - - N = long_to_bytes(3 * i + 2, 12) - cipher = 
AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8) - C += cipher.encrypt(S) + cipher.encrypt() + cipher.digest() - - N = long_to_bytes(3 * i + 3, 12) - cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8) - cipher.update(S) - C += cipher.encrypt() + cipher.digest() - - N = long_to_bytes(385, 12) - cipher = AES.new(key, AES.MODE_OCB, nonce=N, mac_len=taglen // 8) - cipher.update(C) - result2 = cipher.encrypt() + cipher.digest() - self.assertEqual(unhexlify(b(result)), result2) - - -def get_tests(config={}): - tests = [] - tests += list_test_cases(OcbTests) - tests += list_test_cases(OcbFSMTests) - tests += list_test_cases(OcbRfc7253Test) - return tests - - -if __name__ == '__main__': - import unittest - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FitsImagePlugin.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FitsImagePlugin.py deleted file mode 100644 index c16300efa897ad168f18b72a3381af75a1792b30..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FitsImagePlugin.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# FITS file handling -# -# Copyright (c) 1998-2003 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import math - -from . import Image, ImageFile - - -def _accept(prefix): - return prefix[:6] == b"SIMPLE" - - -class FitsImageFile(ImageFile.ImageFile): - - format = "FITS" - format_description = "FITS" - - def _open(self): - headers = {} - while True: - header = self.fp.read(80) - if not header: - raise OSError("Truncated FITS file") - keyword = header[:8].strip() - if keyword == b"END": - break - value = header[8:].strip() - if value.startswith(b"="): - value = value[1:].strip() - if not headers and (not _accept(keyword) or value != b"T"): - raise SyntaxError("Not a FITS file") - headers[keyword] = value - - naxis = int(headers[b"NAXIS"]) - if naxis == 0: - raise ValueError("No image data") - elif naxis == 1: - self._size = 1, int(headers[b"NAXIS1"]) - else: - self._size = int(headers[b"NAXIS1"]), int(headers[b"NAXIS2"]) - - number_of_bits = int(headers[b"BITPIX"]) - if number_of_bits == 8: - self.mode = "L" - elif number_of_bits == 16: - self.mode = "I" - # rawmode = "I;16S" - elif number_of_bits == 32: - self.mode = "I" - elif number_of_bits in (-32, -64): - self.mode = "F" - # rawmode = "F" if number_of_bits == -32 else "F;64F" - - offset = math.ceil(self.fp.tell() / 2880) * 2880 - self.tile = [("raw", (0, 0) + self.size, offset, (self.mode, 0, -1))] - - -# -------------------------------------------------------------------- -# Registry - -Image.register_open(FitsImageFile.format, FitsImageFile, _accept) - -Image.register_extensions(FitsImageFile.format, [".fit", ".fits"]) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageMorph.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageMorph.py deleted file mode 100644 index 1e22c36a8ae1ebea834f36becb5f78d3f7afcd51..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageMorph.py +++ /dev/null @@ -1,245 +0,0 @@ -# A binary morphology add-on for the Python Imaging Library -# -# History: -# 2014-06-04 Initial version. -# -# Copyright (c) 2014 Dov Grobgeld - -import re - -from . 
import Image, _imagingmorph - -LUT_SIZE = 1 << 9 - -# fmt: off -ROTATION_MATRIX = [ - 6, 3, 0, - 7, 4, 1, - 8, 5, 2, -] -MIRROR_MATRIX = [ - 2, 1, 0, - 5, 4, 3, - 8, 7, 6, -] -# fmt: on - - -class LutBuilder: - """A class for building a MorphLut from a descriptive language - - The input patterns is a list of a strings sequences like these:: - - 4:(... - .1. - 111)->1 - - (whitespaces including linebreaks are ignored). The option 4 - describes a series of symmetry operations (in this case a - 4-rotation), the pattern is described by: - - - . or X - Ignore - - 1 - Pixel is on - - 0 - Pixel is off - - The result of the operation is described after "->" string. - - The default is to return the current pixel value, which is - returned if no other match is found. - - Operations: - - - 4 - 4 way rotation - - N - Negate - - 1 - Dummy op for no other operation (an op must always be given) - - M - Mirroring - - Example:: - - lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) - lut = lb.build_lut() - - """ - - def __init__(self, patterns=None, op_name=None): - if patterns is not None: - self.patterns = patterns - else: - self.patterns = [] - self.lut = None - if op_name is not None: - known_patterns = { - "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], - "dilation4": ["4:(... .0. .1.)->1"], - "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], - "erosion4": ["4:(... .1. .0.)->0"], - "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], - "edge": [ - "1:(... ... ...)->0", - "4:(.0. .1. ...)->1", - "4:(01. .1. ...)->1", - ], - } - if op_name not in known_patterns: - raise Exception("Unknown pattern " + op_name + "!") - - self.patterns = known_patterns[op_name] - - def add_patterns(self, patterns): - self.patterns += patterns - - def build_default_lut(self): - symbols = [0, 1] - m = 1 << 4 # pos of current pixel - self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) - - def get_lut(self): - return self.lut - - def _string_permute(self, pattern, permutation): - """string_permute takes a pattern and a permutation and returns the - string permuted according to the permutation list. - """ - assert len(permutation) == 9 - return "".join(pattern[p] for p in permutation) - - def _pattern_permute(self, basic_pattern, options, basic_result): - """pattern_permute takes a basic pattern and its result and clones - the pattern according to the modifications described in the $options - parameter. It returns a list of all cloned patterns.""" - patterns = [(basic_pattern, basic_result)] - - # rotations - if "4" in options: - res = patterns[-1][1] - for i in range(4): - patterns.append( - (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) - ) - # mirror - if "M" in options: - n = len(patterns) - for pattern, res in patterns[:n]: - patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) - - # negate - if "N" in options: - n = len(patterns) - for pattern, res in patterns[:n]: - # Swap 0 and 1 - pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") - res = 1 - int(res) - patterns.append((pattern, res)) - - return patterns - - def build_lut(self): - """Compile all patterns into a morphology lut. 
- - TBD :Build based on (file) morphlut:modify_lut - """ - self.build_default_lut() - patterns = [] - - # Parse and create symmetries of the patterns strings - for p in self.patterns: - m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) - if not m: - raise Exception('Syntax error in pattern "' + p + '"') - options = m.group(1) - pattern = m.group(2) - result = int(m.group(3)) - - # Get rid of spaces - pattern = pattern.replace(" ", "").replace("\n", "") - - patterns += self._pattern_permute(pattern, options, result) - - # compile the patterns into regular expressions for speed - for i, pattern in enumerate(patterns): - p = pattern[0].replace(".", "X").replace("X", "[01]") - p = re.compile(p) - patterns[i] = (p, pattern[1]) - - # Step through table and find patterns that match. - # Note that all the patterns are searched. The last one - # caught overrides - for i in range(LUT_SIZE): - # Build the bit pattern - bitpattern = bin(i)[2:] - bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] - - for p, r in patterns: - if p.match(bitpattern): - self.lut[i] = [0, 1][r] - - return self.lut - - -class MorphOp: - """A class for binary morphological operators""" - - def __init__(self, lut=None, op_name=None, patterns=None): - """Create a binary morphological operator""" - self.lut = lut - if op_name is not None: - self.lut = LutBuilder(op_name=op_name).build_lut() - elif patterns is not None: - self.lut = LutBuilder(patterns=patterns).build_lut() - - def apply(self, image): - """Run a single morphological operation on an image - - Returns a tuple of the number of changed pixels and the - morphed image""" - if self.lut is None: - raise Exception("No operator loaded") - - if image.mode != "L": - raise ValueError("Image mode must be L") - outimage = Image.new(image.mode, image.size, None) - count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id) - return count, outimage - - def match(self, image): - """Get a list of coordinates matching the morphological operation on - an image. - - Returns a list of tuples of (x,y) coordinates - of all matching pixels. See :ref:`coordinate-system`.""" - if self.lut is None: - raise Exception("No operator loaded") - - if image.mode != "L": - raise ValueError("Image mode must be L") - return _imagingmorph.match(bytes(self.lut), image.im.id) - - def get_on_pixels(self, image): - """Get a list of all turned on pixels in a binary image - - Returns a list of tuples of (x,y) coordinates - of all matching pixels. 
See :ref:`coordinate-system`.""" - - if image.mode != "L": - raise ValueError("Image mode must be L") - return _imagingmorph.get_on_pixels(image.im.id) - - def load_lut(self, filename): - """Load an operator from an mrl file""" - with open(filename, "rb") as f: - self.lut = bytearray(f.read()) - - if len(self.lut) != LUT_SIZE: - self.lut = None - raise Exception("Wrong size operator file!") - - def save_lut(self, filename): - """Save an operator to an mrl file""" - if self.lut is None: - raise Exception("No operator loaded") - with open(filename, "wb") as f: - f.write(self.lut) - - def set_lut(self, lut): - """Set the lut from an external source""" - self.lut = lut diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/commontypes.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/commontypes.py deleted file mode 100644 index 8ec97c756a4b1023fd3963dd39b706f7c0e34373..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/commontypes.py +++ /dev/null @@ -1,80 +0,0 @@ -import sys -from . import model -from .error import FFIError - - -COMMON_TYPES = {} - -try: - # fetch "bool" and all simple Windows types - from _cffi_backend import _get_common_types - _get_common_types(COMMON_TYPES) -except ImportError: - pass - -COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE') -COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above - -for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES: - if _type.endswith('_t'): - COMMON_TYPES[_type] = _type -del _type - -_CACHE = {} - -def resolve_common_type(parser, commontype): - try: - return _CACHE[commontype] - except KeyError: - cdecl = COMMON_TYPES.get(commontype, commontype) - if not isinstance(cdecl, str): - result, quals = cdecl, 0 # cdecl is already a BaseType - elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES: - result, quals = model.PrimitiveType(cdecl), 0 - elif cdecl == 'set-unicode-needed': - raise FFIError("The Windows type %r is only available after " - "you call ffi.set_unicode()" % (commontype,)) - else: - if commontype == cdecl: - raise FFIError( - "Unsupported type: %r. Please look at " - "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations " - "and file an issue if you think this type should really " - "be supported." 
% (commontype,)) - result, quals = parser.parse_type_and_quals(cdecl) # recursive - - assert isinstance(result, model.BaseTypeByIdentity) - _CACHE[commontype] = result, quals - return result, quals - - -# ____________________________________________________________ -# extra types for Windows (most of them are in commontypes.c) - - -def win_common_types(): - return { - "UNICODE_STRING": model.StructType( - "_UNICODE_STRING", - ["Length", - "MaximumLength", - "Buffer"], - [model.PrimitiveType("unsigned short"), - model.PrimitiveType("unsigned short"), - model.PointerType(model.PrimitiveType("wchar_t"))], - [-1, -1, -1]), - "PUNICODE_STRING": "UNICODE_STRING *", - "PCUNICODE_STRING": "const UNICODE_STRING *", - - "TBYTE": "set-unicode-needed", - "TCHAR": "set-unicode-needed", - "LPCTSTR": "set-unicode-needed", - "PCTSTR": "set-unicode-needed", - "LPTSTR": "set-unicode-needed", - "PTSTR": "set-unicode-needed", - "PTBYTE": "set-unicode-needed", - "PTCHAR": "set-unicode-needed", - } - -if sys.platform == 'win32': - COMMON_TYPES.update(win_common_types()) diff --git a/spaces/ashuonnet/skillrecommender/app.py b/spaces/ashuonnet/skillrecommender/app.py deleted file mode 100644 index d81d81b3d68dd7b373b1f34cca9555463320d96a..0000000000000000000000000000000000000000 --- a/spaces/ashuonnet/skillrecommender/app.py +++ /dev/null @@ -1,30 +0,0 @@ -import gradio as gr -import spacy -import torch -from Main import Main -from skill_recommender import skills_recommender -from transformers import BertForTokenClassification - - -#recommender = spacy.load("./model-best2") -main = Main() - -def recommend(resume): - summary = '' - found = 0 - - doc = main.predict(resume) - skills = [] - for ent_list in doc: - if(ent_list['entity'] == "Skills"): - temp = ent_list['text'] - temp = temp.split(',') - skills = skills + temp - - - sr = skills_recommender() - recommendation = sr.suggest_skills(skills) - return recommendation - -iface = gr.Interface(fn=recommend, inputs="text", outputs="text",title="Job Skills Recommender!",description="Get new skills recommendation based on your resume!") -iface.launch() \ No newline at end of file diff --git a/spaces/assemblyai/Conformer1-Demo/README.md b/spaces/assemblyai/Conformer1-Demo/README.md deleted file mode 100644 index e7dc4ffce9b712841ca00703d9bfeaa681175061..0000000000000000000000000000000000000000 --- a/spaces/assemblyai/Conformer1-Demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Conformer1 Demo -emoji: 👁 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Simon Szalai.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Simon Szalai.html deleted file mode 100644 index 1121b5845b561d5eeb6ceb6f96535c6837096539..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Simon Szalai.html +++ /dev/null @@ -1,132 +0,0 @@ - - - - Simon Szalai - - - - -
    -

    Simon Szalai

    - -
    -
    Former mentee!

    Career
    • SM as a mentee in 2020
    • Worked at startup til March 2022
    • Sr DE - quantifies Citizens in Vancouver
    • Also started a startup as CTO - surveys, using the response data. Set up a pipeline

    Mentorship exp
    • Previous job - continuously had interns; interviewed, onboarded, and mentored them (guidance, tips, etc.)
    • In a startup, have had 2 interns, one joined f/T, working closely with them

    What do beginners need help with? How can you help them?
    • How to learn things on your own
    • There is a LOT online already, but it's easy to get stuck in a local minimum for lack of basic knowledge (e.g. database indexes and why a query is taking so long - see the sketch after this list)
    • Building a project - guidance, architecture, feedback, and code help
    • Job search - mock interviews, how to look for a job
    • Keep their spirits up during their job search
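    A minimal sketch of the "database indexes / slow query" gap mentioned above - the table name, column names, and row count are made up purely for illustration, and sqlite3 stands in for whatever database a mentee might actually be using:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT)")
    conn.executemany("INSERT INTO users (email) VALUES (?)",
                     [(f"user{i}@example.com",) for i in range(10_000)])

    query = "SELECT id FROM users WHERE email = ?"

    # Without an index the planner reports a full table scan
    # (e.g. "SCAN users" or "SCAN TABLE users", depending on SQLite version)
    print(conn.execute("EXPLAIN QUERY PLAN " + query, ("user9999@example.com",)).fetchall())

    # After adding an index the same query is reported as an index search
    conn.execute("CREATE INDEX idx_users_email ON users (email)")
    print(conn.execute("EXPLAIN QUERY PLAN " + query, ("user9999@example.com",)).fetchall())

    Running the two EXPLAIN QUERY PLAN calls before and after CREATE INDEX is usually enough to show a mentee why the "same" query can behave so differently.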

    Question about SM
    • I've been through the process. I get it
    -
    - -
    - - - \ No newline at end of file diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/models/diffusion/loss.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/models/diffusion/loss.py deleted file mode 100644 index 25e199219de4b6c3792d56a03e8d71450416151c..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/models/diffusion/loss.py +++ /dev/null @@ -1,170 +0,0 @@ -import math -import torch -from ldm.models.diffusion.gaussian_smoothing import GaussianSmoothing -from torch.nn import functional as F -from torchvision.utils import save_image - - - - - - -def loss_one_att_outside(attn_map,bboxes, object_positions,t): - # loss = torch.tensor(0).to('cuda') - loss = 0 - object_number = len(bboxes) - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - - - # if t== 20: import pdb; pdb.set_trace() - - for obj_idx in range(object_number): - - for obj_box in bboxes[obj_idx]: - mask = torch.zeros(size=(H, W)).cuda() if torch.cuda.is_available() else torch.zeros(size=(H, W)) - x_min, y_min, x_max, y_max = int(obj_box[0] * W), \ - int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) - mask[y_min: y_max, x_min: x_max] = 1. - mask_out = 1. - mask - index = (mask == 1.).nonzero(as_tuple=False) - index_in_key = index[:,0]* H + index[:, 1] - att_box = torch.zeros_like(attn_map) - att_box[:,index_in_key,:] = attn_map[:,index_in_key,:] - - att_box = att_box.sum(axis=1) / index_in_key.shape[0] - att_box = att_box.reshape(-1, H, H) - activation_value = (att_box* mask_out).reshape(b, -1).sum(dim=-1) #/ att_box.reshape(b, -1).sum(dim=-1) - loss += torch.mean(activation_value) - - return loss / object_number - -def caculate_loss_self_att(self_first, self_second, self_third, bboxes, object_positions, t, list_res=[256], smooth_att = True,sigma=0.5,kernel_size=3 ): - all_attn = get_all_self_att(self_first, self_second, self_third) - cnt = 0 - total_loss = 0 - for res in list_res: - attn_maps = all_attn[res] - for attn in attn_maps: - total_loss += loss_one_att_outside(attn, bboxes, object_positions,t) - cnt += 1 - - return total_loss /cnt - - -def get_all_self_att(self_first, self_second, self_third): - result = {256:[], 1024:[], 4096:[], 64:[], 94:[],1054:[] ,286:[],4126:[] } - # import pdb; pdb.set_trace() - all_att = [self_first, self_second, self_third] - for self_att in all_att: - for att in self_att: - if att != []: - temp = att[0] - for attn_map in temp: - current_res = attn_map.shape[1] - # print(current_res) - result[current_res].append(attn_map) - return result - -def get_all_attention(attn_maps_mid, attn_maps_up , attn_maps_down, res): - result = [] - - for attn_map_integrated in attn_maps_up: - if attn_map_integrated == []: continue - attn_map = attn_map_integrated[0][0] - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - # print(H) - if H == res: - result.append(attn_map.reshape(-1, res, res,attn_map.shape[-1] )) - for attn_map_integrated in attn_maps_mid: - - # for attn_map_integrated in attn_maps_mid: - attn_map = attn_map_integrated[0] - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - # print(H) - if (H==res): - result.append(attn_map.reshape(-1, res, res,attn_map.shape[-1] )) - # import pdb; pdb.set_trace() - for attn_map_integrated in attn_maps_down: - if attn_map_integrated == []: continue - attn_map = attn_map_integrated[0][0] - if attn_map == []: continue - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - # print(H) - if (H==res): - result.append(attn_map.reshape(-1, res, 
res,attn_map.shape[-1] )) - - result = torch.cat(result, dim=0) - result = result.sum(0) / result.shape[0] - return result - - -def caculate_loss_att_fixed_cnt(attn_maps_mid, attn_maps_up, attn_maps_down, bboxes, object_positions, t, res=16, smooth_att = True,sigma=0.5,kernel_size=3 ): - attn16 = get_all_attention(attn_maps_mid, attn_maps_up, attn_maps_down, res) - # attn32 = get_all_attention(attn_maps_mid, attn_maps_up, attn_maps_down, 32) - # attn64 = get_all_attention(attn_maps_mid, attn_maps_up, attn_maps_down, 64) - # attn8 = get_all_attention(attn_maps_mid, attn_maps_up, attn_maps_down, 8) - all_attn = [attn16] - obj_number = len(bboxes) - total_loss = 0 - # import pdb; pdb.set_trace() - for attn in all_attn[0:1]: - attn_text = attn[:, :, 1:-1] - attn_text *= 100 - attn_text = torch.nn.functional.softmax(attn_text, dim=-1) - current_res = attn.shape[0] - H = W = current_res - - # if t == 49: import pdb; pdb.set_trace() - for obj_idx in range(obj_number): - num_boxes= 0 - - for obj_position in object_positions[obj_idx]: - true_obj_position = obj_position - 1 - att_map_obj = attn_text[:,:, true_obj_position] - if smooth_att: - smoothing = GaussianSmoothing(channels=1, kernel_size=kernel_size, sigma=sigma, dim=2).cuda() - input = F.pad(att_map_obj.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode='reflect') - att_map_obj = smoothing(input).squeeze(0).squeeze(0) - other_att_map_obj = att_map_obj.clone() - att_copy = att_map_obj.clone() - - for obj_box in bboxes[obj_idx]: - x_min, y_min, x_max, y_max = int(obj_box[0] * W), \ - int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) - - - if att_map_obj[y_min: y_max, x_min: x_max].numel() == 0: - max_inside=1. - - else: - max_inside = att_map_obj[y_min: y_max, x_min: x_max].max() - total_loss += 1. - max_inside - - # find max outside the box, find in the other boxes - - att_copy[y_min: y_max, x_min: x_max] = 0. - other_att_map_obj[y_min: y_max, x_min: x_max] = 0. - - for obj_outside in range(obj_number): - if obj_outside != obj_idx: - for obj_out_box in bboxes[obj_outside]: - x_min_out, y_min_out, x_max_out, y_max_out = int(obj_out_box[0] * W), \ - int(obj_out_box[1] * H), int(obj_out_box[2] * W), int(obj_out_box[3] * H) - - # att_copy[y_min: y_max, x_min: x_max] = 0. - if other_att_map_obj[y_min_out: y_max_out, x_min_out: x_max_out].numel() == 0: - max_outside_one= 0 - else: - max_outside_one = other_att_map_obj[y_min_out: y_max_out, x_min_out: x_max_out].max() - # max_outside = max(max_outside,max_outside_one ) - att_copy[y_min_out: y_max_out, x_min_out: x_max_out] = 0. - total_loss += max_outside_one - max_background = att_copy.max() - total_loss += len(bboxes[obj_idx]) *max_background /2. 
- - return total_loss/obj_number - diff --git a/spaces/avid-ml/bias-detection/avidtools/datamodels/vulnerability.py b/spaces/avid-ml/bias-detection/avidtools/datamodels/vulnerability.py deleted file mode 100644 index b5cabc169464773d1926a6ee2b216e7ccae74174..0000000000000000000000000000000000000000 --- a/spaces/avid-ml/bias-detection/avidtools/datamodels/vulnerability.py +++ /dev/null @@ -1,52 +0,0 @@ -from pydantic import BaseModel -from typing import List -from datetime import date - -from .components import Affects, AvidTaxonomy, Problemtype, Reference, LangValue, Impact -from .enums import TypeEnum -from .report import Report - -class VulnMetadata(BaseModel): - vuln_id: str - -class ReportSummary(BaseModel): - report_id: str - type: TypeEnum - name: str - -class Vulnerability(BaseModel): - data_type: str = 'AVID' - data_version: str = None - metadata: VulnMetadata = None - affects: Affects = None - problemtype: Problemtype = None - references: List[Reference] = None - description: LangValue = None - reports: List[ReportSummary] = None - impact: Impact = None - credit: List[LangValue] = None - published_date: date = None - last_modified_date: date = None - - def save(self, location): - with open(location, "w") as outfile: - outfile.write(self.json(indent=4)) - - def convert(self, report: Report): - self.data_version = report.data_version - self.affects = report.affects - self.problemtype = report.problemtype - self.description = report.description - self.references = report.references - self.impact = report.impact - self.credit = report.credit - self.published_date = date.today() - self.last_modified_date = date.today() - - if self.impact.avid is not None: # delete vuln_id field from report - self.impact.avid = AvidTaxonomy( - risk_domain = self.impact.avid.risk_domain, - sep_view = self.impact.avid.sep_view, - lifecycle_view = self.impact.avid.lifecycle_view, - taxonomy_version = self.impact.avid.taxonomy_version - ) diff --git a/spaces/awacke1/02-Gradio-Art-From-Text-And-Images/app.py b/spaces/awacke1/02-Gradio-Art-From-Text-And-Images/app.py deleted file mode 100644 index 10939427025b17176765402185cd11e23caa1523..0000000000000000000000000000000000000000 --- a/spaces/awacke1/02-Gradio-Art-From-Text-And-Images/app.py +++ /dev/null @@ -1,224 +0,0 @@ -import os - -os.system("git clone --recursive https://github.com/JD-P/cloob-latent-diffusion") -os.system("cd cloob-latent-diffusion;pip install omegaconf pillow pytorch-lightning einops wandb ftfy regex ./CLIP") - -import argparse -from functools import partial -from pathlib import Path -import sys -sys.path.append('./cloob-latent-diffusion') -sys.path.append('./cloob-latent-diffusion/cloob-training') -sys.path.append('./cloob-latent-diffusion/latent-diffusion') -sys.path.append('./cloob-latent-diffusion/taming-transformers') -sys.path.append('./cloob-latent-diffusion/v-diffusion-pytorch') -from omegaconf import OmegaConf -from PIL import Image -import torch -from torch import nn -from torch.nn import functional as F -from torchvision import transforms -from torchvision.transforms import functional as TF -from tqdm import trange -from CLIP import clip -from cloob_training import model_pt, pretrained -import ldm.models.autoencoder -from diffusion import sampling, utils -import train_latent_diffusion as train -from huggingface_hub import hf_hub_url, cached_download -import random - -# Download the model files -checkpoint = cached_download(hf_hub_url("huggan/distill-ccld-wa", filename="model_student.ckpt")) -ae_model_path = 
cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.ckpt")) -ae_config_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.yaml")) - -# Define a few utility functions - - -def parse_prompt(prompt, default_weight=3.): - if prompt.startswith('http://') or prompt.startswith('https://'): - vals = prompt.rsplit(':', 2) - vals = [vals[0] + ':' + vals[1], *vals[2:]] - else: - vals = prompt.rsplit(':', 1) - vals = vals + ['', default_weight][len(vals):] - return vals[0], float(vals[1]) - - -def resize_and_center_crop(image, size): - fac = max(size[0] / image.size[0], size[1] / image.size[1]) - image = image.resize((int(fac * image.size[0]), int(fac * image.size[1])), Image.LANCZOS) - return TF.center_crop(image, size[::-1]) - - -# Load the models -device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') -print('Using device:', device) -print('loading models') - -# autoencoder -ae_config = OmegaConf.load(ae_config_path) -ae_model = ldm.models.autoencoder.AutoencoderKL(**ae_config.model.params) -ae_model.eval().requires_grad_(False).to(device) -ae_model.load_state_dict(torch.load(ae_model_path)) -n_ch, side_y, side_x = 4, 32, 32 - -# diffusion model -model = train.DiffusionModel(192, [1,1,2,2], autoencoder_scale=torch.tensor(4.3084)) -model.load_state_dict(torch.load(checkpoint, map_location='cpu')) -model = model.to(device).eval().requires_grad_(False) - -# CLOOB -cloob_config = pretrained.get_config('cloob_laion_400m_vit_b_16_16_epochs') -cloob = model_pt.get_pt_model(cloob_config) -checkpoint = pretrained.download_checkpoint(cloob_config) -cloob.load_state_dict(model_pt.get_pt_params(cloob_config, checkpoint)) -cloob.eval().requires_grad_(False).to(device) - - -# The key function: returns a list of n PIL images -def generate(n=1, prompts=['a red circle'], images=[], seed=42, steps=15, - method='plms', eta=None): - zero_embed = torch.zeros([1, cloob.config['d_embed']], device=device) - target_embeds, weights = [zero_embed], [] - - for prompt in prompts: - txt, weight = parse_prompt(prompt) - target_embeds.append(cloob.text_encoder(cloob.tokenize(txt).to(device)).float()) - weights.append(weight) - - for prompt in images: - path, weight = parse_prompt(prompt) - img = Image.open(utils.fetch(path)).convert('RGB') - clip_size = cloob.config['image_encoder']['image_size'] - img = resize_and_center_crop(img, (clip_size, clip_size)) - batch = TF.to_tensor(img)[None].to(device) - embed = F.normalize(cloob.image_encoder(cloob.normalize(batch)).float(), dim=-1) - target_embeds.append(embed) - weights.append(weight) - - weights = torch.tensor([1 - sum(weights), *weights], device=device) - - torch.manual_seed(seed) - - def cfg_model_fn(x, t): - n = x.shape[0] - n_conds = len(target_embeds) - x_in = x.repeat([n_conds, 1, 1, 1]) - t_in = t.repeat([n_conds]) - clip_embed_in = torch.cat([*target_embeds]).repeat_interleave(n, 0) - vs = model(x_in, t_in, clip_embed_in).view([n_conds, n, *x.shape[1:]]) - v = vs.mul(weights[:, None, None, None, None]).sum(0) - return v - - def run(x, steps): - if method == 'ddpm': - return sampling.sample(cfg_model_fn, x, steps, 1., {}) - if method == 'ddim': - return sampling.sample(cfg_model_fn, x, steps, eta, {}) - if method == 'prk': - return sampling.prk_sample(cfg_model_fn, x, steps, {}) - if method == 'plms': - return sampling.plms_sample(cfg_model_fn, x, steps, {}) - if method == 'pie': - return sampling.pie_sample(cfg_model_fn, x, steps, {}) - if method == 'plms2': - return sampling.plms2_sample(cfg_model_fn, x, steps, {}) - 
assert False - - batch_size = n - x = torch.randn([n, n_ch, side_y, side_x], device=device) - t = torch.linspace(1, 0, steps + 1, device=device)[:-1] - steps = utils.get_spliced_ddpm_cosine_schedule(t) - pil_ims = [] - for i in trange(0, n, batch_size): - cur_batch_size = min(n - i, batch_size) - out_latents = run(x[i:i+cur_batch_size], steps) - outs = ae_model.decode(out_latents * torch.tensor(2.55).to(device)) - for j, out in enumerate(outs): - pil_ims.append(utils.to_pil_image(out)) - - return pil_ims - - -import gradio as gr - -def gen_ims(prompt, im_prompt=None, seed=None, n_steps=10, method='plms'): - if seed == None : - seed = random.randint(0, 10000) - print( prompt, im_prompt, seed, n_steps) - prompts = [prompt] - im_prompts = [] - if im_prompt != None: - im_prompts = [im_prompt] - pil_ims = generate(n=1, prompts=prompts, images=im_prompts, seed=seed, steps=n_steps, method=method) - return pil_ims[0] - -iface = gr.Interface(fn=gen_ims, - inputs=[#gr.inputs.Slider(minimum=1, maximum=1, step=1, default=1,label="Number of images"), - #gr.inputs.Slider(minimum=0, maximum=200, step=1, label='Random seed', default=0), - gr.inputs.Textbox(label="Text prompt"), - gr.inputs.Image(optional=True, label="Image prompt", type='filepath'), - #gr.inputs.Slider(minimum=10, maximum=35, step=1, default=15,label="Number of steps") - ], - outputs=[gr.outputs.Image(type="pil", label="Generated Image")], - examples=[ - ["Futurism, in the style of Wassily Kandinsky"], - ["Art Nouveau, in the style of John Singer Sargent"], - ["Surrealism, in the style of Edgar Degas"], - ["Expressionism, in the style of Wassily Kandinsky"], - ["Futurism, in the style of Egon Schiele"], - ["Neoclassicism, in the style of Gustav Klimt"], - ["Cubism, in the style of Gustav Klimt"], - ["Op Art, in the style of Marc Chagall"], - ["Romanticism, in the style of M.C. Escher"], - ["Futurism, in the style of M.C. Escher"], - ["Abstract Art, in the style of M.C. 
Escher"], - ["Mannerism, in the style of Paul Klee"], - ["Romanesque Art, in the style of Leonardo da Vinci"], - ["High Renaissance, in the style of Rembrandt"], - ["Magic Realism, in the style of Gustave Dore"], - ["Realism, in the style of Jean-Michel Basquiat"], - ["Art Nouveau, in the style of Paul Gauguin"], - ["Avant-garde, in the style of Pierre-Auguste Renoir"], - ["Baroque, in the style of Edward Hopper"], - ["Post-Impressionism, in the style of Wassily Kandinsky"], - ["Naturalism, in the style of Rene Magritte"], - ["Constructivism, in the style of Paul Cezanne"], - ["Abstract Expressionism, in the style of Henri Matisse"], - ["Pop Art, in the style of Vincent van Gogh"], - ["Futurism, in the style of Wassily Kandinsky"], - ["Futurism, in the style of Zdzislaw Beksinski"], - ['Surrealism, in the style of Salvador Dali'], - ["Aaron Wacker, oil on canvas"], - ["abstract"], - ["landscape"], - ["portrait"], - ["sculpture"], - ["genre painting"], - ["installation"], - ["photo"], - ["figurative"], - ["illustration"], - ["still life"], - ["history painting"], - ["cityscape"], - ["marina"], - ["animal painting"], - ["design"], - ["calligraphy"], - ["symbolic painting"], - ["graffiti"], - ["performance"], - ["mythological painting"], - ["battle painting"], - ["self-portrait"], - ["Impressionism, oil on canvas"] - ], - title='Art Generator and Style Mixer from 🧠 Cloob and 🎨 WikiArt - Visual Art Encyclopedia:', - description="Trained on images from the [WikiArt](https://www.wikiart.org/) dataset, comprised of visual arts", - article = 'Model used is: [model card](https://huggingface.co/huggan/distill-ccld-wa)..' - -) -iface.launch(enable_queue=True) # , debug=True for colab debugging \ No newline at end of file diff --git a/spaces/awacke1/2-NLP-Seq2SeqQAGenerator/README.md b/spaces/awacke1/2-NLP-Seq2SeqQAGenerator/README.md deleted file mode 100644 index 3c3880c64651536fa6d0f5f003d3306a86a03cf7..0000000000000000000000000000000000000000 --- a/spaces/awacke1/2-NLP-Seq2SeqQAGenerator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QA NLP Seq2SeqQAGenerator -emoji: 🙋QA❓ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Azure.Streamlit.Github.Actions.Azure.Container.Registry.Docker.AKS/README.md b/spaces/awacke1/Azure.Streamlit.Github.Actions.Azure.Container.Registry.Docker.AKS/README.md deleted file mode 100644 index ba6dc81fc91acccc414906bf9310000e2288d68f..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Azure.Streamlit.Github.Actions.Azure.Container.Registry.Docker.AKS/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Azure.Streamlit.Github.Actions.Azure.Container.Registry.Docker.AKS -emoji: 🌖 -colorFrom: yellow -colorTo: pink -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/ChatGPT-Memory-Chat-Story-Generator/app.py b/spaces/awacke1/ChatGPT-Memory-Chat-Story-Generator/app.py deleted file mode 100644 index fde041c60ba1c72a261664247ae7e8c7591b0e81..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatGPT-Memory-Chat-Story-Generator/app.py +++ /dev/null @@ -1,572 +0,0 @@ -import streamlit as st -import openai -import os -import base64 -import glob -import json -import mistune -import pytz -import 
math -import requests -import time -import re -import textract -import zipfile # New import for zipping files -from datetime import datetime -from openai import ChatCompletion -from xml.etree import ElementTree as ET -from bs4 import BeautifulSoup -from collections import deque -from audio_recorder_streamlit import audio_recorder -from dotenv import load_dotenv -from PyPDF2 import PdfReader -from langchain.text_splitter import CharacterTextSplitter -from langchain.embeddings import OpenAIEmbeddings -from langchain.vectorstores import FAISS -from langchain.chat_models import ChatOpenAI -from langchain.memory import ConversationBufferMemory -from langchain.chains import ConversationalRetrievalChain -from templates import css, bot_template, user_template -import streamlit.components.v1 as components # Import Streamlit Components for HTML5 - -# page config and sidebar declares up front allow all other functions to see global class variables -st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide") -should_save = st.sidebar.checkbox("💾 Save", value=True) - -# Whisper Paper - how open STT suddenly got so good: -# st link button with emoji anyone? -url="https://arxiv.org/pdf/2212.04356.pdf" -import random -def link_button_with_emoji(url): - emojis = ["💉", "🏥", "🌡️", "🩺", "🌡️", "🔬", "💊", "🧪", "👨‍⚕️", "👩‍⚕️"] - random_emoji = random.choice(emojis) - st.markdown(f"[{random_emoji} Whisper Paper - Robust Speech Recognition via Large-Scale Weak Supervision]({url})") -url = "https://arxiv.org/pdf/2212.04356.pdf" -link_button_with_emoji(url) - - - -def generate_filename_old(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%H%M") # Date and time DD-HHMM - safe_prompt = "".join(x for x in prompt if x.isalnum())[:90] # Limit file name size and trim whitespace - return f"{safe_date_time}_{safe_prompt}.{file_type}" # Return a safe file name - -def generate_filename(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%H%M") - replaced_prompt = prompt.replace(" ", "_").replace("\n", "_") - safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90] - return f"{safe_date_time}_{safe_prompt}.{file_type}" - -def transcribe_audio(file_path, model): - key = os.getenv('OPENAI_API_KEY') - headers = { - "Authorization": f"Bearer {key}", - } - with open(file_path, 'rb') as f: - data = {'file': f} - st.write("Read file {file_path}", file_path) - OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions" - response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model}) - if response.status_code == 200: - st.write(response.json()) - chatResponse = chat_with_model(response.json().get('text'), '') # ************************************* - transcript = response.json().get('text') - #st.write('Responses:') - #st.write(chatResponse) - filename = generate_filename(transcript, 'txt') - #create_file(filename, transcript, chatResponse) - response = chatResponse - user_prompt = transcript - create_file(filename, user_prompt, response, should_save) - return transcript - else: - st.write(response.json()) - st.error("Error in API call.") - return None - -def save_and_play_audio(audio_recorder): - audio_bytes = audio_recorder() - if audio_bytes: - filename = generate_filename("Recording", "wav") - with open(filename, 'wb') as f: - f.write(audio_bytes) - st.audio(audio_bytes, format="audio/wav") - return filename - return None - 
-def create_file(filename, prompt, response, should_save=True): - if not should_save: - return - - # Step 2: Extract base filename without extension - base_filename, ext = os.path.splitext(filename) - - # Step 3: Check if the response contains Python code - has_python_code = bool(re.search(r"```python([\s\S]*?)```", response)) - - # Step 4: Write files based on type - if ext in ['.txt', '.htm', '.md']: - # Create Prompt file - with open(f"{base_filename}-Prompt.txt", 'w') as file: - file.write(prompt) - - # Create Response file - with open(f"{base_filename}-Response.md", 'w') as file: - file.write(response) - - # Create Code file if Python code is present - if has_python_code: - # Extract Python code from the response - python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip() - - with open(f"{base_filename}-Code.py", 'w') as file: - file.write(python_code) - - -def create_file_old(filename, prompt, response, should_save=True): - if not should_save: - return - if filename.endswith(".txt"): - with open(filename, 'w') as file: - file.write(f"{prompt}\n{response}") - elif filename.endswith(".htm"): - with open(filename, 'w') as file: - file.write(f"{prompt} {response}") - elif filename.endswith(".md"): - with open(filename, 'w') as file: - file.write(f"{prompt}\n\n{response}") - -def truncate_document(document, length): - return document[:length] -def divide_document(document, max_length): - return [document[i:i+max_length] for i in range(0, len(document), max_length)] - -def get_table_download_link(file_path): - with open(file_path, 'r') as file: - try: - data = file.read() - except: - st.write('') - return file_path - b64 = base64.b64encode(data.encode()).decode() - file_name = os.path.basename(file_path) - ext = os.path.splitext(file_name)[1] # get the file extension - if ext == '.txt': - mime_type = 'text/plain' - elif ext == '.py': - mime_type = 'text/plain' - elif ext == '.xlsx': - mime_type = 'text/plain' - elif ext == '.csv': - mime_type = 'text/plain' - elif ext == '.htm': - mime_type = 'text/html' - elif ext == '.md': - mime_type = 'text/markdown' - else: - mime_type = 'application/octet-stream' # general binary data type - href = f'{file_name}' - return href - -def CompressXML(xml_text): - root = ET.fromstring(xml_text) - for elem in list(root.iter()): - if isinstance(elem.tag, str) and 'Comment' in elem.tag: - elem.parent.remove(elem) - return ET.tostring(root, encoding='unicode', method="xml") - -def read_file_content(file,max_length): - if file.type == "application/json": - content = json.load(file) - return str(content) - elif file.type == "text/html" or file.type == "text/htm": - content = BeautifulSoup(file, "html.parser") - return content.text - elif file.type == "application/xml" or file.type == "text/xml": - tree = ET.parse(file) - root = tree.getroot() - xml = CompressXML(ET.tostring(root, encoding='unicode')) - return xml - elif file.type == "text/markdown" or file.type == "text/md": - md = mistune.create_markdown() - content = md(file.read().decode()) - return content - elif file.type == "text/plain": - return file.getvalue().decode() - else: - return "" - -def readitaloud(result): - documentHTML5=''' - - - - Read It Aloud - - - -

    🔊 Read It Aloud

    - -
    - - - - ''' - - components.html(documentHTML5, width=1280, height=1024) - #return result - -def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'): - model = model_choice - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(document_section)>0: - conversation.append({'role': 'assistant', 'content': document_section}) - - start_time = time.time() - report = [] - res_box = st.empty() - collected_chunks = [] - collected_messages = [] - - key = os.getenv('OPENAI_API_KEY') - openai.api_key = key - for chunk in openai.ChatCompletion.create( - model='gpt-3.5-turbo', - messages=conversation, - temperature=0.5, - stream=True - ): - - collected_chunks.append(chunk) # save the event response - chunk_message = chunk['choices'][0]['delta'] # extract the message - collected_messages.append(chunk_message) # save the message - - content=chunk["choices"][0].get("delta",{}).get("content") - - try: - report.append(content) - if len(content) > 0: - result = "".join(report).strip() - #result = result.replace("\n", "") - res_box.markdown(f'*{result}*') - except: - st.write(' ') - - full_reply_content = ''.join([m.get('content', '') for m in collected_messages]) - st.write("Elapsed time:") - st.write(time.time() - start_time) - readitaloud(full_reply_content) - return full_reply_content - -def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(file_content)>0: - conversation.append({'role': 'assistant', 'content': file_content}) - response = openai.ChatCompletion.create(model=model_choice, messages=conversation) - return response['choices'][0]['message']['content'] - -def extract_mime_type(file): - # Check if the input is a string - if isinstance(file, str): - pattern = r"type='(.*?)'" - match = re.search(pattern, file) - if match: - return match.group(1) - else: - raise ValueError(f"Unable to extract MIME type from {file}") - # If it's not a string, assume it's a streamlit.UploadedFile object - elif isinstance(file, streamlit.UploadedFile): - return file.type - else: - raise TypeError("Input should be a string or a streamlit.UploadedFile object") - -from io import BytesIO -import re - -def extract_file_extension(file): - # get the file name directly from the UploadedFile object - file_name = file.name - pattern = r".*?\.(.*?)$" - match = re.search(pattern, file_name) - if match: - return match.group(1) - else: - raise ValueError(f"Unable to extract file extension from {file_name}") - -def pdf2txt(docs): - text = "" - for file in docs: - file_extension = extract_file_extension(file) - # print the file extension - st.write(f"File type extension: {file_extension}") - - # read the file according to its extension - try: - if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']: - text += file.getvalue().decode('utf-8') - elif file_extension.lower() == 'pdf': - from PyPDF2 import PdfReader - pdf = PdfReader(BytesIO(file.getvalue())) - for page in range(len(pdf.pages)): - text += pdf.pages[page].extract_text() # new PyPDF2 syntax - except Exception as e: - st.write(f"Error processing file {file.name}: {e}") - - return text - -def pdf2txt_old(pdf_docs): - st.write(pdf_docs) - for file in pdf_docs: - mime_type = extract_mime_type(file) - st.write(f"MIME type of file: {mime_type}") - - text = "" - for pdf in 
pdf_docs: - pdf_reader = PdfReader(pdf) - for page in pdf_reader.pages: - text += page.extract_text() - return text - -def txt2chunks(text): - text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len) - return text_splitter.split_text(text) - -def vector_store(text_chunks): - key = os.getenv('OPENAI_API_KEY') - embeddings = OpenAIEmbeddings(openai_api_key=key) - return FAISS.from_texts(texts=text_chunks, embedding=embeddings) - -def get_chain(vectorstore): - llm = ChatOpenAI() - memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True) - return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory) - -def process_user_input(user_question): - response = st.session_state.conversation({'question': user_question}) - st.session_state.chat_history = response['chat_history'] - for i, message in enumerate(st.session_state.chat_history): - template = user_template if i % 2 == 0 else bot_template - st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True) - # Save file output from PDF query results - filename = generate_filename(user_question, 'txt') - #create_file(filename, user_question, message.content) - response = message.content - user_prompt = user_question - create_file(filename, user_prompt, response, should_save) - #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - -def divide_prompt(prompt, max_length): - words = prompt.split() - chunks = [] - current_chunk = [] - current_length = 0 - for word in words: - if len(word) + current_length <= max_length: - current_length += len(word) + 1 # Adding 1 to account for spaces - current_chunk.append(word) - else: - chunks.append(' '.join(current_chunk)) - current_chunk = [word] - current_length = len(word) - chunks.append(' '.join(current_chunk)) # Append the final chunk - return chunks - -def create_zip_of_files(files): - """ - Create a zip file from a list of files. - """ - zip_name = "all_files.zip" - with zipfile.ZipFile(zip_name, 'w') as zipf: - for file in files: - zipf.write(file) - return zip_name - - -def get_zip_download_link(zip_file): - """ - Generate a link to download the zip file. 
- """ - with open(zip_file, 'rb') as f: - data = f.read() - b64 = base64.b64encode(data).decode() - href = f'Download All' - return href - - -def main(): - #openai.api_key = os.getenv('OPENAI_API_KEY') - - # File type for output, model choice - menu = ["txt", "htm", "xlsx", "csv", "md", "py"] - choice = st.sidebar.selectbox("Output File Type:", menu) - model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301')) - - # Audio, transcribe, GPT: - filename = save_and_play_audio(audio_recorder) - - if filename is not None: - try: - transcription = transcribe_audio(filename, "whisper-1") - except: - st.write(' ') - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - filename = None - - # prompt interfaces - user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100) - - # file section interface for prompts against large documents as context - collength, colupload = st.columns([2,3]) # adjust the ratio as needed - with collength: - max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000) - with colupload: - uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"]) - - - # Document section chat - - document_sections = deque() - document_responses = {} - if uploaded_file is not None: - file_content = read_file_content(uploaded_file, max_length) - document_sections.extend(divide_document(file_content, max_length)) - if len(document_sections) > 0: - if st.button("👁️ View Upload"): - st.markdown("**Sections of the uploaded file:**") - for i, section in enumerate(list(document_sections)): - st.markdown(f"**Section {i+1}**\n{section}") - st.markdown("**Chat with the model:**") - for i, section in enumerate(list(document_sections)): - if i in document_responses: - st.markdown(f"**Section {i+1}**\n{document_responses[i]}") - else: - if st.button(f"Chat about Section {i+1}"): - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, section, model_choice) # ************************************* - st.write('Response:') - st.write(response) - document_responses[i] = response - filename = generate_filename(f"{user_prompt}_section_{i+1}", choice) - create_file(filename, user_prompt, response, should_save) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - if st.button('💬 Chat'): - st.write('Reasoning with your inputs...') - - #response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # ************************************* - - # Divide the user_prompt into smaller sections - user_prompt_sections = divide_prompt(user_prompt, max_length) - full_response = '' - for prompt_section in user_prompt_sections: - # Process each section with the model - response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice) - full_response += response + '\n' # Combine the responses - - #st.write('Response:') - #st.write(full_response) - - response = full_response - st.write('Response:') - st.write(response) - - filename = generate_filename(user_prompt, choice) - create_file(filename, user_prompt, response, should_save) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - all_files = glob.glob("*.*") - all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names - all_files.sort(key=lambda x: 
(os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order - - # Added "Delete All" button - if st.sidebar.button("🗑 Delete All"): - for file in all_files: - os.remove(file) - st.experimental_rerun() - - # Added "Download All" button - if st.sidebar.button("⬇️ Download All"): - zip_file = create_zip_of_files(all_files) - st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True) - - # Sidebar of Files Saving History and surfacing files as context of prompts and responses - file_contents='' - next_action='' - for file in all_files: - col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed - with col1: - if st.button("🌐", key="md_"+file): # md emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='md' - with col2: - st.markdown(get_table_download_link(file), unsafe_allow_html=True) - with col3: - if st.button("📂", key="open_"+file): # open emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='open' - with col4: - if st.button("🔍", key="read_"+file): # search emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='search' - with col5: - if st.button("🗑", key="delete_"+file): - os.remove(file) - st.experimental_rerun() - - if len(file_contents) > 0: - if next_action=='open': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - if next_action=='md': - st.markdown(file_contents) - if next_action=='search': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, file_contents, model_choice) - filename = generate_filename(file_contents, choice) - create_file(filename, user_prompt, response, should_save) - - st.experimental_rerun() - #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - -if __name__ == "__main__": - main() - -load_dotenv() -st.write(css, unsafe_allow_html=True) - -st.header("Chat with documents :books:") -user_question = st.text_input("Ask a question about your documents:") -if user_question: - process_user_input(user_question) - -with st.sidebar: - st.subheader("Your documents") - docs = st.file_uploader("import documents", accept_multiple_files=True) - with st.spinner("Processing"): - raw = pdf2txt(docs) - if len(raw) > 0: - length = str(len(raw)) - text_chunks = txt2chunks(raw) - vectorstore = vector_store(text_chunks) - st.session_state.conversation = get_chain(vectorstore) - st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing - filename = generate_filename(raw, 'txt') - create_file(filename, raw, '', should_save) - #create_file(filename, raw, '') diff --git a/spaces/awacke1/DatasetAnalyzer1215/app.py b/spaces/awacke1/DatasetAnalyzer1215/app.py deleted file mode 100644 index 4808b175c23bbd4ccf349cdedc5ac90e72bb7c7c..0000000000000000000000000000000000000000 --- a/spaces/awacke1/DatasetAnalyzer1215/app.py +++ /dev/null @@ -1,99 +0,0 @@ -from typing import List, Dict -import httpx -import gradio as gr -import pandas as pd - -async def get_splits(dataset_name: str) -> Dict[str, List[Dict]]: - URL = f"https://datasets-server.huggingface.co/splits?dataset={dataset_name}" - async with httpx.AsyncClient() as session: - response = await session.get(URL) - return response.json() - -async def get_valid_datasets() -> Dict[str, List[str]]: - URL = f"https://datasets-server.huggingface.co/valid" - async with 
httpx.AsyncClient() as session: - response = await session.get(URL) - datasets = response.json()["valid"] - return gr.Dropdown.update(choices=datasets, value="awacke1/ChatbotMemory.csv") - # The one to watch: https://huggingface.co/rungalileo - # rungalileo/medical_transcription_40 - -async def get_first_rows(dataset: str, config: str, split: str) -> Dict[str, Dict[str, List[Dict]]]: - URL = f"https://datasets-server.huggingface.co/first-rows?dataset={dataset}&config={config}&split={split}" - async with httpx.AsyncClient() as session: - response = await session.get(URL) - print(URL) - gr.Markdown(URL) - return response.json() - -def get_df_from_rows(api_output): - dfFromSort = pd.DataFrame([row["row"] for row in api_output["rows"]]) - try: - dfFromSort.sort_values(by=1, axis=1, ascending=True, inplace=False, kind='mergesort', na_position='last', ignore_index=False, key=None) - except: - print("Exception sorting due to keyerror?") - return dfFromSort - -async def update_configs(dataset_name: str): - splits = await get_splits(dataset_name) - all_configs = sorted(set([s["config"] for s in splits["splits"]])) - return (gr.Dropdown.update(choices=all_configs, value=all_configs[0]), - splits) - -async def update_splits(config_name: str, state: gr.State): - splits_for_config = sorted(set([s["split"] for s in state["splits"] if s["config"] == config_name])) - dataset_name = state["splits"][0]["dataset"] - dataset = await update_dataset(splits_for_config[0], config_name, dataset_name) - return (gr.Dropdown.update(choices=splits_for_config, value=splits_for_config[0]), dataset) - -async def update_dataset(split_name: str, config_name: str, dataset_name: str): - rows = await get_first_rows(dataset_name, config_name, split_name) - df = get_df_from_rows(rows) - return df - -# Guido von Roissum: https://www.youtube.com/watch?v=-DVyjdw4t9I -async def update_URL(dataset: str, config: str, split: str) -> str: - URL = f"https://datasets-server.huggingface.co/first-rows?dataset={dataset}&config={config}&split={split}" - URL = f"https://huggingface.co/datasets/{split}" - return (URL) - -async def openurl(URL: str) -> str: - html = f"{URL}" - return (html) - -with gr.Blocks() as demo: - gr.Markdown("

    🥫Datasets🎨

    ") - gr.Markdown("""
    Curated Datasets: Kaggle. NLM UMLS. LOINC. ICD10 Diagnosis. ICD11. Papers,Code,Datasets for SOTA in Medicine. Mental. Behavior. CMS Downloads. CMS CPT and HCPCS Procedures and Services """) - - splits_data = gr.State() - - with gr.Row(): - dataset_name = gr.Dropdown(label="Dataset", interactive=True) - config = gr.Dropdown(label="Subset", interactive=True) - split = gr.Dropdown(label="Split", interactive=True) - - with gr.Row(): - #filterleft = gr.Textbox(label="First Column Filter",placeholder="Filter Column 1") - URLcenter = gr.Textbox(label="Dataset URL", placeholder="URL") - btn = gr.Button("Use Dataset") - #URLoutput = gr.Textbox(label="Output",placeholder="URL Output") - URLoutput = gr.HTML(label="Output",placeholder="URL Output") - - with gr.Row(): - dataset = gr.DataFrame(wrap=True, interactive=True) - - demo.load(get_valid_datasets, inputs=None, outputs=[dataset_name]) - - dataset_name.change(update_configs, inputs=[dataset_name], outputs=[config, splits_data]) - config.change(update_splits, inputs=[config, splits_data], outputs=[split, dataset]) - split.change(update_dataset, inputs=[split, config, dataset_name], outputs=[dataset]) - - dataset_name.change(update_URL, inputs=[split, config, dataset_name], outputs=[URLcenter]) - - btn.click(openurl, [URLcenter], URLoutput) - -demo.launch(debug=True) - -# original: https://huggingface.co/spaces/freddyaboulton/dataset-viewer -- Freddy thanks! Your examples are the best. -# playlist on Gradio and Mermaid: https://www.youtube.com/watch?v=o7kCD4aWMR4&list=PLHgX2IExbFosW7hWNryq8hs2bt2aj91R- -# Link to Mermaid model and code: [![](https://mermaid.ink/img/pako:eNp1U8mO2zAM_RXCZ-eQpZccCmSZTIpOMQESIAdnDrRMx0JkydXSNDOYfy_lpUgD1AfBfnx8fCTlj0SYgpJ5UipzFRVaD4flSQM_YjwafcVJ9-FCfrbYVGA0ZQeLUkt9futiOM72pEh4QFijR9iTf2tzsx3Z0ti6hxslvb_Lm0TSNPvBDhQsg1TFXXAag7NBef_9hdDqFA6knbEbdgvGwu7mjRXVkDOLOV-yNXmytdQEsoROvTfi4EhK9XTSxUNz_mo4uVHm1lPyce-uR1k_n2RHymHRNPAvNXaTT7NVZYwjeDECVbS4UiYUAyc2lc-yFoPXxkujHaAl2G54PCjIpfBssZAGtsZ5KlLYkjWXkMLiuOfjPVhiymr3_x4qS7wicneTFuMW6Gdxlb6Cb7oJvt1LbEpMso08sza8MnqskA9jL27Ij72Jafb0G-tGkQNTdgKOy_XcFP5GDxFbWsJLV3FQid2LWfZsfpHVqAXBCBYa1e2dAHUBu5Ar6dgby0ghPWxQWk2Oh_L0M0h_S2Ep0YHUrXFHXD_msefo5XEkfFWBK8atdkA7mgfoalpATJI0qfnWoCz4b_iI0VPiK6rplMz5taASg_Kn5KQ_mYrBm_1Ni2TubaA0CU2BntYSeQl1Mi9ROfr8A8FBGds?type=png)](https://mermaid.live/edit#pako:eNp1U8mO2zAM_RXCZ-eQpZccCmSZTIpOMQESIAdnDrRMx0JkydXSNDOYfy_lpUgD1AfBfnx8fCTlj0SYgpJ5UipzFRVaD4flSQM_YjwafcVJ9-FCfrbYVGA0ZQeLUkt9futiOM72pEh4QFijR9iTf2tzsx3Z0ti6hxslvb_Lm0TSNPvBDhQsg1TFXXAag7NBef_9hdDqFA6knbEbdgvGwu7mjRXVkDOLOV-yNXmytdQEsoROvTfi4EhK9XTSxUNz_mo4uVHm1lPyce-uR1k_n2RHymHRNPAvNXaTT7NVZYwjeDECVbS4UiYUAyc2lc-yFoPXxkujHaAl2G54PCjIpfBssZAGtsZ5KlLYkjWXkMLiuOfjPVhiymr3_x4qS7wicneTFuMW6Gdxlb6Cb7oJvt1LbEpMso08sza8MnqskA9jL27Ij72Jafb0G-tGkQNTdgKOy_XcFP5GDxFbWsJLV3FQid2LWfZsfpHVqAXBCBYa1e2dAHUBu5Ar6dgby0ghPWxQWk2Oh_L0M0h_S2Ep0YHUrXFHXD_msefo5XEkfFWBK8atdkA7mgfoalpATJI0qfnWoCz4b_iI0VPiK6rplMz5taASg_Kn5KQ_mYrBm_1Ni2TubaA0CU2BntYSeQl1Mi9ROfr8A8FBGds) diff --git a/spaces/awacke1/Docker.VSCode.Integration.HF/on_startup.sh b/spaces/awacke1/Docker.VSCode.Integration.HF/on_startup.sh deleted file mode 100644 index 448000271bbc7142681947fd1a447772f12ecfff..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Docker.VSCode.Integration.HF/on_startup.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# Write some commands here that will run on root user before startup. 
-# For example, to clone transformers and install it in dev mode: -# git clone https://github.com/huggingface/transformers.git -# cd transformers && pip install -e ".[dev]" \ No newline at end of file diff --git a/spaces/awacke1/VizLib-Keras-n-Plotly/backup-app.py b/spaces/awacke1/VizLib-Keras-n-Plotly/backup-app.py deleted file mode 100644 index 77de9fd7ca6026b45c2edc8bcb900011eff42b60..0000000000000000000000000000000000000000 --- a/spaces/awacke1/VizLib-Keras-n-Plotly/backup-app.py +++ /dev/null @@ -1,46 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -import plotly.graph_objs as go -from keras.preprocessing.text import Tokenizer - -# Set up the Streamlit app -st.set_page_config(page_title='Keras and Plotly Example') -st.sidebar.title('Word Frequency') - -# Generate random collections of words and save to CSV file -def generate_data(): - data = [] - for i in range(5): - words = np.random.choice(['apple', 'banana', 'cherry', 'date', 'elderberry'], size=100) - data.append(' '.join(words)) - df = pd.DataFrame({'text': data}) - df.to_csv('word_data.csv', index=False) - -# Load data from CSV file -def load_data(): - df = pd.read_csv('word_data.csv') - return df - -# Create a bar chart of word frequency -def plot_word_frequency(text): - tokenizer = Tokenizer() - tokenizer.fit_on_texts(text) - word_counts = tokenizer.word_counts - words = list(word_counts.keys()) - counts = list(word_counts.values()) - fig = go.Figure([go.Bar(x=words, y=counts)]) - fig.update_layout(title='Word Frequency') - st.plotly_chart(fig) - -# Main Streamlit app -if st.sidebar.button('Generate Data'): - generate_data() - st.sidebar.write('Data generated') -else: - st.sidebar.write('Click "Generate Data" to generate new data') - -df = load_data() -text = df['text'].tolist() -st.write(df) -plot_word_frequency(text) \ No newline at end of file diff --git a/spaces/awacke1/acw-dr-llama-7b-chat/app.py b/spaces/awacke1/acw-dr-llama-7b-chat/app.py deleted file mode 100644 index 65ac4f5913b1b86be748ed4cb05f5f5ef040d456..0000000000000000000000000000000000000000 --- a/spaces/awacke1/acw-dr-llama-7b-chat/app.py +++ /dev/null @@ -1,797 +0,0 @@ -# Imports -import base64 -import glob -import json -import math -import openai -import os -import pytz -import re -import requests -import streamlit as st -import textract -import time -import zipfile -import huggingface_hub -import dotenv -from audio_recorder_streamlit import audio_recorder -from bs4 import BeautifulSoup -from collections import deque -from datetime import datetime -from dotenv import load_dotenv -from huggingface_hub import InferenceClient -from io import BytesIO -from langchain.chat_models import ChatOpenAI -from langchain.chains import ConversationalRetrievalChain -from langchain.embeddings import OpenAIEmbeddings -from langchain.memory import ConversationBufferMemory -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import FAISS -from openai import ChatCompletion -from PyPDF2 import PdfReader -from templates import bot_template, css, user_template -from xml.etree import ElementTree as ET -import streamlit.components.v1 as components # Import Streamlit Components for HTML5 - - -st.set_page_config(page_title="🐪Llama Whisperer🦙 Voice Chat🌟", layout="wide") - - -def add_Med_Licensing_Exam_Dataset(): - import streamlit as st - from datasets import load_dataset - dataset = load_dataset("augtoma/usmle_step_1")['test'] # Using 'test' split - st.title("USMLE Step 1 Dataset Viewer") - if len(dataset) == 0: - st.write("😢 
The dataset is empty.") - else: - st.write(""" - 🔍 Use the search box to filter questions or use the grid to scroll through the dataset. - """) - - # 👩‍🔬 Search Box - search_term = st.text_input("Search for a specific question:", "") - - # 🎛 Pagination - records_per_page = 100 - num_records = len(dataset) - num_pages = max(int(num_records / records_per_page), 1) - - # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page) - if num_pages > 1: - page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1))) - else: - page_number = 1 # Only one page - - # 📊 Display Data - start_idx = (page_number - 1) * records_per_page - end_idx = start_idx + records_per_page - - # 🧪 Apply the Search Filter - filtered_data = [] - for record in dataset[start_idx:end_idx]: - if isinstance(record, dict) and 'text' in record and 'id' in record: - if search_term: - if search_term.lower() in record['text'].lower(): - st.markdown(record) - filtered_data.append(record) - else: - filtered_data.append(record) - - # 🌐 Render the Grid - for record in filtered_data: - st.write(f"## Question ID: {record['id']}") - st.write(f"### Question:") - st.write(f"{record['text']}") - st.write(f"### Answer:") - st.write(f"{record['answer']}") - st.write("---") - - st.write(f"😊 Total Records: {num_records} | 📄 Displaying {start_idx+1} to {min(end_idx, num_records)}") - -# 1. Constants and Top Level UI Variables - -# My Inference API Copy -# API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama -# Original: -API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf" -API_KEY = os.getenv('API_KEY') -MODEL1="meta-llama/Llama-2-7b-chat-hf" -MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf" -HF_KEY = os.getenv('HF_KEY') -headers = { - "Authorization": f"Bearer {HF_KEY}", - "Content-Type": "application/json" -} -key = os.getenv('OPENAI_API_KEY') -prompt = f"Write instructions to teach anyone to write a discharge plan. List the entities, features and relationships to CCDA and FHIR objects in boldface." -should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.") - -# 2. Prompt label button demo for LLM -def add_witty_humor_buttons(): - with st.expander("Wit and Humor 🤣", expanded=True): - # Tip about the Dromedary family - st.markdown("🔬 **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. 
Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.") - - # Define button descriptions - descriptions = { - "Generate Limericks 😂": "Write ten random adult limericks based on quotes that are tweet length and make you laugh 🎭", - "Wise Quotes 🧙": "Generate ten wise quotes that are tweet length 🦉", - "Funny Rhymes 🎤": "Create ten funny rhymes that are tweet length 🎶", - "Medical Jokes 💉": "Create ten medical jokes that are tweet length 🏥", - "Minnesota Humor ❄️": "Create ten jokes about Minnesota that are tweet length 🌨️", - "Top Funny Stories 📖": "Create ten funny stories that are tweet length 📚", - "More Funny Rhymes 🎙️": "Create ten more funny rhymes that are tweet length 🎵" - } - - # Create columns - col1, col2, col3 = st.columns([1, 1, 1], gap="small") - - # Add buttons to columns - if col1.button("Generate Limericks 😂"): - StreamLLMChatResponse(descriptions["Generate Limericks 😂"]) - - if col2.button("Wise Quotes 🧙"): - StreamLLMChatResponse(descriptions["Wise Quotes 🧙"]) - - if col3.button("Funny Rhymes 🎤"): - StreamLLMChatResponse(descriptions["Funny Rhymes 🎤"]) - - col4, col5, col6 = st.columns([1, 1, 1], gap="small") - - if col4.button("Medical Jokes 💉"): - StreamLLMChatResponse(descriptions["Medical Jokes 💉"]) - - if col5.button("Minnesota Humor ❄️"): - StreamLLMChatResponse(descriptions["Minnesota Humor ❄️"]) - - if col6.button("Top Funny Stories 📖"): - StreamLLMChatResponse(descriptions["Top Funny Stories 📖"]) - - col7 = st.columns(1, gap="small") - - if col7[0].button("More Funny Rhymes 🎙️"): - StreamLLMChatResponse(descriptions["More Funny Rhymes 🎙️"]) - -def SpeechSynthesis(result): - documentHTML5=''' - - - - Read It Aloud - - - -

    🔊 Read It Aloud
    - - - - ''' - - components.html(documentHTML5, width=1280, height=1024) - #return result - - -# 3. Stream Llama Response -# @st.cache_resource -def StreamLLMChatResponse(prompt): - try: - endpoint_url = API_URL - hf_token = API_KEY - client = InferenceClient(endpoint_url, token=hf_token) - gen_kwargs = dict( - max_new_tokens=512, - top_k=30, - top_p=0.9, - temperature=0.2, - repetition_penalty=1.02, - stop_sequences=["\nUser:", "<|endoftext|>", "
    "], - ) - stream = client.text_generation(prompt, stream=True, details=True, **gen_kwargs) - report=[] - res_box = st.empty() - collected_chunks=[] - collected_messages=[] - allresults='' - for r in stream: - if r.token.special: - continue - if r.token.text in gen_kwargs["stop_sequences"]: - break - collected_chunks.append(r.token.text) - chunk_message = r.token.text - collected_messages.append(chunk_message) - try: - report.append(r.token.text) - if len(r.token.text) > 0: - result="".join(report).strip() - res_box.markdown(f'*{result}*') - - except: - st.write('Stream llm issue') - SpeechSynthesis(result) - return result - except: - st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).') - -# 4. Run query with payload -def query(payload): - response = requests.post(API_URL, headers=headers, json=payload) - st.markdown(response.json()) - return response.json() -def get_output(prompt): - return query({"inputs": prompt}) - -# 5. Auto name generated output files from time and content -def generate_filename(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%H%M") - replaced_prompt = prompt.replace(" ", "_").replace("\n", "_") - safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:45] - return f"{safe_date_time}_{safe_prompt}.{file_type}" - -# 6. Speech transcription via OpenAI service -def transcribe_audio(openai_key, file_path, model): - openai.api_key = openai_key - OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions" - headers = { - "Authorization": f"Bearer {openai_key}", - } - with open(file_path, 'rb') as f: - data = {'file': f} - response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model}) - if response.status_code == 200: - st.write(response.json()) - chatResponse = chat_with_model(response.json().get('text'), '') # ************************************* - transcript = response.json().get('text') - filename = generate_filename(transcript, 'txt') - response = chatResponse - user_prompt = transcript - create_file(filename, user_prompt, response, should_save) - return transcript - else: - st.write(response.json()) - st.error("Error in API call.") - return None - -# 7. Auto stop on silence audio control for recording WAV files -def save_and_play_audio(audio_recorder): - audio_bytes = audio_recorder(key='audio_recorder') - if audio_bytes: - filename = generate_filename("Recording", "wav") - with open(filename, 'wb') as f: - f.write(audio_bytes) - st.audio(audio_bytes, format="audio/wav") - return filename - return None - -# 8. 
File creator that interprets type and creates output file for text, markdown and code -def create_file(filename, prompt, response, should_save=True): - if not should_save: - return - base_filename, ext = os.path.splitext(filename) - if ext in ['.txt', '.htm', '.md']: - with open(f"{base_filename}.md", 'w') as file: - try: - content = prompt.strip() + '\r\n' + response - file.write(content) - except: - st.write('.') - - #has_python_code = re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response) - #has_python_code = bool(re.search(r"```python([\s\S]*?)```", prompt.strip() + '\r\n' + response)) - #if has_python_code: - # python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip() - # with open(f"{base_filename}-Code.py", 'w') as file: - # file.write(python_code) - # with open(f"{base_filename}.md", 'w') as file: - # content = prompt.strip() + '\r\n' + response - # file.write(content) - -def truncate_document(document, length): - return document[:length] -def divide_document(document, max_length): - return [document[i:i+max_length] for i in range(0, len(document), max_length)] - -# 9. Sidebar with UI controls to review and re-run prompts and continue responses -@st.cache_resource -def get_table_download_link(file_path): - with open(file_path, 'r') as file: - data = file.read() - - b64 = base64.b64encode(data.encode()).decode() - file_name = os.path.basename(file_path) - ext = os.path.splitext(file_name)[1] # get the file extension - if ext == '.txt': - mime_type = 'text/plain' - elif ext == '.py': - mime_type = 'text/plain' - elif ext == '.xlsx': - mime_type = 'text/plain' - elif ext == '.csv': - mime_type = 'text/plain' - elif ext == '.htm': - mime_type = 'text/html' - elif ext == '.md': - mime_type = 'text/markdown' - else: - mime_type = 'application/octet-stream' # general binary data type - href = f'{file_name}' - return href - - -def CompressXML(xml_text): - root = ET.fromstring(xml_text) - for elem in list(root.iter()): - if isinstance(elem.tag, str) and 'Comment' in elem.tag: - elem.parent.remove(elem) - return ET.tostring(root, encoding='unicode', method="xml") - -# 10. Read in and provide UI for past files -@st.cache_resource -def read_file_content(file,max_length): - if file.type == "application/json": - content = json.load(file) - return str(content) - elif file.type == "text/html" or file.type == "text/htm": - content = BeautifulSoup(file, "html.parser") - return content.text - elif file.type == "application/xml" or file.type == "text/xml": - tree = ET.parse(file) - root = tree.getroot() - xml = CompressXML(ET.tostring(root, encoding='unicode')) - return xml - elif file.type == "text/markdown" or file.type == "text/md": - md = mistune.create_markdown() - content = md(file.read().decode()) - return content - elif file.type == "text/plain": - return file.getvalue().decode() - else: - return "" - -# 11. 
Chat with GPT - Caution on quota - now favoring fastest AI pipeline STT Whisper->LLM Llama->TTS -@st.cache_resource -def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'): - model = model_choice - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(document_section)>0: - conversation.append({'role': 'assistant', 'content': document_section}) - start_time = time.time() - report = [] - res_box = st.empty() - collected_chunks = [] - collected_messages = [] - for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True): - collected_chunks.append(chunk) - chunk_message = chunk['choices'][0]['delta'] - collected_messages.append(chunk_message) - content=chunk["choices"][0].get("delta",{}).get("content") - try: - report.append(content) - if len(content) > 0: - result = "".join(report).strip() - res_box.markdown(f'*{result}*') - except: - st.write(' ') - full_reply_content = ''.join([m.get('content', '') for m in collected_messages]) - st.write("Elapsed time:") - st.write(time.time() - start_time) - return full_reply_content - -# 12. Embedding VectorDB for LLM query of documents to text to compress inputs and prompt together as Chat memory using Langchain -@st.cache_resource -def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'): - conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}] - conversation.append({'role': 'user', 'content': prompt}) - if len(file_content)>0: - conversation.append({'role': 'assistant', 'content': file_content}) - response = openai.ChatCompletion.create(model=model_choice, messages=conversation) - return response['choices'][0]['message']['content'] - -def extract_mime_type(file): - if isinstance(file, str): - pattern = r"type='(.*?)'" - match = re.search(pattern, file) - if match: - return match.group(1) - else: - raise ValueError(f"Unable to extract MIME type from {file}") - elif isinstance(file, streamlit.UploadedFile): - return file.type - else: - raise TypeError("Input should be a string or a streamlit.UploadedFile object") - -def extract_file_extension(file): - # get the file name directly from the UploadedFile object - file_name = file.name - pattern = r".*?\.(.*?)$" - match = re.search(pattern, file_name) - if match: - return match.group(1) - else: - raise ValueError(f"Unable to extract file extension from {file_name}") - -# Normalize input as text from PDF and other formats -@st.cache_resource -def pdf2txt(docs): - text = "" - for file in docs: - file_extension = extract_file_extension(file) - st.write(f"File type extension: {file_extension}") - if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']: - text += file.getvalue().decode('utf-8') - elif file_extension.lower() == 'pdf': - from PyPDF2 import PdfReader - pdf = PdfReader(BytesIO(file.getvalue())) - for page in range(len(pdf.pages)): - text += pdf.pages[page].extract_text() # new PyPDF2 syntax - return text - -def txt2chunks(text): - text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len) - return text_splitter.split_text(text) - -# Vector Store using FAISS -@st.cache_resource -def vector_store(text_chunks): - embeddings = OpenAIEmbeddings(openai_api_key=key) - return FAISS.from_texts(texts=text_chunks, embedding=embeddings) - -# Memory and Retrieval chains -@st.cache_resource -def get_chain(vectorstore): - llm = 
ChatOpenAI() - memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True) - return ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory) - -def process_user_input(user_question): - response = st.session_state.conversation({'question': user_question}) - st.session_state.chat_history = response['chat_history'] - for i, message in enumerate(st.session_state.chat_history): - template = user_template if i % 2 == 0 else bot_template - st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True) - filename = generate_filename(user_question, 'txt') - response = message.content - user_prompt = user_question - create_file(filename, user_prompt, response, should_save) - -def divide_prompt(prompt, max_length): - words = prompt.split() - chunks = [] - current_chunk = [] - current_length = 0 - for word in words: - if len(word) + current_length <= max_length: - current_length += len(word) + 1 - current_chunk.append(word) - else: - chunks.append(' '.join(current_chunk)) - current_chunk = [word] - current_length = len(word) - chunks.append(' '.join(current_chunk)) - return chunks - - -# 13. Provide way of saving all and deleting all to give way of reviewing output and saving locally before clearing it - -@st.cache_resource -def create_zip_of_files(files): - zip_name = "all_files.zip" - with zipfile.ZipFile(zip_name, 'w') as zipf: - for file in files: - zipf.write(file) - return zip_name - -@st.cache_resource -def get_zip_download_link(zip_file): - with open(zip_file, 'rb') as f: - data = f.read() - b64 = base64.b64encode(data).decode() - href = f'Download All' - return href - -# 14. Inference Endpoints for Whisper (best fastest STT) on NVIDIA T4 and Llama (best fastest AGI LLM) on NVIDIA A10 -# My Inference Endpoint -#API_URL_IE = f'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud' -# Original -#API_URL_IE = "https://api-inference.huggingface.co/models/openai/whisper-small.en" -# A10 Inference Endpoint for whisper large tests -API_URL_IE = "https://hifdvffh2em0wn50.us-east-1.aws.endpoints.huggingface.cloud" - -MODEL2 = "openai/whisper-small.en" -MODEL2_URL = "https://huggingface.co/openai/whisper-small.en" -#headers = { -# "Authorization": "Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", -# "Content-Type": "audio/wav" -#} -HF_KEY = os.getenv('HF_KEY') -headers = { - "Authorization": f"Bearer {HF_KEY}", - "Content-Type": "audio/wav" -} - -#@st.cache_resource -def query(filename): - with open(filename, "rb") as f: - data = f.read() - response = requests.post(API_URL_IE, headers=headers, data=data) - return response.json() - -def generate_filename(prompt, file_type): - central = pytz.timezone('US/Central') - safe_date_time = datetime.now(central).strftime("%m%d_%H%M") - replaced_prompt = prompt.replace(" ", "_").replace("\n", "_") - safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90] - return f"{safe_date_time}_{safe_prompt}.{file_type}" - -# 15. Audio recorder to Wav file -def save_and_play_audio(audio_recorder): - audio_bytes = audio_recorder() - if audio_bytes: - filename = generate_filename("Recording", "wav") - with open(filename, 'wb') as f: - f.write(audio_bytes) - st.audio(audio_bytes, format="audio/wav") - return filename - -# 16. 
Speech transcription to file output -def transcribe_audio(filename): - output = query(filename) - return output - - -def whisper_main(): - st.title("Speech to Text") - st.write("Record your speech and get the text.") - - # Audio, transcribe, GPT: - filename = save_and_play_audio(audio_recorder) - if filename is not None: - transcription = transcribe_audio(filename) - #try: - - transcript = transcription['text'] - #except: - #st.write('Whisper model is asleep. Starting up now on T4 GPU - please give 5 minutes then retry as it scales up from zero to activate running container(s).') - - st.write(transcript) - response = StreamLLMChatResponse(transcript) - # st.write(response) - redundant with streaming result? - filename = generate_filename(transcript, ".txt") - create_file(filename, transcript, response, should_save) - #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - -import streamlit as st - -# Sample function to demonstrate a response, replace with your own logic -def StreamMedChatResponse(topic): - st.write(f"Showing resources or questions related to: {topic}") - -def add_multi_system_agent_topics(): - with st.expander("Multi-System Agent AI Topics 🤖", expanded=True): - st.markdown("🤖 **Explore Multi-System Agent AI Topics**: This section provides a variety of topics related to multi-system agent AI systems.") - - # Define multi-system agent AI topics and descriptions - descriptions = { - "Reinforcement Learning 🎮": "Questions related to reinforcement learning algorithms and applications 🕹️", - "Natural Language Processing 🗣️": "Questions about natural language processing techniques and chatbot development 🗨️", - "Multi-Agent Systems 🤝": "Questions pertaining to multi-agent systems and cooperative AI interactions 🤖", - "Conversational AI 🗨️": "Questions on building conversational AI agents and chatbots for various platforms 💬", - "Distributed AI Systems 🌐": "Questions about distributed AI systems and their implementation in networked environments 🌐", - "AI Ethics and Bias 🤔": "Questions related to ethics and bias considerations in AI systems and decision-making 🧠", - "AI in Healthcare 🏥": "Questions about the application of AI in healthcare and medical diagnosis 🩺", - "AI in Autonomous Vehicles 🚗": "Questions on the use of AI in autonomous vehicles and self-driving technology 🚗" - } - - # Create columns - col1, col2, col3, col4 = st.columns([1, 1, 1, 1], gap="small") - - # Add buttons to columns - if col1.button("Reinforcement Learning 🎮"): - st.write(descriptions["Reinforcement Learning 🎮"]) - StreamLLMChatResponse(descriptions["Reinforcement Learning 🎮"]) - - if col2.button("Natural Language Processing 🗣️"): - st.write(descriptions["Natural Language Processing 🗣️"]) - StreamLLMChatResponse(descriptions["Natural Language Processing 🗣️"]) - - if col3.button("Multi-Agent Systems 🤝"): - st.write(descriptions["Multi-Agent Systems 🤝"]) - StreamLLMChatResponse(descriptions["Multi-Agent Systems 🤝"]) - - if col4.button("Conversational AI 🗨️"): - st.write(descriptions["Conversational AI 🗨️"]) - StreamLLMChatResponse(descriptions["Conversational AI 🗨️"]) - - col5, col6, col7, col8 = st.columns([1, 1, 1, 1], gap="small") - - if col5.button("Distributed AI Systems 🌐"): - st.write(descriptions["Distributed AI Systems 🌐"]) - StreamLLMChatResponse(descriptions["Distributed AI Systems 🌐"]) - - if col6.button("AI Ethics and Bias 🤔"): - st.write(descriptions["AI Ethics and Bias 🤔"]) - StreamLLMChatResponse(descriptions["AI Ethics and Bias 🤔"]) - - if col7.button("AI in 
Healthcare 🏥"): - st.write(descriptions["AI in Healthcare 🏥"]) - StreamLLMChatResponse(descriptions["AI in Healthcare 🏥"]) - - if col8.button("AI in Autonomous Vehicles 🚗"): - st.write(descriptions["AI in Autonomous Vehicles 🚗"]) - StreamLLMChatResponse(descriptions["AI in Autonomous Vehicles 🚗"]) - - -# 17. Main -def main(): - - st.title("Try Some Topics:") - prompt = f"Write ten funny jokes that are tweet length stories that make you laugh. Show as markdown outline with emojis for each." - - # Add Wit and Humor buttons - # add_witty_humor_buttons() - # Calling the function to add the multi-system agent AI topics buttons - add_multi_system_agent_topics() - - example_input = st.text_input("Enter your example text:", value=prompt, help="Enter text to get a response from DromeLlama.") - if st.button("Run Prompt With DromeLlama", help="Click to run the prompt."): - try: - StreamLLMChatResponse(example_input) - except: - st.write('DromeLlama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).') - - openai.api_key = os.getenv('OPENAI_KEY') - menu = ["txt", "htm", "xlsx", "csv", "md", "py"] - choice = st.sidebar.selectbox("Output File Type:", menu) - model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301')) - user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100) - collength, colupload = st.columns([2,3]) # adjust the ratio as needed - with collength: - max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000) - with colupload: - uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"]) - document_sections = deque() - document_responses = {} - if uploaded_file is not None: - file_content = read_file_content(uploaded_file, max_length) - document_sections.extend(divide_document(file_content, max_length)) - if len(document_sections) > 0: - if st.button("👁️ View Upload"): - st.markdown("**Sections of the uploaded file:**") - for i, section in enumerate(list(document_sections)): - st.markdown(f"**Section {i+1}**\n{section}") - st.markdown("**Chat with the model:**") - for i, section in enumerate(list(document_sections)): - if i in document_responses: - st.markdown(f"**Section {i+1}**\n{document_responses[i]}") - else: - if st.button(f"Chat about Section {i+1}"): - st.write('Reasoning with your inputs...') - response = chat_with_model(user_prompt, section, model_choice) - st.write('Response:') - st.write(response) - document_responses[i] = response - filename = generate_filename(f"{user_prompt}_section_{i+1}", choice) - create_file(filename, user_prompt, response, should_save) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - if st.button('💬 Chat'): - st.write('Reasoning with your inputs...') - user_prompt_sections = divide_prompt(user_prompt, max_length) - full_response = '' - for prompt_section in user_prompt_sections: - response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice) - full_response += response + '\n' # Combine the responses - response = full_response - st.write('Response:') - st.write(response) - filename = generate_filename(user_prompt, choice) - create_file(filename, user_prompt, response, should_save) - st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True) - - # Compose a file sidebar of past encounters - all_files = 
glob.glob("*.*") - all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names - all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order - if st.sidebar.button("🗑 Delete All"): - for file in all_files: - os.remove(file) - st.experimental_rerun() - if st.sidebar.button("⬇️ Download All"): - zip_file = create_zip_of_files(all_files) - st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True) - file_contents='' - next_action='' - for file in all_files: - col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed - with col1: - if st.button("🌐", key="md_"+file): # md emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='md' - with col2: - st.markdown(get_table_download_link(file), unsafe_allow_html=True) - with col3: - if st.button("📂", key="open_"+file): # open emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='open' - with col4: - if st.button("🔍", key="read_"+file): # search emoji button - with open(file, 'r') as f: - file_contents = f.read() - next_action='search' - with col5: - if st.button("🗑", key="delete_"+file): - os.remove(file) - st.experimental_rerun() - - - if len(file_contents) > 0: - if next_action=='open': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - if next_action=='md': - st.markdown(file_contents) - if next_action=='search': - file_content_area = st.text_area("File Contents:", file_contents, height=500) - st.write('Reasoning with your inputs...') - - # new - llama - response = StreamLLMChatResponse(file_contents) - filename = generate_filename(user_prompt, ".md") - create_file(filename, file_contents, response, should_save) - SpeechSynthesis(response) - - # old - gpt - #response = chat_with_model(user_prompt, file_contents, model_choice) - #filename = generate_filename(file_contents, choice) - #create_file(filename, user_prompt, response, should_save) - - st.experimental_rerun() - - # Feedback - # Step: Give User a Way to Upvote or Downvote - feedback = st.radio("Step 8: Give your feedback", ("👍 Upvote", "👎 Downvote")) - if feedback == "👍 Upvote": - st.write("You upvoted 👍. Thank you for your feedback!") - else: - st.write("You downvoted 👎. Thank you for your feedback!") - - load_dotenv() - st.write(css, unsafe_allow_html=True) - st.header("Chat with documents :books:") - user_question = st.text_input("Ask a question about your documents:") - if user_question: - process_user_input(user_question) - with st.sidebar: - st.subheader("Your documents") - docs = st.file_uploader("import documents", accept_multiple_files=True) - with st.spinner("Processing"): - raw = pdf2txt(docs) - if len(raw) > 0: - length = str(len(raw)) - text_chunks = txt2chunks(raw) - vectorstore = vector_store(text_chunks) - st.session_state.conversation = get_chain(vectorstore) - st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing - filename = generate_filename(raw, 'txt') - create_file(filename, raw, '', should_save) - -# 18. 
Run AI Pipeline -if __name__ == "__main__": - whisper_main() - main() - add_Med_Licensing_Exam_Dataset() \ No newline at end of file diff --git a/spaces/awsaf49/gcvit-tf/gcvit/layers/feature.py b/spaces/awsaf49/gcvit-tf/gcvit/layers/feature.py deleted file mode 100644 index 29c2ad988af05cf643b7a73baabaa1418f8a38c3..0000000000000000000000000000000000000000 --- a/spaces/awsaf49/gcvit-tf/gcvit/layers/feature.py +++ /dev/null @@ -1,255 +0,0 @@ -import tensorflow as tf -import tensorflow_addons as tfa - -H_AXIS = -3 -W_AXIS = -2 - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class Mlp(tf.keras.layers.Layer): - def __init__(self, hidden_features=None, out_features=None, act_layer='gelu', dropout=0., **kwargs): - super().__init__(**kwargs) - self.hidden_features = hidden_features - self.out_features = out_features - self.act_layer = act_layer - self.dropout = dropout - - def build(self, input_shape): - self.in_features = input_shape[-1] - self.hidden_features = self.hidden_features or self.in_features - self.out_features = self.out_features or self.in_features - self.fc1 = tf.keras.layers.Dense(self.hidden_features, name="fc1") - self.act = tf.keras.layers.Activation(self.act_layer, name="act") - self.fc2 = tf.keras.layers.Dense(self.out_features, name="fc2") - self.drop1 = tf.keras.layers.Dropout(self.dropout, name="drop1") - self.drop2 = tf.keras.layers.Dropout(self.dropout, name="drop2") - super().build(input_shape) - - def call(self, inputs, **kwargs): - x = self.fc1(inputs) - x = self.act(x) - x = self.drop1(x) - x = self.fc2(x) - x = self.drop2(x) - return x - - def get_config(self): - config = super().get_config() - config.update({ - "hidden_features":self.hidden_features, - "out_features":self.out_features, - "act_layer":self.act_layer, - "dropout":self.dropout - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class SE(tf.keras.layers.Layer): - def __init__(self, oup=None, expansion=0.25, **kwargs): - super().__init__(**kwargs) - self.expansion = expansion - self.oup = oup - - def build(self, input_shape): - inp = input_shape[-1] - self.oup = self.oup or inp - self.avg_pool = tfa.layers.AdaptiveAveragePooling2D(1, name="avg_pool") - self.fc = [ - tf.keras.layers.Dense(int(inp * self.expansion), use_bias=False, name='fc/0'), - tf.keras.layers.Activation('gelu', name='fc/1'), - tf.keras.layers.Dense(self.oup, use_bias=False, name='fc/2'), - tf.keras.layers.Activation('sigmoid', name='fc/3') - ] - super().build(input_shape) - - def call(self, inputs, **kwargs): - b, _, _, c = tf.unstack(tf.shape(inputs), num=4) - x = tf.reshape(self.avg_pool(inputs), (b, c)) - for layer in self.fc: - x = layer(x) - x = tf.reshape(x, (b, 1, 1, c)) - return x*inputs - - def get_config(self): - config = super().get_config() - config.update({ - 'expansion': self.expansion, - 'oup': self.oup, - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class ReduceSize(tf.keras.layers.Layer): - def __init__(self, keep_dim=False, **kwargs): - super().__init__(**kwargs) - self.keep_dim = keep_dim - - def build(self, input_shape): - dim = input_shape[-1] - dim_out = dim if self.keep_dim else 2*dim - self.pad1 = tf.keras.layers.ZeroPadding2D(1, name='pad1') - self.pad2 = tf.keras.layers.ZeroPadding2D(1, name='pad2') - self.conv = [ - tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='valid', use_bias=False, name='conv/0'), - tf.keras.layers.Activation('gelu', name='conv/1'), - SE(name='conv/2'), - 
tf.keras.layers.Conv2D(dim, kernel_size=1, strides=1, padding='valid', use_bias=False, name='conv/3') - ] - self.reduction = tf.keras.layers.Conv2D(dim_out, kernel_size=3, strides=2, padding='valid', use_bias=False, - name='reduction') - self.norm1 = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-05, name='norm1') # eps like PyTorch - self.norm2 = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-05, name='norm2') - super().build(input_shape) - - def call(self, inputs, **kwargs): - x = self.norm1(inputs) - xr = self.pad1(x) # if pad had weights it would've thrown error with .save_weights() - for layer in self.conv: - xr = layer(xr) - x = x + xr - x = self.pad2(x) - x = self.reduction(x) - x = self.norm2(x) - return x - - def get_config(self): - config = super().get_config() - config.update({ - "keep_dim":self.keep_dim, - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class FeatExtract(tf.keras.layers.Layer): - def __init__(self, keep_dim=False, **kwargs): - super().__init__(**kwargs) - self.keep_dim = keep_dim - - def build(self, input_shape): - dim = input_shape[-1] - self.pad1 = tf.keras.layers.ZeroPadding2D(1, name='pad1') - self.pad2 = tf.keras.layers.ZeroPadding2D(1, name='pad2') - self.conv = [ - tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='valid', use_bias=False, name='conv/0'), - tf.keras.layers.Activation('gelu', name='conv/1'), - SE(name='conv/2'), - tf.keras.layers.Conv2D(dim, kernel_size=1, strides=1, padding='valid', use_bias=False, name='conv/3') - ] - if not self.keep_dim: - self.pool = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', name='pool') - # else: - # self.pool = tf.keras.layers.Activation('linear', name='identity') # hack for PyTorch nn.Identity layer ;) - super().build(input_shape) - - def call(self, inputs, **kwargs): - x = inputs - xr = self.pad1(x) - for layer in self.conv: - xr = layer(xr) - x = x + xr # if pad had weights it would've thrown error with .save_weights() - if not self.keep_dim: - x = self.pad2(x) - x = self.pool(x) - return x - - def get_config(self): - config = super().get_config() - config.update({ - "keep_dim":self.keep_dim, - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class GlobalQueryGen(tf.keras.layers.Layer): - """ - Global query generator based on: "Hatamizadeh et al., - Global Context Vision Transformers " - """ - def __init__(self, keep_dims=False, **kwargs): - super().__init__(**kwargs) - self.keep_dims = keep_dims - - def build(self, input_shape): - self.to_q_global = [FeatExtract(keep_dim, name=f'to_q_global/{i}') \ - for i, keep_dim in enumerate(self.keep_dims)] - super().build(input_shape) - - def call(self, inputs, **kwargs): - x = inputs - for layer in self.to_q_global: - x = layer(x) - return x - - def get_config(self): - config = super().get_config() - config.update({ - "keep_dims":self.keep_dims, - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class Resizing(tf.keras.layers.Layer): - def __init__(self, - height, - width, - interpolation='bilinear', - **kwargs): - self.height = height - self.width = width - self.interpolation = interpolation - super().__init__(**kwargs) - - def call(self, inputs): - # tf.image.resize will always output float32 and operate more efficiently on - # float32 unless interpolation is nearest, in which case ouput type matches - # input type. 
- if self.interpolation == 'nearest': - input_dtype = self.compute_dtype - else: - input_dtype = tf.float32 - inputs = tf.cast(inputs, dtype=input_dtype) - size = [self.height, self.width] - outputs = tf.image.resize( - inputs, - size=size, - method=self.interpolation) - return tf.cast(outputs, self.compute_dtype) - - def compute_output_shape(self, input_shape): - input_shape = tf.TensorShape(input_shape).as_list() - input_shape[H_AXIS] = self.height - input_shape[W_AXIS] = self.width - return tf.TensorShape(input_shape) - - def get_config(self): - config = super().get_config() - config.update({ - 'height': self.height, - 'width': self.width, - 'interpolation': self.interpolation, - }) - return config - -@tf.keras.utils.register_keras_serializable(package="gcvit") -class FitWindow(tf.keras.layers.Layer): - "Pad feature to fit window" - def __init__(self, window_size, **kwargs): - super().__init__(**kwargs) - self.window_size = window_size - - def call(self, inputs): - B, H, W, C = tf.unstack(tf.shape(inputs), num=4) - # pad to multiple of window_size - h_pad = (self.window_size - H % self.window_size) % self.window_size - w_pad = (self.window_size - W % self.window_size) % self.window_size - x = tf.pad(inputs, [[0, 0], - [h_pad//2, (h_pad//2 + h_pad%2)], # padding in both directions unlike tfgcvit - [w_pad//2, (w_pad//2 + w_pad%2)], - [0, 0]]) - return x - - def get_config(self): - config = super().get_config() - config.update({ - 'window_size': self.window_size, - }) - return config \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/LDrawLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/LDrawLoader.js deleted file mode 100644 index 139413c1b11d1ad53e21c33d86e22769f1f2edc9..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/LDrawLoader.js +++ /dev/null @@ -1,1330 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - * @author yomboprime / https://github.com/yomboprime/ - * - * - */ - -THREE.LDrawLoader = ( function () { - - function LineParser( line, lineNumber ) { - - this.line = line; - this.lineLength = line.length; - this.currentCharIndex = 0; - this.currentChar = ' '; - this.lineNumber = lineNumber; - - } - - LineParser.prototype = { - - constructor: LineParser, - - seekNonSpace: function () { - - while ( this.currentCharIndex < this.lineLength ) { - - this.currentChar = this.line.charAt( this.currentCharIndex ); - - if ( this.currentChar !== ' ' && this.currentChar !== '\t' ) { - - return; - - } - - this.currentCharIndex ++; - - } - - }, - - getToken: function () { - - var pos0 = this.currentCharIndex ++; - - // Seek space - while ( this.currentCharIndex < this.lineLength ) { - - this.currentChar = this.line.charAt( this.currentCharIndex ); - - if ( this.currentChar === ' ' || this.currentChar === '\t' ) { - - break; - - } - - this.currentCharIndex ++; - - } - - var pos1 = this.currentCharIndex; - - this.seekNonSpace(); - - return this.line.substring( pos0, pos1 ); - - }, - - getRemainingString: function () { - - return this.line.substring( this.currentCharIndex, this.lineLength ); - - }, - - isAtTheEnd: function () { - - return this.currentCharIndex >= this.lineLength; - - }, - - setToEnd: function () { - - this.currentCharIndex = this.lineLength; - - }, - - getLineNumberString: function () { - - return this.lineNumber >= 0 ? 
" at line " + this.lineNumber : ""; - - } - - - }; - - function sortByMaterial( a, b ) { - - if ( a.colourCode === b.colourCode ) { - - return 0; - - } - - if ( a.colourCode < b.colourCode ) { - - return - 1; - - } - - return 1; - - } - - function createObject( elements, elementSize ) { - - // Creates a THREE.LineSegments (elementSize = 2) or a THREE.Mesh (elementSize = 3 ) - // With per face / segment material, implemented with mesh groups and materials array - - // Sort the triangles or line segments by colour code to make later the mesh groups - elements.sort( sortByMaterial ); - - var vertices = []; - var materials = []; - - var bufferGeometry = new THREE.BufferGeometry(); - bufferGeometry.clearGroups(); - var prevMaterial = null; - var index0 = 0; - var numGroupVerts = 0; - - for ( var iElem = 0, nElem = elements.length; iElem < nElem; iElem ++ ) { - - var elem = elements[ iElem ]; - var v0 = elem.v0; - var v1 = elem.v1; - // Note that LDraw coordinate system is rotated 180 deg. in the X axis w.r.t. Three.js's one - vertices.push( v0.x, v0.y, v0.z, v1.x, v1.y, v1.z ); - if ( elementSize === 3 ) { - - vertices.push( elem.v2.x, elem.v2.y, elem.v2.z ); - - } - - if ( prevMaterial !== elem.material ) { - - if ( prevMaterial !== null ) { - - bufferGeometry.addGroup( index0, numGroupVerts, materials.length - 1 ); - - } - - materials.push( elem.material ); - - prevMaterial = elem.material; - index0 = iElem * elementSize; - numGroupVerts = elementSize; - - } else { - - numGroupVerts += elementSize; - - } - - } - - if ( numGroupVerts > 0 ) { - - bufferGeometry.addGroup( index0, Infinity, materials.length - 1 ); - - } - - bufferGeometry.addAttribute( 'position', new THREE.Float32BufferAttribute( vertices, 3 ) ); - - var object3d = null; - - if ( elementSize === 2 ) { - - object3d = new THREE.LineSegments( bufferGeometry, materials ); - - } else if ( elementSize === 3 ) { - - bufferGeometry.computeVertexNormals(); - - object3d = new THREE.Mesh( bufferGeometry, materials ); - - } - - return object3d; - - } - - // - - function LDrawLoader( manager ) { - - this.manager = ( manager !== undefined ) ? manager : THREE.DefaultLoadingManager; - - // This is a stack of 'parse scopes' with one level per subobject loaded file. - // Each level contains a material lib and also other runtime variables passed between parent and child subobjects - // When searching for a material code, the stack is read from top of the stack to bottom - // Each material library is an object map keyed by colour codes. - this.parseScopesStack = null; - - this.path = ''; - - // Array of THREE.Material - this.materials = []; - - // Not using THREE.Cache here because it returns the previous HTML error response instead of calling onError() - // This also allows to handle the embedded text files ("0 FILE" lines) - this.subobjectCache = {}; - - // This object is a map from file names to paths. It agilizes the paths search. If it is not set then files will be searched by trial and error. - this.fileMap = null; - - // Add default main triangle and line edge materials (used in piecess that can be coloured with a main color) - this.setMaterials( [ - this.parseColourMetaDirective( new LineParser( "Main_Colour CODE 16 VALUE #FF8080 EDGE #333333" ) ), - this.parseColourMetaDirective( new LineParser( "Edge_Colour CODE 24 VALUE #A0A0A0 EDGE #333333" ) ) - ] ); - - // If this flag is set to true, each subobject will be a THREE.Object. - // If not (the default), only one object which contains all the merged primitives will be created. 
- this.separateObjects = false; - - // Current merged object and primitives - this.currentGroupObject = null; - this.currentTriangles = null; - this.currentLineSegments = null; - - } - - // Special surface finish tag types. - // Note: "MATERIAL" tag (e.g. GLITTER, SPECKLE) is not implemented - LDrawLoader.FINISH_TYPE_DEFAULT = 0; - LDrawLoader.FINISH_TYPE_CHROME = 1; - LDrawLoader.FINISH_TYPE_PEARLESCENT = 2; - LDrawLoader.FINISH_TYPE_RUBBER = 3; - LDrawLoader.FINISH_TYPE_MATTE_METALLIC = 4; - LDrawLoader.FINISH_TYPE_METAL = 5; - - // State machine to search a subobject path. - // The LDraw standard establishes these various possible subfolders. - LDrawLoader.FILE_LOCATION_AS_IS = 0; - LDrawLoader.FILE_LOCATION_TRY_PARTS = 1; - LDrawLoader.FILE_LOCATION_TRY_P = 2; - LDrawLoader.FILE_LOCATION_TRY_MODELS = 3; - LDrawLoader.FILE_LOCATION_TRY_RELATIVE = 4; - LDrawLoader.FILE_LOCATION_TRY_ABSOLUTE = 5; - LDrawLoader.FILE_LOCATION_NOT_FOUND = 6; - - LDrawLoader.prototype = { - - constructor: LDrawLoader, - - load: function ( url, onLoad, onProgress, onError ) { - - if ( ! this.fileMap ) { - - this.fileMap = {}; - - } - - var scope = this; - - var fileLoader = new THREE.FileLoader( this.manager ); - fileLoader.setPath( this.path ); - fileLoader.load( url, function ( text ) { - - processObject( text, onLoad ); - - }, onProgress, onError ); - - function processObject( text, onProcessed ) { - - var parseScope = scope.newParseScopeLevel(); - parseScope.url = url; - - var parentParseScope = scope.getParentParseScope(); - - // Add to cache - var currentFileName = parentParseScope.currentFileName; - if ( scope.subobjectCache[ currentFileName ] === undefined ) { - - scope.subobjectCache[ currentFileName ] = text; - - - } - - // Parse the object (returns a THREE.Group) - var objGroup = scope.parse( text ); - - // Load subobjects - parseScope.subobjects = objGroup.userData.subobjects; - parseScope.numSubobjects = parseScope.subobjects.length; - parseScope.subobjectIndex = 0; - - if ( parseScope.numSubobjects > 0 ) { - - // Load the first subobject - var subobjectGroup = loadSubobject( parseScope.subobjects[ 0 ], true ); - - // Optimization for loading pack: If subobjects are obtained from cache, keep loading them iteratively rather than recursively - if ( subobjectGroup ) { - - while ( subobjectGroup && parseScope.subobjectIndex < parseScope.numSubobjects - 1 ) { - - subobjectGroup = loadSubobject( parseScope.subobjects[ ++ parseScope.subobjectIndex ], true ); - - } - - if ( subobjectGroup ) { - - finalizeObject(); - - } - - } - - } else { - - // No subobjects, finish object - finalizeObject(); - - } - - return objGroup; - - function finalizeObject() { - - if ( ! scope.separateObjects && ! parentParseScope.isFromParse ) { - - // We are finalizing the root object and merging primitives is activated, so create the entire Mesh and LineSegments objects now - if ( scope.currentLineSegments.length > 0 ) { - - objGroup.add( createObject( scope.currentLineSegments, 2 ) ); - - } - - if ( scope.currentTriangles.length > 0 ) { - - objGroup.add( createObject( scope.currentTriangles, 3 ) ); - - } - - } - - scope.removeScopeLevel(); - - if ( onProcessed ) { - - onProcessed( objGroup ); - - } - - } - - function loadSubobject( subobject, sync ) { - - parseScope.mainColourCode = subobject.material.userData.code; - parseScope.mainEdgeColourCode = subobject.material.userData.edgeMaterial.userData.code; - parseScope.currentFileName = subobject.originalFileName; - - if ( ! 
scope.separateObjects ) { - - // Set current matrix - parseScope.currentMatrix.multiplyMatrices( parentParseScope.currentMatrix, subobject.matrix ); - - } - - // If subobject was cached previously, use the cached one - var cached = scope.subobjectCache[ subobject.originalFileName ]; - if ( cached ) { - - var subobjectGroup = processObject( cached, sync ? undefined : onSubobjectLoaded ); - if ( sync ) { - - addSubobject( subobject, subobjectGroup ); - return subobjectGroup; - - } - - return; - - } - - // Adjust file name to locate the subobject file path in standard locations (always under directory scope.path) - // Update also subobject.locationState for the next try if this load fails. - var subobjectURL = subobject.fileName; - var newLocationState = LDrawLoader.FILE_LOCATION_NOT_FOUND; - - switch ( subobject.locationState ) { - - case LDrawLoader.FILE_LOCATION_AS_IS: - newLocationState = subobject.locationState + 1; - break; - - case LDrawLoader.FILE_LOCATION_TRY_PARTS: - subobjectURL = 'parts/' + subobjectURL; - newLocationState = subobject.locationState + 1; - break; - - case LDrawLoader.FILE_LOCATION_TRY_P: - subobjectURL = 'p/' + subobjectURL; - newLocationState = subobject.locationState + 1; - break; - - case LDrawLoader.FILE_LOCATION_TRY_MODELS: - subobjectURL = 'models/' + subobjectURL; - newLocationState = subobject.locationState + 1; - break; - - case LDrawLoader.FILE_LOCATION_TRY_RELATIVE: - subobjectURL = url.substring( 0, url.lastIndexOf( "/" ) + 1 ) + subobjectURL; - newLocationState = subobject.locationState + 1; - break; - - case LDrawLoader.FILE_LOCATION_TRY_ABSOLUTE: - - if ( subobject.triedLowerCase ) { - - // Try absolute path - newLocationState = LDrawLoader.FILE_LOCATION_NOT_FOUND; - - } else { - - // Next attempt is lower case - subobject.fileName = subobject.fileName.toLowerCase(); - subobjectURL = subobject.fileName; - subobject.triedLowerCase = true; - newLocationState = LDrawLoader.FILE_LOCATION_AS_IS; - - } - break; - - case LDrawLoader.FILE_LOCATION_NOT_FOUND: - - // All location possibilities have been tried, give up loading this object - console.warn( 'LDrawLoader: Subobject "' + subobject.originalFileName + '" could not be found.' ); - - // Try to read the next subobject - parseScope.subobjectIndex ++; - - if ( parseScope.subobjectIndex >= parseScope.numSubobjects ) { - - // All subojects have been loaded. 
Finish parent object - scope.removeScopeLevel(); - onProcessed( objGroup ); - - } else { - - // Load next subobject - loadSubobject( parseScope.subobjects[ parseScope.subobjectIndex ] ); - - } - - return; - - } - - subobject.locationState = newLocationState; - subobject.url = subobjectURL; - - // Load the subobject - scope.load( subobjectURL, onSubobjectLoaded, undefined, onSubobjectError ); - - } - - function onSubobjectLoaded( subobjectGroup ) { - - var subobject = parseScope.subobjects[ parseScope.subobjectIndex ]; - - if ( subobjectGroup === null ) { - - // Try to reload - loadSubobject( subobject ); - return; - - } - - // Add the subobject just loaded - addSubobject( subobject, subobjectGroup ); - - // Proceed to load the next subobject, or finish the parent object - - parseScope.subobjectIndex ++; - - if ( parseScope.subobjectIndex < parseScope.numSubobjects ) { - - loadSubobject( parseScope.subobjects[ parseScope.subobjectIndex ] ); - - } else { - - finalizeObject(); - - } - - } - - function addSubobject( subobject, subobjectGroup ) { - - if ( scope.separateObjects ) { - - subobjectGroup.name = subobject.fileName; - objGroup.add( subobjectGroup ); - subobjectGroup.matrix.copy( subobject.matrix ); - subobjectGroup.matrixAutoUpdate = false; - - } - - scope.fileMap[ subobject.originalFileName ] = subobject.url; - - } - - function onSubobjectError( err ) { - - // Retry download from a different default possible location - loadSubobject( parseScope.subobjects[ parseScope.subobjectIndex ] ); - - } - - } - - }, - - setPath: function ( value ) { - - this.path = value; - - return this; - - }, - - setMaterials: function ( materials ) { - - // Clears parse scopes stack, adds new scope with material library - - this.parseScopesStack = []; - - this.newParseScopeLevel( materials ); - - this.getCurrentParseScope().isFromParse = false; - - this.materials = materials; - - this.currentGroupObject = null; - - return this; - - }, - - setFileMap: function ( fileMap ) { - - this.fileMap = fileMap; - - return this; - - }, - - newParseScopeLevel: function ( materials ) { - - // Adds a new scope level, assign materials to it and returns it - - var matLib = {}; - - if ( materials ) { - - for ( var i = 0, n = materials.length; i < n; i ++ ) { - - var material = materials[ i ]; - matLib[ material.userData.code ] = material; - - } - - } - - var topParseScope = this.getCurrentParseScope(); - - var parentParseScope = this.getParentParseScope(); - - var newParseScope = { - - lib: matLib, - url: null, - - // Subobjects - subobjects: null, - numSubobjects: 0, - subobjectIndex: 0, - - // Current subobject - currentFileName: null, - mainColourCode: topParseScope ? topParseScope.mainColourCode : '16', - mainEdgeColourCode: topParseScope ? topParseScope.mainEdgeColourCode : '24', - currentMatrix: new THREE.Matrix4(), - - // If false, it is a root material scope previous to parse - isFromParse: true - }; - - this.parseScopesStack.push( newParseScope ); - - return newParseScope; - - }, - - removeScopeLevel: function () { - - this.parseScopesStack.pop(); - - return this; - - }, - - addMaterial: function ( material ) { - - // Adds a material to the material library which is on top of the parse scopes stack. And also to the materials array - - var matLib = this.getCurrentParseScope().lib; - - if ( ! 
matLib[ material.userData.code ] ) { - - this.materials.push( material ); - - } - - matLib[ material.userData.code ] = material; - - return this; - - }, - - getMaterial: function ( colourCode ) { - - // Given a colour code search its material in the parse scopes stack - - if ( colourCode.startsWith( "0x2" ) ) { - - // Special 'direct' material value (RGB colour) - - var colour = colourCode.substring( 3 ); - - return this.parseColourMetaDirective( new LineParser( "Direct_Color_" + colour + " CODE -1 VALUE #" + colour + " EDGE #" + colour + "" ) ); - - } - - for ( var i = this.parseScopesStack.length - 1; i >= 0; i -- ) { - - var material = this.parseScopesStack[ i ].lib[ colourCode ]; - - if ( material ) { - - return material; - - } - - } - - // Material was not found - return null; - - }, - - getParentParseScope: function () { - - if ( this.parseScopesStack.length > 1 ) { - - return this.parseScopesStack[ this.parseScopesStack.length - 2 ]; - - } - - return null; - - }, - - getCurrentParseScope: function () { - - if ( this.parseScopesStack.length > 0 ) { - - return this.parseScopesStack[ this.parseScopesStack.length - 1 ]; - - } - - return null; - - }, - - parseColourMetaDirective: function ( lineParser ) { - - // Parses a colour definition and returns a THREE.Material or null if error - - var code = null; - - // Triangle and line colours - var colour = 0xFF00FF; - var edgeColour = 0xFF00FF; - - // Transparency - var alpha = 1; - var isTransparent = false; - // Self-illumination: - var luminance = 0; - - var finishType = LDrawLoader.FINISH_TYPE_DEFAULT; - var canHaveEnvMap = true; - - var edgeMaterial = null; - - var name = lineParser.getToken(); - if ( ! name ) { - - throw 'LDrawLoader: Material name was expected after "!COLOUR tag' + lineParser.getLineNumberString() + "."; - - } - - // Parse tag tokens and their parameters - var token = null; - while ( true ) { - - token = lineParser.getToken(); - - if ( ! token ) { - - break; - - } - - switch ( token.toUpperCase() ) { - - case "CODE": - - code = lineParser.getToken(); - break; - - case "VALUE": - - colour = lineParser.getToken(); - if ( colour.startsWith( '0x' ) ) { - - colour = '#' + colour.substring( 2 ); - - } else if ( ! colour.startsWith( '#' ) ) { - - throw 'LDrawLoader: Invalid colour while parsing material' + lineParser.getLineNumberString() + "."; - - } - break; - - case "EDGE": - - edgeColour = lineParser.getToken(); - if ( edgeColour.startsWith( '0x' ) ) { - - edgeColour = '#' + edgeColour.substring( 2 ); - - } else if ( ! edgeColour.startsWith( '#' ) ) { - - // Try to see if edge colour is a colour code - edgeMaterial = this.getMaterial( edgeColour ); - if ( ! 
edgeMaterial ) { - - throw 'LDrawLoader: Invalid edge colour while parsing material' + lineParser.getLineNumberString() + "."; - - } - - // Get the edge material for this triangle material - edgeMaterial = edgeMaterial.userData.edgeMaterial; - - } - break; - - case 'ALPHA': - - alpha = parseInt( lineParser.getToken() ); - - if ( isNaN( alpha ) ) { - - throw 'LDrawLoader: Invalid alpha value in material definition' + lineParser.getLineNumberString() + "."; - - } - - alpha = Math.max( 0, Math.min( 1, alpha / 255 ) ); - - if ( alpha < 1 ) { - - isTransparent = true; - - } - - break; - - case 'LUMINANCE': - - luminance = parseInt( lineParser.getToken() ); - - if ( isNaN( luminance ) ) { - - throw 'LDrawLoader: Invalid luminance value in material definition' + LineParser.getLineNumberString() + "."; - - } - - luminance = Math.max( 0, Math.min( 1, luminance / 255 ) ); - - break; - - case 'CHROME': - finishType = LDrawLoader.FINISH_TYPE_CHROME; - break; - - case 'PEARLESCENT': - finishType = LDrawLoader.FINISH_TYPE_PEARLESCENT; - break; - - case 'RUBBER': - finishType = LDrawLoader.FINISH_TYPE_RUBBER; - break; - - case 'MATTE_METALLIC': - finishType = LDrawLoader.FINISH_TYPE_MATTE_METALLIC; - break; - - case 'METAL': - finishType = LDrawLoader.FINISH_TYPE_METAL; - break; - - case 'MATERIAL': - // Not implemented - lineParser.setToEnd(); - break; - - default: - throw 'LDrawLoader: Unknown token "' + token + '" while parsing material' + lineParser.getLineNumberString() + "."; - break; - - } - - } - - var material = null; - - switch ( finishType ) { - - case LDrawLoader.FINISH_TYPE_DEFAULT: - case LDrawLoader.FINISH_TYPE_PEARLESCENT: - - var specular = new THREE.Color( colour ); - var shininess = 35; - var hsl = specular.getHSL( { h: 0, s: 0, l: 0 } ); - - if ( finishType === LDrawLoader.FINISH_TYPE_DEFAULT ) { - - // Default plastic material with shiny specular - hsl.l = Math.min( 1, hsl.l + ( 1 - hsl.l ) * 0.12 ); - - } else { - - // Try to imitate pearlescency by setting the specular to the complementary of the color, and low shininess - hsl.h = ( hsl.h + 0.5 ) % 1; - hsl.l = Math.min( 1, hsl.l + ( 1 - hsl.l ) * 0.7 ); - shininess = 10; - - } - - specular.setHSL( hsl.h, hsl.s, hsl.l ); - - material = new THREE.MeshPhongMaterial( { color: colour, specular: specular, shininess: shininess, reflectivity: 0.3 } ); - break; - - case LDrawLoader.FINISH_TYPE_CHROME: - - // Mirror finish surface - material = new THREE.MeshStandardMaterial( { color: colour, roughness: 0, metalness: 1 } ); - break; - - case LDrawLoader.FINISH_TYPE_RUBBER: - - // Rubber is best simulated with Lambert - material = new THREE.MeshLambertMaterial( { color: colour } ); - canHaveEnvMap = false; - break; - - case LDrawLoader.FINISH_TYPE_MATTE_METALLIC: - - // Brushed metal finish - material = new THREE.MeshStandardMaterial( { color: colour, roughness: 0.8, metalness: 0.4 } ); - break; - - case LDrawLoader.FINISH_TYPE_METAL: - - // Average metal finish - material = new THREE.MeshStandardMaterial( { color: colour, roughness: 0.2, metalness: 0.85 } ); - break; - - default: - // Should not happen - break; - - } - - // BFC (Back Face Culling) LDraw language meta extension is not implemented, so set all materials double-sided: - material.side = THREE.DoubleSide; - - material.transparent = isTransparent; - material.opacity = alpha; - - material.userData.canHaveEnvMap = canHaveEnvMap; - - if ( luminance !== 0 ) { - - material.emissive.set( material.color ).multiplyScalar( luminance ); - - } - - if ( ! 
edgeMaterial ) { - - // This is the material used for edges - edgeMaterial = new THREE.LineBasicMaterial( { color: edgeColour } ); - edgeMaterial.userData.code = code; - edgeMaterial.name = name + " - Edge"; - edgeMaterial.userData.canHaveEnvMap = false; - - } - - material.userData.code = code; - material.name = name; - - material.userData.edgeMaterial = edgeMaterial; - - return material; - - }, - - // - - parse: function ( text ) { - - //console.time( 'LDrawLoader' ); - - // Retrieve data from the parent parse scope - var parentParseScope = this.getParentParseScope(); - - // Main colour codes passed to this subobject (or default codes 16 and 24 if it is the root object) - var mainColourCode = parentParseScope.mainColourCode; - var mainEdgeColourCode = parentParseScope.mainEdgeColourCode; - - var url = parentParseScope.url; - - var currentParseScope = this.getCurrentParseScope(); - - // Parse result variables - var triangles; - var lineSegments; - - if ( this.separateObjects ) { - - triangles = []; - lineSegments = []; - - } else { - - if ( this.currentGroupObject === null ) { - - this.currentGroupObject = new THREE.Group(); - this.currentTriangles = []; - this.currentLineSegments = []; - - } - - triangles = this.currentTriangles; - lineSegments = this.currentLineSegments; - - } - - var subobjects = []; - - var category = null; - var keywords = null; - - if ( text.indexOf( '\r\n' ) !== - 1 ) { - - // This is faster than String.split with regex that splits on both - text = text.replace( /\r\n/g, '\n' ); - - } - - var lines = text.split( '\n' ); - var numLines = lines.length; - var lineIndex = 0; - - var parsingEmbeddedFiles = false; - var currentEmbeddedFileName = null; - var currentEmbeddedText = null; - - var scope = this; - function parseColourCode( lineParser, forEdge ) { - - // Parses next colour code and returns a THREE.Material - - var colourCode = lineParser.getToken(); - - if ( ! forEdge && colourCode === '16' ) { - - colourCode = mainColourCode; - - } - if ( forEdge && colourCode === '24' ) { - - colourCode = mainEdgeColourCode; - - } - - var material = scope.getMaterial( colourCode ); - - if ( ! material ) { - - throw 'LDrawLoader: Unknown colour code "' + colourCode + '" is used' + lineParser.getLineNumberString() + ' but it was not defined previously.'; - - } - - return material; - - } - - function parseVector( lp ) { - - var v = new THREE.Vector3( parseFloat( lp.getToken() ), parseFloat( lp.getToken() ), parseFloat( lp.getToken() ) ); - - if ( ! 
scope.separateObjects ) { - - v.applyMatrix4( parentParseScope.currentMatrix ); - - } - - return v; - - } - - // Parse all line commands - for ( lineIndex = 0; lineIndex < numLines; lineIndex ++ ) { - - var line = lines[ lineIndex ]; - - if ( line.length === 0 ) continue; - - if ( parsingEmbeddedFiles ) { - - if ( line.startsWith( '0 FILE ' ) ) { - - // Save previous embedded file in the cache - this.subobjectCache[ currentEmbeddedFileName ] = currentEmbeddedText; - - // New embedded text file - currentEmbeddedFileName = line.substring( 7 ); - currentEmbeddedText = ''; - - } else { - - currentEmbeddedText += line + '\n'; - - } - - continue; - - } - - var lp = new LineParser( line, lineIndex + 1 ); - - lp.seekNonSpace(); - - if ( lp.isAtTheEnd() ) { - - // Empty line - continue; - - } - - // Parse the line type - var lineType = lp.getToken(); - - switch ( lineType ) { - - // Line type 0: Comment or META - case '0': - - // Parse meta directive - var meta = lp.getToken(); - - if ( meta ) { - - switch ( meta ) { - - case '!COLOUR': - - var material = this.parseColourMetaDirective( lp ); - if ( material ) { - - this.addMaterial( material ); - - } else { - - console.warn( 'LDrawLoader: Error parsing material' + lp.getLineNumberString() ); - - } - break; - - case '!CATEGORY': - - category = lp.getToken(); - break; - - case '!KEYWORDS': - - var newKeywords = lp.getRemainingString().split( ',' ); - if ( newKeywords.length > 0 ) { - - if ( ! keywords ) { - - keywords = []; - - } - - newKeywords.forEach( function ( keyword ) { - - keywords.push( keyword.trim() ); - - } ); - - } - break; - - case 'FILE': - - if ( lineIndex > 0 ) { - - // Start embedded text files parsing - parsingEmbeddedFiles = true; - currentEmbeddedFileName = lp.getRemainingString(); - currentEmbeddedText = ''; - - } - - break; - - default: - // Other meta directives are not implemented - break; - - } - - } - - break; - - // Line type 1: Sub-object file - case '1': - - var material = parseColourCode( lp ); - - var posX = parseFloat( lp.getToken() ); - var posY = parseFloat( lp.getToken() ); - var posZ = parseFloat( lp.getToken() ); - var m0 = parseFloat( lp.getToken() ); - var m1 = parseFloat( lp.getToken() ); - var m2 = parseFloat( lp.getToken() ); - var m3 = parseFloat( lp.getToken() ); - var m4 = parseFloat( lp.getToken() ); - var m5 = parseFloat( lp.getToken() ); - var m6 = parseFloat( lp.getToken() ); - var m7 = parseFloat( lp.getToken() ); - var m8 = parseFloat( lp.getToken() ); - - var matrix = new THREE.Matrix4().set( - m0, m1, m2, posX, - m3, m4, m5, posY, - m6, m7, m8, posZ, - 0, 0, 0, 1 - ); - - var fileName = lp.getRemainingString().trim().replace( "\\", "/" ); - - if ( scope.fileMap[ fileName ] ) { - - // Found the subobject path in the preloaded file path map - fileName = scope.fileMap[ fileName ]; - - } else { - - // Standardized subfolders - if ( fileName.startsWith( 's/' ) ) { - - fileName = 'parts/' + fileName; - - } else if ( fileName.startsWith( '48/' ) ) { - - fileName = 'p/' + fileName; - - } - - } - - subobjects.push( { - material: material, - matrix: matrix, - fileName: fileName, - originalFileName: fileName, - locationState: LDrawLoader.FILE_LOCATION_AS_IS, - url: null, - triedLowerCase: false - } ); - - break; - - // Line type 2: Line segment - case '2': - - var material = parseColourCode( lp, true ); - - lineSegments.push( { - material: material.userData.edgeMaterial, - colourCode: material.userData.code, - v0: parseVector( lp ), - v1: parseVector( lp ) - } ); - - break; - - // Line type 3: Triangle - 
case '3': - - var material = parseColourCode( lp ); - - triangles.push( { - material: material, - colourCode: material.userData.code, - v0: parseVector( lp ), - v1: parseVector( lp ), - v2: parseVector( lp ) - } ); - - break; - - // Line type 4: Quadrilateral - case '4': - - var material = parseColourCode( lp ); - - var v0 = parseVector( lp ); - var v1 = parseVector( lp ); - var v2 = parseVector( lp ); - var v3 = parseVector( lp ); - - triangles.push( { - material: material, - colourCode: material.userData.code, - v0: v0, - v1: v1, - v2: v2 - } ); - - triangles.push( { - material: material, - colourCode: material.userData.code, - v0: v0, - v1: v2, - v2: v3 - } ); - - break; - - // Line type 5: Optional line - case '5': - // Line type 5 is not implemented - break; - - default: - throw 'LDrawLoader: Unknown line type "' + lineType + '"' + lp.getLineNumberString() + '.'; - break; - - } - - } - - if ( parsingEmbeddedFiles ) { - - this.subobjectCache[ currentEmbeddedFileName ] = currentEmbeddedText; - - } - - // - - var groupObject = null; - - if ( this.separateObjects ) { - - groupObject = new THREE.Group(); - - if ( lineSegments.length > 0 ) { - - groupObject.add( createObject( lineSegments, 2 ) ); - - - } - - if ( triangles.length > 0 ) { - - groupObject.add( createObject( triangles, 3 ) ); - - } - - } else { - - groupObject = this.currentGroupObject; - - } - - groupObject.userData.category = category; - groupObject.userData.keywords = keywords; - groupObject.userData.subobjects = subobjects; - - //console.timeEnd( 'LDrawLoader' ); - - return groupObject; - - } - - }; - - return LDrawLoader; - -} )(); diff --git a/spaces/banana-projects/web3d/node_modules/three/src/lights/Light.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/lights/Light.d.ts deleted file mode 100644 index f893bf4cde1dfc4892f3250b74c4e128b2eeccfc..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/lights/Light.d.ts +++ /dev/null @@ -1,57 +0,0 @@ -import { Color } from './../math/Color'; -import { LightShadow } from './LightShadow'; -import { Object3D } from './../core/Object3D'; - -// Lights ////////////////////////////////////////////////////////////////////////////////// - -/** - * Abstract base class for lights. - */ -export class Light extends Object3D { - constructor(hex?: number | string, intensity?: number); - - color: Color; - intensity: number; - receiveShadow: boolean; - shadow: LightShadow; - /** - * @deprecated Use shadow.camera.fov instead. - */ - shadowCameraFov: any; - /** - * @deprecated Use shadow.camera.left instead. - */ - shadowCameraLeft: any; - /** - * @deprecated Use shadow.camera.right instead. - */ - shadowCameraRight: any; - /** - * @deprecated Use shadow.camera.top instead. - */ - shadowCameraTop: any; - /** - * @deprecated Use shadow.camera.bottom instead. - */ - shadowCameraBottom: any; - /** - * @deprecated Use shadow.camera.near instead. - */ - shadowCameraNear: any; - /** - * @deprecated Use shadow.camera.far instead. - */ - shadowCameraFar: any; - /** - * @deprecated Use shadow.bias instead. - */ - shadowBias: any; - /** - * @deprecated Use shadow.mapSize.width instead. - */ - shadowMapWidth: any; - /** - * @deprecated Use shadow.mapSize.height instead. 
- */ - shadowMapHeight: any; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/loaders/ImageBitmapLoader.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/loaders/ImageBitmapLoader.d.ts deleted file mode 100644 index b26e2fd2af4d1c1717ddf929cd2b203b112c16f1..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/loaders/ImageBitmapLoader.d.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { LoadingManager } from './LoadingManager.js'; - -export class ImageBitmapLoader { - constructor(manager?: LoadingManager); - - manager: LoadingManager; - - setOptions(options: any): ImageBitmapLoader; - load( - url: string, - onLoad?: (response: string | ArrayBuffer) => void, - onProgress?: (request: ProgressEvent) => void, - onError?: (event: ErrorEvent) => void - ): any; - setCrossOrigin(): ImageBitmapLoader; - setPath(path: string): ImageBitmapLoader; -} diff --git a/spaces/benjaminzuckermanbasisscottsdale/Cardiovascular_Disease_Prediction_Service/app.py b/spaces/benjaminzuckermanbasisscottsdale/Cardiovascular_Disease_Prediction_Service/app.py deleted file mode 100644 index 9f9dc1c2a5d871de74e53609c70c35f11da7480c..0000000000000000000000000000000000000000 --- a/spaces/benjaminzuckermanbasisscottsdale/Cardiovascular_Disease_Prediction_Service/app.py +++ /dev/null @@ -1,319 +0,0 @@ -#importing data analysis libraries -import numpy as np -import pandas as pd -import seaborn as sns -import matplotlib.pyplot as plt -from sklearn.metrics import roc_curve, roc_auc_score -from sklearn.metrics import confusion_matrix ,classification_report,precision_score, recall_score ,f1_score -from sklearn.model_selection import train_test_split -from sklearn.linear_model import LogisticRegression -from sklearn.neighbors import KNeighborsClassifier -from sklearn.naive_bayes import GaussianNB -from sklearn.svm import SVC -import warnings -warnings.filterwarnings('ignore') - -#data is from https://www.kaggle.com/datasets/thedevastator/exploring-risk-factors-for-cardiovascular-diseas -data = pd.read_csv('datasetforriskofcardiodisease.csv') -data_num = data[['age','height','weight','ap_hi','ap_lo']] -data_cat = data[['gender','cholesterol','gluc','smoke','alco','active']] -xaxis = ['Age', 'Height', 'Weight', 'Systolic Blood Pressure', 'Diastolic Blood Pressure'] - -for i, col in enumerate(data_num.columns): - plt.hist(data_num[col]) - plt.title(f'Frequency vs. 
{xaxis[i]}') - plt.xlabel(xaxis[i]) - plt.ylabel('Frequency') - #plt.show() - -pd.pivot_table(data, index='cardio', values=['age','height','weight','ap_hi','ap_lo']) - -for i in data_cat.columns: - sns.barplot(x=data_cat[i].value_counts().index,y=data_cat[i].value_counts()).set_title(i) - #plt.show() - -#age and categorical variables -print(pd.pivot_table(data,index='cardio',columns='cholesterol', values='age')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='gluc', values='age')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='smoke', values='age')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='alco', values='age')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='active', values='age')) -#ap_hi (systolic blood pressure) and categorical variables -print(pd.pivot_table(data,index='cardio',columns='cholesterol', values='ap_hi')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='gluc', values='ap_hi')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='smoke', values='ap_hi')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='alco', values='ap_hi')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='active', values='ap_hi')) -#ap_low (diastolic blood pressure) and categorical variables -print(pd.pivot_table(data,index='cardio',columns='cholesterol', values='ap_lo')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='gluc', values='ap_lo')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='smoke', values='ap_lo')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='alco', values='ap_lo')) -print("="*100) -print(pd.pivot_table(data,index='cardio',columns='active', values='ap_lo')) - -for i in data_num.columns: - sns.boxplot(data_num[i]) - plt.title(i) - #plt.show() - -#Getting interquartile range -def outlinefree(dataCol): - sorted(dataCol) - Q1,Q3 = np.percentile(dataCol,[25,75]) - IQR = Q3-Q1 - LowerRange = Q1-(1.5 * IQR) - UpperRange = Q3+(1.5 * IQR) - return LowerRange,UpperRange - -#Removing outliers -lwap_hi,upap_hi = outlinefree(data['ap_hi']) -lwap_lo,upap_lo = outlinefree(data['ap_lo']) - - -data['ap_hi'].replace(list(data[data['ap_hi'] > upap_hi].ap_hi) ,upap_hi,inplace=True) -data['ap_lo'].replace(list(data[data['ap_lo'] > upap_lo].ap_lo) ,upap_lo,inplace=True) - - -features = data.iloc[:,:-1].values -label = data.iloc[:,-1].values - -#------------------------LogisticRegression----------------------- -X_train, X_test, y_train, y_test= train_test_split(features,label, test_size= 0.25, random_state=102) - -classimodel= LogisticRegression() -classimodel.fit(X_train, y_train) -trainscore = classimodel.score(X_train,y_train) -testscore = classimodel.score(X_test,y_test) -print("Logistic Regression-----------------------------------------------------\n") -print("test score: {} train score: {}".format(testscore,trainscore),'\n') - -y_pred = classimodel.predict(X_test) - -#from sklearn.metrics import confusion_matrix -confusion_matrix(y_test, y_pred) - -print(' f1 score: ',f1_score(y_test, y_pred),'\n') -print(' precision score: ',precision_score(y_test, y_pred),'\n') -print(' recall score: ',recall_score(y_test, y_pred),'\n') -print(classification_report(y_test, y_pred)) - -#--------------------------------------K-Nearest Neighbor(KNN)----------------- -X_train, X_test, y_train, y_test= train_test_split(features,label, test_size= 0.25, random_state=193) - - -classifier= KNeighborsClassifier() -knnmodel 
= classifier.fit(X_train, y_train) - -trainscore = knnmodel.score(X_train,y_train) -testscore = knnmodel.score(X_test,y_test) -print("KNN-----------------------------------------------------\n") -print("test score: {} train score: {}".format(testscore,trainscore),'\n') - -y_predknn = knnmodel.predict(X_test) - -print(confusion_matrix(y_test, y_predknn)) - -print("f1_score: ",f1_score(y_test, y_predknn),'\n') -print("precision_score: ",precision_score(y_test, y_predknn),'\n') -print("recall_score: ",recall_score(y_test, y_predknn),'\n') -print(classification_report(y_test, y_predknn)) - -#------------------------------naive bayes--------------------------- -X_train, X_test, y_train, y_test= train_test_split(features,label, test_size= 0.25, random_state=34) - -NBmodel = GaussianNB() -NBmodel.fit(X_train, y_train) - -trainscore = NBmodel.score(X_train,y_train) -testscore = NBmodel.score(X_test,y_test) -print("Naive Bayes-----------------------------------------------------\n") -print("test score: {} train score: {}".format(testscore,trainscore),'\n') -y_predNB = NBmodel.predict(X_test) -print(confusion_matrix(y_test, y_predNB)) - -print("f1_score: ",f1_score(y_test, y_predNB),'\n') -print("precision_score: ",precision_score(y_test, y_predNB),'\n') -print("recall_score: ",recall_score(y_test, y_predNB),'\n') -print(classification_report(y_test, y_predNB)) - - -#-------------------------------- XGBoost ------------------------------------- -import xgboost as xgb -from sklearn.metrics import mean_squared_error -import pandas as pd -import numpy as np - -X_train, X_test, y_train, y_test= train_test_split(features,label, test_size= 0.25, random_state=102) - -XGmodel= xgb.XGBRFClassifier() -XGmodel.fit(X_train, y_train) -trainscore = XGmodel.score(X_train,y_train) -testscore = XGmodel.score(X_test,y_test) -print("XGBoost-----------------------------------------------------\n") -print("test score: {} train score: {}".format(testscore,trainscore),'\n') - -y_predXG = XGmodel.predict(X_test) - -confusion_matrix(y_test, y_pred) - -print("f1_score: ",f1_score(y_test, y_predXG),'\n') -print("precision_score: ",precision_score(y_test, y_predXG),'\n') -print("recall_score: ",recall_score(y_test, y_predXG),'\n') -print(classification_report(y_test, y_predXG),'\n') -print("AREA UNDER CURVES-----------------------------------------------------\n") -#-------------------------------------- LogisticRegression ------------------------------------- -probabilityValues = classimodel.predict_proba(features)[:,1] -#Calculate AUC -auc = roc_auc_score(label,probabilityValues) -print(auc) -#Calculate roc_curve -fpr,tpr, threshold = roc_curve(label,probabilityValues) -plt.plot([0,1],[0,1], linestyle = '--') -plt.plot(fpr,tpr) - -#-------------------------------------- KNeighborsClassifier ------------------------------------- -probabilityValues = knnmodel.predict_proba(features)[:,1] -#Calculate AUC -auc = roc_auc_score(label,probabilityValues) -print(auc) -#Calculate roc_curve -fpr,tpr, threshold = roc_curve(label,probabilityValues) -plt.plot([0,1],[0,1], linestyle = '--') -plt.plot(fpr,tpr) - -#-------------------------------------- naive bayes ------------------------------------- -probabilityValues = NBmodel.predict_proba(features)[:,1] -#Calculate AUC -auc = roc_auc_score(label,probabilityValues) -print(auc) -#Calculate roc_curve -fpr,tpr, threshold = roc_curve(label,probabilityValues) -plt.plot([0,1],[0,1], linestyle = '--') -plt.plot(fpr,tpr) - - - -#-------------------------------------- XGBoost 
------------------------------------- -probabilityValues = XGmodel.predict_proba(features)[:,1] -#Calculate AUC -auc = roc_auc_score(label,probabilityValues) -print(auc) -#Calculate roc_curve -fpr,tpr, threshold = roc_curve(label,probabilityValues) -plt.plot([0,1],[0,1], linestyle = '--') -plt.plot(fpr,tpr) -''' -#--------------------------------------INTERACE TIME LETS GO BOYS----------------------- -from sklearn.feature_extraction.text import CountVectorizer -import joblib -import matplotlib -matplotlib.use("agg") -model_file_name = 'XG_best_model.joblib' -model_folder = 'C:\\Users\\Ben Z\\Downloads\\Models\\' -joblib.dump(XGmodel, model_folder+''+model_file_name) - -#Loading da model -loaded_XG_model = joblib.load(open(model_folder+''+model_file_name, 'rb')) -print (loaded_XG_model) -def make_prediction(value1, checkbox1, value2, value3, value4, value5, value6, value7, checkbox3, checkbox4, checkbox5): - input_array = np.array([value1*365.25, checkbox1, value2, value3, value4, value5, value6, value7, checkbox3, checkbox4, checkbox5]).reshape(1, -1) - prediction = loaded_XG_model.predict(input_array) - info = '' - if prediction[0] == 0: - info = "You are not currently at risk of a cardiovascular disease! ✅" - else: - info = "You are at risk of a cardiovascular disease. I would recommend going to the doctor however, take my advice with a grain of salt as I am an AI model capable of making mistakes. 🚨" - final_info = "The prediction is: {}".format(info) - print (prediction[0]) - return final_info - -input_values = [50.3572895, 1, 168, 62, 110, 80, 1, 1, 0, 0, 1] -result = make_prediction(*input_values) -print(result) - -#------------------------------------------------GRADIO Time lmfao -import gradio as gr - - -headline = "Cardiovascular Disease Risk Prediction Application" -iface = gr.Interface(fn=make_prediction, inputs= - [gr.inputs.Number(label="Age (Years)"), - gr.inputs.Checkbox(label="I am a male"), - gr.inputs.Number(label="Height (cm)"), - gr.inputs.Number(label="Weight (kg)"), - gr.inputs.Number(label="Systolic Blood Pressure (mmHg)"), - gr.inputs.Number(label="Diastolic Blood Pressure (mmHg)"), - gr.inputs.Number(label="Cholesterol (per 20mg/dL)"), - gr.inputs.Number(label="Glucose (per 1 mmol/L)"), - gr.inputs.Checkbox(label="I have smoked."), - gr.inputs.Checkbox(label="I drink more alcohol than I should (>2 cups for men and >1 cup for women)."), - gr.inputs.Checkbox(label="I am physically active.") - ], outputs=gr.outputs.Textbox(label="Prediction Result"), title=headline, theme='soft') - -if __name__ == "__main__": - iface.launch(share=True) -''' -#--------------------------------------INTERACE TIME LETS GO BOYS----------------------- -from sklearn.feature_extraction.text import CountVectorizer -import joblib -import matplotlib -matplotlib.use("agg") -model_file_name = 'XG_best_model.joblib' -model_folder = 'C:\\Users\\Ben Z\\Downloads\\Models\\' -joblib.dump(XGmodel, model_folder+''+model_file_name) - -#Loading da model -loaded_XG_model = joblib.load(open(model_folder+''+model_file_name, 'rb')) -print (loaded_XG_model) -def make_prediction(value1, checkbox1, value2, value3, value4, value5, value6, value7, checkbox3, checkbox4, checkbox5): - checkbox1 = 1 if "Male" in checkbox1 else 0 - input_array = np.array([value1*365.25, checkbox1, value2, value3, value4, value5, value6, value7, checkbox3, checkbox4, checkbox5]).reshape(1, -1) - prediction = loaded_XG_model.predict(input_array) - info = '' - if prediction[0] == 0: - info = "You are not currently at risk of a 
cardiovascular disease! ✅" - else: - info = "You are at risk of a cardiovascular disease. I would recommend going to the doctor however, take my advice with a grain of salt as I am an AI model capable of making mistakes. 🚨" - final_info = "The prediction is: {}".format(info) - return final_info - -#input_values = [50.3572895, 1, 168, 62, 110, 80, 1, 1, 0, 0, 1] -#result = make_prediction(*input_values) -#print(result) - -#------------------------------------------------GRADIO Time lmfao -import gradio as gr - - -headline = "Cardiovascular Disease Risk Prediction Application" -iface = gr.Interface(fn=make_prediction, inputs= - [gr.inputs.Number(label="Age (Years)"), - gr.inputs.CheckboxGroup( - label="Gender", - choices=["Male", "Female"], - ), - gr.inputs.Number(label="Height (cm)"), - gr.inputs.Number(label="Weight (kg)"), - gr.inputs.Number(label="Systolic Blood Pressure (mmHg)"), - gr.inputs.Number(label="Diastolic Blood Pressure (mmHg)"), - gr.inputs.Number(label="Cholesterol (per 20mg/dL)"), - gr.inputs.Number(label="Glucose (per 1 mmol/L)"), - gr.inputs.Checkbox(label="I have smoked."), - gr.inputs.Checkbox(label="I drink more alcohol than I should (>2 cups for men and >1 cup for women)."), - gr.inputs.Checkbox(label="I am physically active.") - ], outputs=gr.outputs.Textbox(label="Prediction Result"), title=headline, theme='soft') - -if __name__ == "__main__": - iface.launch(share=False) \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/animation.py b/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/animation.py deleted file mode 100644 index ccb6dbd206b09ada3627e89398655964fdd72845..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/animation.py +++ /dev/null @@ -1,258 +0,0 @@ -import numpy as np -import cv2 -from functools import reduce -import math -import py3d_tools as p3d -import torch -from einops import rearrange -from .prompt import check_is_number - -# Webui -from modules.shared import state - -def sample_from_cv2(sample: np.ndarray) -> torch.Tensor: - sample = ((sample.astype(float) / 255.0) * 2) - 1 - sample = sample[None].transpose(0, 3, 1, 2).astype(np.float16) - sample = torch.from_numpy(sample) - return sample - -def sample_to_cv2(sample: torch.Tensor, type=np.uint8) -> np.ndarray: - sample_f32 = rearrange(sample.squeeze().cpu().numpy(), "c h w -> h w c").astype(np.float32) - sample_f32 = ((sample_f32 * 0.5) + 0.5).clip(0, 1) - sample_int8 = (sample_f32 * 255) - return sample_int8.astype(type) - -def construct_RotationMatrixHomogenous(rotation_angles): - assert(type(rotation_angles)==list and len(rotation_angles)==3) - RH = np.eye(4,4) - cv2.Rodrigues(np.array(rotation_angles), RH[0:3, 0:3]) - return RH - -# https://en.wikipedia.org/wiki/Rotation_matrix -def getRotationMatrixManual(rotation_angles): - - rotation_angles = [np.deg2rad(x) for x in rotation_angles] - - phi = rotation_angles[0] # around x - gamma = rotation_angles[1] # around y - theta = rotation_angles[2] # around z - - # X rotation - Rphi = np.eye(4,4) - sp = np.sin(phi) - cp = np.cos(phi) - Rphi[1,1] = cp - Rphi[2,2] = Rphi[1,1] - Rphi[1,2] = -sp - Rphi[2,1] = sp - - # Y rotation - Rgamma = np.eye(4,4) - sg = np.sin(gamma) - cg = np.cos(gamma) - Rgamma[0,0] = cg - Rgamma[2,2] = Rgamma[0,0] - Rgamma[0,2] = sg - Rgamma[2,0] = -sg - - # Z rotation (in-image-plane) - Rtheta = np.eye(4,4) - st = np.sin(theta) - ct = 
np.cos(theta) - Rtheta[0,0] = ct - Rtheta[1,1] = Rtheta[0,0] - Rtheta[0,1] = -st - Rtheta[1,0] = st - - R = reduce(lambda x,y : np.matmul(x,y), [Rphi, Rgamma, Rtheta]) - - return R - -def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength): - - ptsIn2D = ptsIn[0,:] - ptsOut2D = ptsOut[0,:] - ptsOut2Dlist = [] - ptsIn2Dlist = [] - - for i in range(0,4): - ptsOut2Dlist.append([ptsOut2D[i,0], ptsOut2D[i,1]]) - ptsIn2Dlist.append([ptsIn2D[i,0], ptsIn2D[i,1]]) - - pin = np.array(ptsIn2Dlist) + [W/2.,H/2.] - pout = (np.array(ptsOut2Dlist) + [1.,1.]) * (0.5*sidelength) - pin = pin.astype(np.float32) - pout = pout.astype(np.float32) - - return pin, pout - - -def warpMatrix(W, H, theta, phi, gamma, scale, fV): - - # M is to be estimated - M = np.eye(4, 4) - - fVhalf = np.deg2rad(fV/2.) - d = np.sqrt(W*W+H*H) - sideLength = scale*d/np.cos(fVhalf) - h = d/(2.0*np.sin(fVhalf)) - n = h-(d/2.0) - f = h+(d/2.0) - - # Translation along Z-axis by -h - T = np.eye(4,4) - T[2,3] = -h - - # Rotation matrices around x,y,z - R = getRotationMatrixManual([phi, gamma, theta]) - - - # Projection Matrix - P = np.eye(4,4) - P[0,0] = 1.0/np.tan(fVhalf) - P[1,1] = P[0,0] - P[2,2] = -(f+n)/(f-n) - P[2,3] = -(2.0*f*n)/(f-n) - P[3,2] = -1.0 - - # pythonic matrix multiplication - F = reduce(lambda x,y : np.matmul(x,y), [P, T, R]) - - # shape should be 1,4,3 for ptsIn and ptsOut since perspectiveTransform() expects data in this way. - # In C++, this can be achieved by Mat ptsIn(1,4,CV_64FC3); - ptsIn = np.array([[ - [-W/2., H/2., 0.],[ W/2., H/2., 0.],[ W/2.,-H/2., 0.],[-W/2.,-H/2., 0.] - ]]) - ptsOut = np.array(np.zeros((ptsIn.shape), dtype=ptsIn.dtype)) - ptsOut = cv2.perspectiveTransform(ptsIn, F) - - ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength) - - # check float32 otherwise OpenCV throws an error - assert(ptsInPt2f.dtype == np.float32) - assert(ptsOutPt2f.dtype == np.float32) - M33 = cv2.getPerspectiveTransform(ptsInPt2f,ptsOutPt2f) - - return M33, sideLength - -def get_flip_perspective_matrix(W, H, keys, frame_idx): - perspective_flip_theta = keys.perspective_flip_theta_series[frame_idx] - perspective_flip_phi = keys.perspective_flip_phi_series[frame_idx] - perspective_flip_gamma = keys.perspective_flip_gamma_series[frame_idx] - perspective_flip_fv = keys.perspective_flip_fv_series[frame_idx] - M,sl = warpMatrix(W, H, perspective_flip_theta, perspective_flip_phi, perspective_flip_gamma, 1., perspective_flip_fv); - post_trans_mat = np.float32([[1, 0, (W-sl)/2], [0, 1, (H-sl)/2]]) - post_trans_mat = np.vstack([post_trans_mat, [0,0,1]]) - bM = np.matmul(M, post_trans_mat) - return bM - -def flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx): - W, H = (prev_img_cv2.shape[1], prev_img_cv2.shape[0]) - return cv2.warpPerspective( - prev_img_cv2, - get_flip_perspective_matrix(W, H, keys, frame_idx), - (W, H), - borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE - ) - -def anim_frame_warp(prev_img_cv2, args, anim_args, keys, frame_idx, depth_model=None, depth=None, device='cuda', half_precision = False): - - if anim_args.use_depth_warping: - if depth is None and depth_model is not None: - depth = depth_model.predict(prev_img_cv2, anim_args, half_precision) - else: - depth = None - - if anim_args.animation_mode == '2D': - prev_img = anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx) - else: # '3D' - prev_img = anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx) - - 
return prev_img, depth - -def anim_frame_warp_2d(prev_img_cv2, args, anim_args, keys, frame_idx): - angle = keys.angle_series[frame_idx] - zoom = keys.zoom_series[frame_idx] - translation_x = keys.translation_x_series[frame_idx] - translation_y = keys.translation_y_series[frame_idx] - - center = (args.W // 2, args.H // 2) - trans_mat = np.float32([[1, 0, translation_x], [0, 1, translation_y]]) - rot_mat = cv2.getRotationMatrix2D(center, angle, zoom) - trans_mat = np.vstack([trans_mat, [0,0,1]]) - rot_mat = np.vstack([rot_mat, [0,0,1]]) - if anim_args.enable_perspective_flip: - bM = get_flip_perspective_matrix(args.W, args.H, keys, frame_idx) - rot_mat = np.matmul(bM, rot_mat, trans_mat) - else: - rot_mat = np.matmul(rot_mat, trans_mat) - return cv2.warpPerspective( - prev_img_cv2, - rot_mat, - (prev_img_cv2.shape[1], prev_img_cv2.shape[0]), - borderMode=cv2.BORDER_WRAP if anim_args.border == 'wrap' else cv2.BORDER_REPLICATE - ) - -def anim_frame_warp_3d(device, prev_img_cv2, depth, anim_args, keys, frame_idx): - TRANSLATION_SCALE = 1.0/200.0 # matches Disco - translate_xyz = [ - -keys.translation_x_series[frame_idx] * TRANSLATION_SCALE, - keys.translation_y_series[frame_idx] * TRANSLATION_SCALE, - -keys.translation_z_series[frame_idx] * TRANSLATION_SCALE - ] - rotate_xyz = [ - math.radians(keys.rotation_3d_x_series[frame_idx]), - math.radians(keys.rotation_3d_y_series[frame_idx]), - math.radians(keys.rotation_3d_z_series[frame_idx]) - ] - if anim_args.enable_perspective_flip: - prev_img_cv2 = flip_3d_perspective(anim_args, prev_img_cv2, keys, frame_idx) - rot_mat = p3d.euler_angles_to_matrix(torch.tensor(rotate_xyz, device=device), "XYZ").unsqueeze(0) - result = transform_image_3d(device if not device.type.startswith('mps') else torch.device('cpu'), prev_img_cv2, depth, rot_mat, translate_xyz, anim_args, keys, frame_idx) - torch.cuda.empty_cache() - return result - -def transform_image_3d(device, prev_img_cv2, depth_tensor, rot_mat, translate, anim_args, keys, frame_idx): - # adapted and optimized version of transform_image_3d from Disco Diffusion https://github.com/alembics/disco-diffusion - w, h = prev_img_cv2.shape[1], prev_img_cv2.shape[0] - - aspect_ratio = float(w)/float(h) - near = keys.near_series[frame_idx] - far = keys.far_series[frame_idx] - fov_deg = keys.fov_series[frame_idx] - persp_cam_old = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, device=device) - persp_cam_new = p3d.FoVPerspectiveCameras(near, far, aspect_ratio, fov=fov_deg, degrees=True, R=rot_mat, T=torch.tensor([translate]), device=device) - - # range of [-1,1] is important to torch grid_sample's padding handling - y,x = torch.meshgrid(torch.linspace(-1.,1.,h,dtype=torch.float32,device=device),torch.linspace(-1.,1.,w,dtype=torch.float32,device=device)) - if depth_tensor is None: - z = torch.ones_like(x) - else: - z = torch.as_tensor(depth_tensor, dtype=torch.float32, device=device) - xyz_old_world = torch.stack((x.flatten(), y.flatten(), z.flatten()), dim=1) - - xyz_old_cam_xy = persp_cam_old.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] - xyz_new_cam_xy = persp_cam_new.get_full_projection_transform().transform_points(xyz_old_world)[:,0:2] - - offset_xy = xyz_new_cam_xy - xyz_old_cam_xy - # affine_grid theta param expects a batch of 2D mats. Each is 2x3 to do rotation+translation. - identity_2d_batch = torch.tensor([[1.,0.,0.],[0.,1.,0.]], device=device).unsqueeze(0) - # coords_2d will have shape (N,H,W,2).. which is also what grid_sample needs. 
- coords_2d = torch.nn.functional.affine_grid(identity_2d_batch, [1,1,h,w], align_corners=False) - offset_coords_2d = coords_2d - torch.reshape(offset_xy, (h,w,2)).unsqueeze(0) - - image_tensor = rearrange(torch.from_numpy(prev_img_cv2.astype(np.float32)), 'h w c -> c h w').to(device) - new_image = torch.nn.functional.grid_sample( - image_tensor.add(1/512 - 0.0001).unsqueeze(0), - offset_coords_2d, - mode=anim_args.sampling_mode, - padding_mode=anim_args.padding_mode, - align_corners=False - ) - - # convert back to cv2 style numpy array - result = rearrange( - new_image.squeeze().clamp(0,255), - 'c h w -> h w c' - ).cpu().numpy().astype(prev_img_cv2.dtype) - return result diff --git a/spaces/bigscience-data/document-sizes/README.md b/spaces/bigscience-data/document-sizes/README.md deleted file mode 100644 index 0e6442ad16632a7d530f14ed1ea861f2cf6e7071..0000000000000000000000000000000000000000 --- a/spaces/bigscience-data/document-sizes/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Document Sizes -emoji: 📚 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bioriAsaeru/text-to-voice/Baba Ramsa Peer Tamil Pdf Free Candy Fotografici Bi.md b/spaces/bioriAsaeru/text-to-voice/Baba Ramsa Peer Tamil Pdf Free Candy Fotografici Bi.md deleted file mode 100644 index 31f597c022d20d97340faffa1c040f9f7552991c..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Baba Ramsa Peer Tamil Pdf Free Candy Fotografici Bi.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Baba Ramsa Peer Tamil Pdf Free candy fotografici bi


    Download Zip 🌟 https://urloso.com/2uyRc3



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Compuapps Swissknife V3 License Key !!HOT!! Crack 22.md b/spaces/bioriAsaeru/text-to-voice/Compuapps Swissknife V3 License Key !!HOT!! Crack 22.md deleted file mode 100644 index c290c6eb09188605f901803000ad5b9d3cabd1c0..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Compuapps Swissknife V3 License Key !!HOT!! Crack 22.md +++ /dev/null @@ -1,6 +0,0 @@ -

    compuapps swissknife v3 license key crack 22


Download File https://urloso.com/2uyP3r



    - - 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/bishu3011/hf-xample/README.md b/spaces/bishu3011/hf-xample/README.md deleted file mode 100644 index eeb9cfbe2fa2c95ac285ca239576a378c0e80819..0000000000000000000000000000000000000000 --- a/spaces/bishu3011/hf-xample/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LLMsIntro -emoji: 🏃 -colorFrom: green -colorTo: purple -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bitofurqan/meta-llama-Llama-2-70b-chat-hf/app.py b/spaces/bitofurqan/meta-llama-Llama-2-70b-chat-hf/app.py deleted file mode 100644 index acd7f0fca38be3e66a294b9c4019d0c98e454ffb..0000000000000000000000000000000000000000 --- a/spaces/bitofurqan/meta-llama-Llama-2-70b-chat-hf/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import json -import gradio as gr -import os -import requests - -hf_token = os.getenv('HF_TOKEN') -api_url = os.getenv('API_URL') -headers = { - 'Content-Type': 'application/json', -} - -system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information." -title = "Llama2 70B Chatbot" -description = """This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, running on Inference Endpoints using text-generation-inference. Recommended: [Try Bito (AI Coding Assistant) for FREE!](https://bito.ai/). """ -css = """.toast-wrap { display: none !important } """ - -def predict(message, chatbot): - - input_prompt = f"[INST] <>\n{system_message}\n<>\n\n " - for interaction in chatbot: - input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " [INST] " - - input_prompt = input_prompt + str(message) + " [/INST] " - - data = { - "inputs": input_prompt, - "parameters": {"max_new_tokens":256} - } - - response = requests.post(api_url, headers=headers, data=json.dumps(data), auth=('hf', hf_token), stream=True) - - partial_message = "" - for line in response.iter_lines(): - if line: # filter out keep-alive new lines - # Decode from bytes to string - decoded_line = line.decode('utf-8') - - # Remove 'data:' prefix - if decoded_line.startswith('data:'): - json_line = decoded_line[5:] # Exclude the first 5 characters ('data:') - else: - gr.Warning(f"This line does not start with 'data:': {decoded_line}") - continue - - # Load as JSON - try: - json_obj = json.loads(json_line) - if 'token' in json_obj: - partial_message = partial_message + json_obj['token']['text'] - yield partial_message - elif 'error' in json_obj: - yield json_obj['error'] + '. Please refresh and try again with an appropriate smaller input prompt.' 
- else: - gr.Warning(f"The key 'token' does not exist in this JSON object: {json_obj}") - - #partial_message = partial_message + json_obj['token']['text'] - #yield partial_message - except json.JSONDecodeError: - gr.Warning(f"This line is not valid JSON: {json_line}") - continue - except KeyError as e: - gr.Warning(f"KeyError: {e} occurred for JSON object: {json_obj}") - continue - -gr.ChatInterface(predict, title=title, description=description, css=css).queue(concurrency_count=75).launch() diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/tracking/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/tracking/__init__.py deleted file mode 100644 index 21078ae822b04b71dbd8b056b5993d173eaf6bff..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/tracking/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .base_tracker import ( # noqa - BaseTracker, - build_tracker_head, - TRACKER_HEADS_REGISTRY, -) -from .bbox_iou_tracker import BBoxIOUTracker # noqa -from .hungarian_tracker import BaseHungarianTracker # noqa -from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa - IOUWeightedHungarianBBoxIOUTracker, -) -from .utils import create_prediction_pairs # noqa -from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/cse/utils.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/cse/utils.py deleted file mode 100644 index 6e70d25df7c8e2c1c408866cf7a6f0156b64114a..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/cse/utils.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import torch -from torch.nn import functional as F - - -def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor: - """ - Get squared Euclidean Distance Matrix - Computes pairwise squared Euclidean distances between points - - Args: - pts1: Tensor [M x D], M is the number of points, D is feature dimensionality - pts2: Tensor [N x D], N is the number of points, D is feature dimensionality - - Return: - Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n) - it contains || pts1[m] - pts2[n] ||^2 - """ - edm = torch.mm(-2 * pts1, pts2.t()) - edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t() - return edm.contiguous() - - -def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor: - """ - Normalize N D-dimensional embedding vectors arranged in a tensor [N, D] - - Args: - embeddings (tensor [N, D]): N D-dimensional embedding vectors - epsilon (float): minimum value for a vector norm - Return: - Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1. 
- """ - return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon) - - -def get_closest_vertices_mask_from_ES( - E: torch.Tensor, - S: torch.Tensor, - h: int, - w: int, - mesh_vertex_embeddings: torch.Tensor, - device: torch.device, -): - """ - Interpolate Embeddings and Segmentations to the size of a given bounding box, - and compute closest vertices and the segmentation mask - - Args: - E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the - default-sized box - S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the - default-sized box - h (int): height of the target bounding box - w (int): width of the target bounding box - mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh - N is the number of vertices in the mesh, D is feature dimensionality - device (torch.device): device to move the tensors to - Return: - Closest Vertices (tensor [h, w]), int, for every point of the resulting box - Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box - """ - embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device) - coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device) - mask = coarse_segm_resized.argmax(0) > 0 - closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device) - all_embeddings = embedding_resized[:, mask].t() - size_chunk = 10_000 # Chunking to avoid possible OOM - edm = [] - if len(all_embeddings) == 0: - return closest_vertices, mask - for chunk in range((len(all_embeddings) - 1) // size_chunk + 1): - chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)] - edm.append( - torch.argmin( - squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1 - ) - ) - closest_vertices[mask] = torch.cat(edm) - return closest_vertices, mask diff --git a/spaces/brjathu/HMR2.0/vendor/pyrender/docs/make.bat b/spaces/brjathu/HMR2.0/vendor/pyrender/docs/make.bat deleted file mode 100644 index 4d9eb83d9f9309029f4b14ff09024658bb0f5563..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/pyrender/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/spaces/ccds/vits_onnx/app/init_jptalk.py b/spaces/ccds/vits_onnx/app/init_jptalk.py deleted file mode 100644 index 214213d9ad2ed04a836c9fa0ac7b8a7e42517671..0000000000000000000000000000000000000000 --- a/spaces/ccds/vits_onnx/app/init_jptalk.py +++ /dev/null @@ -1,9 +0,0 @@ - - -import pyopenjtalk -import gradio as gr - - - -pyopenjtalk. 
_lazy_init() -# pyopenjtalk._extract_dic() \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/scripts/distributed/torch-distributed-gpu-test.py b/spaces/chendl/compositional_test/transformers/scripts/distributed/torch-distributed-gpu-test.py deleted file mode 100644 index 22a99d570e4f85ba4fcbaf470fbbda1856fc8edd..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/scripts/distributed/torch-distributed-gpu-test.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python - -# -# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or -# many nodes) can talk to each other via nccl and allocate gpu memory. -# -# To run first adjust the number of processes and nodes: -# -# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py -# -# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port -# -# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d -# -# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 -# -# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: -# -# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py -# -# which should tell you what's going on behind the scenes. -# -# -# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that -# runs on 2 nodes of 4 gpus per node: -# -# #SBATCH --job-name=test-nodes # name -# #SBATCH --nodes=2 # nodes -# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! -# #SBATCH --cpus-per-task=10 # number of cores per tasks -# #SBATCH --gres=gpu:4 # number of gpus -# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) -# #SBATCH --output=%x-%j.out # output file name -# -# GPUS_PER_NODE=4 -# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) -# MASTER_PORT=6000 -# -# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ -# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ -# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ -# torch-distributed-gpu-test.py' -# - -import fcntl -import os -import socket - -import torch -import torch.distributed as dist - - -def printflock(*msgs): - """solves multi-process interleaved print problem""" - with open(__file__, "r") as fh: - fcntl.flock(fh, fcntl.LOCK_EX) - try: - print(*msgs) - finally: - fcntl.flock(fh, fcntl.LOCK_UN) - - -local_rank = int(os.environ["LOCAL_RANK"]) -torch.cuda.set_device(local_rank) -device = torch.device("cuda", local_rank) -hostname = socket.gethostname() - -gpu = f"[{hostname}-{local_rank}]" - -try: - # test distributed - dist.init_process_group("nccl") - dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) - dist.barrier() - - # test cuda is available and can allocate memory - torch.cuda.is_available() - torch.ones(1).cuda(local_rank) - - # global rank - rank = dist.get_rank() - world_size = dist.get_world_size() - - printflock(f"{gpu} is OK (global rank: {rank}/{world_size})") - - dist.barrier() - if rank == 0: - printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") - -except Exception: - printflock(f"{gpu} is broken") - raise diff --git a/spaces/chow-q/cut-image/img.py b/spaces/chow-q/cut-image/img.py deleted file mode 100644 index 
78c0efe10c1b9dcd24e833ba59803eb584eaa348..0000000000000000000000000000000000000000 --- a/spaces/chow-q/cut-image/img.py +++ /dev/null @@ -1,52 +0,0 @@ -from flask import Flask, render_template, request -import os -import uuid -import cv2 -from modelscope.pipelines import pipeline -from modelscope.utils.constant import Tasks -from modelscope.outputs import OutputKeys -app = Flask(__name__) - -@app.route("/", methods=["GET", "POST"]) -def index(): - if request.method == "POST": - # 判断是否有文件上传 - if "file_input" not in request.files: - return render_template("index.html", error="请选择一个图片上传!") - - file = request.files["file_input"] - print("##################",request.form) - - # 判断上传的文件类型是否合法 - allowed_extensions = {"jpg", "jpeg", "png", "gif"} - _, file_extension = os.path.splitext(file.filename) - if not file_extension[1:] in allowed_extensions: - return render_template("index.html", error="只允许上传 .jpg、.jpeg、.png、.gif 格式的图片!") - - # 生成一个唯一文件名,避免重复 - image_filename = str(uuid.uuid4()) + file_extension - - # 保存上传的图片 - file.save(os.path.join("static", image_filename)) - - if 'image' in request.form: - #人物抠图 - portrait_matting = pipeline(Tasks.portrait_matting,model='damo/cv_unet_image-matting') - result = portrait_matting(f"static/{image_filename}") - result_filename = str(uuid.uuid4()) + ".png" - cv2.imwrite(f"static/{result_filename}", result[OutputKeys.OUTPUT_IMG]) - else: - #通用抠图 - universal_matting = pipeline(Tasks.universal_matting,model='damo/cv_unet_universal-matting') - result = universal_matting(f"static/{image_filename}") - result_filename = str(uuid.uuid4()) + ".png" - cv2.imwrite(f"static/{result_filename}", result[OutputKeys.OUTPUT_IMG]) - - # 显示图片 - image_path = f"static/{image_filename}" - result_path = f"static/{result_filename}" - return render_template("index.html", image_path=image_path, result_path=result_path) - - return render_template("index.html") -if __name__ == '__main__': - app.run(host="0.0.0.0",port=7860) \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Diskinternals Flash Recovery 4.5 Keygen Software How to Restore Your Flash Memory Data in Minutes.md b/spaces/cihyFjudo/fairness-paper-search/Diskinternals Flash Recovery 4.5 Keygen Software How to Restore Your Flash Memory Data in Minutes.md deleted file mode 100644 index 40c77b8bfb95f16c4186166bcae9e023bd87ae71..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Diskinternals Flash Recovery 4.5 Keygen Software How to Restore Your Flash Memory Data in Minutes.md +++ /dev/null @@ -1,10 +0,0 @@ -
    -

You cannot download any crack or serial number for Portable DiskInternals Linux Reader on this page. Every piece of software that you are able to download from our site is legal. There is no crack, serial number, hack or activation key for Portable DiskInternals Linux Reader present here. Our collection also doesn't contain any keygens, because keygen programs are used in illegal ways which we do not support. All software that you can find here is freely downloadable and legal.

    -

    Diskinternals Flash Recovery 4.5 Keygen Software


DOWNLOAD https://tinurli.com/2uwjXv



    -

Just finished burning a CD or DVD, only to find out that some files appear to be corrupted or inaccessible? Power went out while recording data? Added a new session to your CD/DVD and lost old files? Do not worry.

DiskInternals CD and DVD Recovery, as the name implies, is a software solution capable of recovering data from CDs and DVDs, including multi-session ones. CD-R, CD-RW, DVD-R, DVD+R, DVD-RW, DVD+RW, DVD+R DL, DVD-R DL and DVD-RAM discs are supported.

The program recovers any type of file - data, pictures, video, music - and processes both ISO 9660 and Joliet file systems. The program uses a smart file recovery algorithm that skips bad sectors. In the end, this software will help you restore your valuable data.

The recovery process is very straightforward: insert the disc, press Recover, and get the files you need.
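To make the bad-sector idea above concrete, here is a minimal, hypothetical sketch of how a recovery tool can image a disc while skipping unreadable sectors. It is not DiskInternals' actual implementation, which is closed source; the device path `/dev/sr0`, the 2048-byte sector size and the function name are assumptions made purely for illustration.

```python
# Minimal sketch: copy a disc to an image file, zero-filling unreadable sectors.
# The device path and sector size are illustrative assumptions, not taken from
# the original tool. Real tools would also bound the loop by the disc capacity.
SECTOR = 2048  # standard CD/DVD data sector size

def dump_disc(device_path="/dev/sr0", image_path="disc.iso"):
    with open(device_path, "rb", buffering=0) as src, open(image_path, "wb") as dst:
        index = 0
        while True:
            src.seek(index * SECTOR)          # re-seek every pass in case a bad read moved the position
            try:
                block = src.read(SECTOR)
            except OSError:
                block = b"\x00" * SECTOR      # bad sector: keep going, pad with zeros
            if not block:
                break                          # clean end of disc
            dst.write(block)
            index += 1
```

The design point the prose is making is simply that a read error on one sector should not abort the whole copy; the sketch keeps reading and leaves a recognisable hole in the image instead.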

    -

You cannot download any crack or serial number for DiskInternals CD and DVD Recovery on this page. Every piece of software that you are able to download from our site is legal. There is no crack, serial number, hack or activation key for DiskInternals CD and DVD Recovery present here. Our collection also doesn't contain any keygens, because keygen programs are used in illegal ways which we do not support. All software that you can find here is freely downloadable and legal.

    -

    If you are like most people, then you probably know the frustration caused by accidentally deleting a necessary file.

Even more frequently, people delete seemingly unnecessary files only to find out that they really needed them later. If this is the situation you are facing right now, you should download DiskInternals Uneraser.

DiskInternals Uneraser is a software tool that allows you to restore deleted files.

    This simple tool recovers most types of files, including text and PDF documents, audio and video files, pictures and images, and almost all other file types, including compressed files and encrypted ones.

In addition, you can recover folders and damaged disks with this program as well. But the best feature of DiskInternals Uneraser is that it supports any storage media.

This means that you can restore a song you deleted from the memory stick of your MP3 player or a picture that you erased from your camera. The program processes floppies and external drives, including flash and USB media.

    Importantly, the program works with any Windows operating system and recovers even hidden system resources that may get damaged during a virus attack.

DiskInternals Uneraser supports long filenames as well as ones with non-English characters. The program recovers all files correctly and ignores access rights (NTFS) when necessary.

There is a unique algorithm for recovering files from FAT32 volumes in Windows NT, 2000 and XP that no other software has.
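As a rough illustration of what FAT32 undeletion involves in general (this is the generic textbook approach, not the proprietary algorithm referred to above), a scanner can walk the raw directory entries of a volume and pick out the ones whose first byte is 0xE5, the marker FAT uses for deleted entries. The function name and the way the directory bytes are obtained are assumptions for the sketch.

```python
# Generic FAT32 undelete idea (not DiskInternals' proprietary algorithm):
# deleted 8.3 directory entries keep their metadata but start with 0xE5,
# so a scanner can walk a raw directory dump and list them.
import struct

ENTRY_SIZE = 32  # size of one FAT directory entry in bytes

def find_deleted_entries(dir_bytes):
    """Yield (name, size) for deleted 8.3 entries in a raw FAT directory dump."""
    for off in range(0, len(dir_bytes) - ENTRY_SIZE + 1, ENTRY_SIZE):
        entry = dir_bytes[off:off + ENTRY_SIZE]
        if entry[0] != 0xE5:          # 0xE5 marks a deleted entry
            continue
        if entry[11] == 0x0F:         # attribute 0x0F means a long-file-name entry; skip it
            continue
        name = b"?" + entry[1:8]      # the first character of the name is lost on deletion
        ext = entry[8:11]
        size = struct.unpack_from("<I", entry, 28)[0]  # 32-bit file size at offset 28
        yield (name.decode(errors="replace").strip()
               + "." + ext.decode(errors="replace").strip(), size)
```

A real recovery tool would then follow the entry's starting cluster and copy out the file data, provided those clusters have not been reused.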

The recovered files can be saved to any location, including the network, as long as that location is visible to the host's OS. Another perk of the program is its capability to create image files of entire hard drives, logical drives, or parts of them, and to process those images like regular disks.

DiskInternals Uneraser has a very simple interface that resembles Windows Explorer. DiskInternals Uneraser is 100% FREE to download and try.

    -

You cannot download any crack or serial number for DiskInternals Uneraser on this page. Every piece of software that you are able to download from our site is legal. There is no crack, serial number, hack or activation key for DiskInternals Uneraser present here. Our collection also doesn't contain any keygens, because keygen programs are used in illegal ways which we do not support. All software that you can find here is freely downloadable and legal.

    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Maine Pyar Kiya hd download Watch the classic romance of Salman Khan and Bhagyashree.md b/spaces/cihyFjudo/fairness-paper-search/Maine Pyar Kiya hd download Watch the classic romance of Salman Khan and Bhagyashree.md deleted file mode 100644 index e64e314ed5c352d8470fe87676adc6629f7b8baf..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Maine Pyar Kiya hd download Watch the classic romance of Salman Khan and Bhagyashree.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Maine Pyar Kiya hd download


    Download File ··· https://tinurli.com/2uwip8



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Omnipotence Paradox 720p Torrent.md b/spaces/cihyFjudo/fairness-paper-search/Omnipotence Paradox 720p Torrent.md deleted file mode 100644 index b3b35ef5b97b32e17abb642f35ac7d9a40ff1bd8..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Omnipotence Paradox 720p Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Omnipotence Paradox 720p torrent


Download File https://tinurli.com/2uwkBz



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/decorators.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/decorators.py deleted file mode 100644 index d9bba9502ca353bca5136f43c92436ff584f06e1..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/click/decorators.py +++ /dev/null @@ -1,561 +0,0 @@ -import inspect -import types -import typing as t -from functools import update_wrapper -from gettext import gettext as _ - -from .core import Argument -from .core import Command -from .core import Context -from .core import Group -from .core import Option -from .core import Parameter -from .globals import get_current_context -from .utils import echo - -if t.TYPE_CHECKING: - import typing_extensions as te - - P = te.ParamSpec("P") - -R = t.TypeVar("R") -T = t.TypeVar("T") -_AnyCallable = t.Callable[..., t.Any] -FC = t.TypeVar("FC", bound=t.Union[_AnyCallable, Command]) - - -def pass_context(f: "t.Callable[te.Concatenate[Context, P], R]") -> "t.Callable[P, R]": - """Marks a callback as wanting to receive the current context - object as first argument. - """ - - def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R": - return f(get_current_context(), *args, **kwargs) - - return update_wrapper(new_func, f) - - -def pass_obj(f: "t.Callable[te.Concatenate[t.Any, P], R]") -> "t.Callable[P, R]": - """Similar to :func:`pass_context`, but only pass the object on the - context onwards (:attr:`Context.obj`). This is useful if that object - represents the state of a nested system. - """ - - def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R": - return f(get_current_context().obj, *args, **kwargs) - - return update_wrapper(new_func, f) - - -def make_pass_decorator( - object_type: t.Type[T], ensure: bool = False -) -> t.Callable[["t.Callable[te.Concatenate[T, P], R]"], "t.Callable[P, R]"]: - """Given an object type this creates a decorator that will work - similar to :func:`pass_obj` but instead of passing the object of the - current context, it will find the innermost context of type - :func:`object_type`. - - This generates a decorator that works roughly like this:: - - from functools import update_wrapper - - def decorator(f): - @pass_context - def new_func(ctx, *args, **kwargs): - obj = ctx.find_object(object_type) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - :param object_type: the type of the object to pass. - :param ensure: if set to `True`, a new object will be created and - remembered on the context if it's not there yet. - """ - - def decorator(f: "t.Callable[te.Concatenate[T, P], R]") -> "t.Callable[P, R]": - def new_func(*args: "P.args", **kwargs: "P.kwargs") -> "R": - ctx = get_current_context() - - obj: t.Optional[T] - if ensure: - obj = ctx.ensure_object(object_type) - else: - obj = ctx.find_object(object_type) - - if obj is None: - raise RuntimeError( - "Managed to invoke callback without a context" - f" object of type {object_type.__name__!r}" - " existing." - ) - - return ctx.invoke(f, obj, *args, **kwargs) - - return update_wrapper(new_func, f) - - return decorator # type: ignore[return-value] - - -def pass_meta_key( - key: str, *, doc_description: t.Optional[str] = None -) -> "t.Callable[[t.Callable[te.Concatenate[t.Any, P], R]], t.Callable[P, R]]": - """Create a decorator that passes a key from - :attr:`click.Context.meta` as the first argument to the decorated - function. 
- - :param key: Key in ``Context.meta`` to pass. - :param doc_description: Description of the object being passed, - inserted into the decorator's docstring. Defaults to "the 'key' - key from Context.meta". - - .. versionadded:: 8.0 - """ - - def decorator(f: "t.Callable[te.Concatenate[t.Any, P], R]") -> "t.Callable[P, R]": - def new_func(*args: "P.args", **kwargs: "P.kwargs") -> R: - ctx = get_current_context() - obj = ctx.meta[key] - return ctx.invoke(f, obj, *args, **kwargs) - - return update_wrapper(new_func, f) - - if doc_description is None: - doc_description = f"the {key!r} key from :attr:`click.Context.meta`" - - decorator.__doc__ = ( - f"Decorator that passes {doc_description} as the first argument" - " to the decorated function." - ) - return decorator # type: ignore[return-value] - - -CmdType = t.TypeVar("CmdType", bound=Command) - - -# variant: no call, directly as decorator for a function. -@t.overload -def command(name: _AnyCallable) -> Command: - ... - - -# variant: with positional name and with positional or keyword cls argument: -# @command(namearg, CommandCls, ...) or @command(namearg, cls=CommandCls, ...) -@t.overload -def command( - name: t.Optional[str], - cls: t.Type[CmdType], - **attrs: t.Any, -) -> t.Callable[[_AnyCallable], CmdType]: - ... - - -# variant: name omitted, cls _must_ be a keyword argument, @command(cls=CommandCls, ...) -@t.overload -def command( - name: None = None, - *, - cls: t.Type[CmdType], - **attrs: t.Any, -) -> t.Callable[[_AnyCallable], CmdType]: - ... - - -# variant: with optional string name, no cls argument provided. -@t.overload -def command( - name: t.Optional[str] = ..., cls: None = None, **attrs: t.Any -) -> t.Callable[[_AnyCallable], Command]: - ... - - -def command( - name: t.Union[t.Optional[str], _AnyCallable] = None, - cls: t.Optional[t.Type[CmdType]] = None, - **attrs: t.Any, -) -> t.Union[Command, t.Callable[[_AnyCallable], t.Union[Command, CmdType]]]: - r"""Creates a new :class:`Command` and uses the decorated function as - callback. This will also automatically attach all decorated - :func:`option`\s and :func:`argument`\s as parameters to the command. - - The name of the command defaults to the name of the function with - underscores replaced by dashes. If you want to change that, you can - pass the intended name as the first argument. - - All keyword arguments are forwarded to the underlying command class. - For the ``params`` argument, any decorated params are appended to - the end of the list. - - Once decorated the function turns into a :class:`Command` instance - that can be invoked as a command line utility or be attached to a - command :class:`Group`. - - :param name: the name of the command. This defaults to the function - name with underscores replaced by dashes. - :param cls: the command class to instantiate. This defaults to - :class:`Command`. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - - .. versionchanged:: 8.1 - The ``params`` argument can be used. Decorated params are - appended to the end of the list. - """ - - func: t.Optional[t.Callable[[_AnyCallable], t.Any]] = None - - if callable(name): - func = name - name = None - assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class." - assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments." 
- - if cls is None: - cls = t.cast(t.Type[CmdType], Command) - - def decorator(f: _AnyCallable) -> CmdType: - if isinstance(f, Command): - raise TypeError("Attempted to convert a callback into a command twice.") - - attr_params = attrs.pop("params", None) - params = attr_params if attr_params is not None else [] - - try: - decorator_params = f.__click_params__ # type: ignore - except AttributeError: - pass - else: - del f.__click_params__ # type: ignore - params.extend(reversed(decorator_params)) - - if attrs.get("help") is None: - attrs["help"] = f.__doc__ - - if t.TYPE_CHECKING: - assert cls is not None - assert not callable(name) - - cmd = cls( - name=name or f.__name__.lower().replace("_", "-"), - callback=f, - params=params, - **attrs, - ) - cmd.__doc__ = f.__doc__ - return cmd - - if func is not None: - return decorator(func) - - return decorator - - -GrpType = t.TypeVar("GrpType", bound=Group) - - -# variant: no call, directly as decorator for a function. -@t.overload -def group(name: _AnyCallable) -> Group: - ... - - -# variant: with positional name and with positional or keyword cls argument: -# @group(namearg, GroupCls, ...) or @group(namearg, cls=GroupCls, ...) -@t.overload -def group( - name: t.Optional[str], - cls: t.Type[GrpType], - **attrs: t.Any, -) -> t.Callable[[_AnyCallable], GrpType]: - ... - - -# variant: name omitted, cls _must_ be a keyword argument, @group(cmd=GroupCls, ...) -@t.overload -def group( - name: None = None, - *, - cls: t.Type[GrpType], - **attrs: t.Any, -) -> t.Callable[[_AnyCallable], GrpType]: - ... - - -# variant: with optional string name, no cls argument provided. -@t.overload -def group( - name: t.Optional[str] = ..., cls: None = None, **attrs: t.Any -) -> t.Callable[[_AnyCallable], Group]: - ... - - -def group( - name: t.Union[str, _AnyCallable, None] = None, - cls: t.Optional[t.Type[GrpType]] = None, - **attrs: t.Any, -) -> t.Union[Group, t.Callable[[_AnyCallable], t.Union[Group, GrpType]]]: - """Creates a new :class:`Group` with a function as callback. This - works otherwise the same as :func:`command` just that the `cls` - parameter is set to :class:`Group`. - - .. versionchanged:: 8.1 - This decorator can be applied without parentheses. - """ - if cls is None: - cls = t.cast(t.Type[GrpType], Group) - - if callable(name): - return command(cls=cls, **attrs)(name) - - return command(name, cls, **attrs) - - -def _param_memo(f: t.Callable[..., t.Any], param: Parameter) -> None: - if isinstance(f, Command): - f.params.append(param) - else: - if not hasattr(f, "__click_params__"): - f.__click_params__ = [] # type: ignore - - f.__click_params__.append(param) # type: ignore - - -def argument( - *param_decls: str, cls: t.Optional[t.Type[Argument]] = None, **attrs: t.Any -) -> t.Callable[[FC], FC]: - """Attaches an argument to the command. All positional arguments are - passed as parameter declarations to :class:`Argument`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Argument` instance manually - and attaching it to the :attr:`Command.params` list. - - For the default argument class, refer to :class:`Argument` and - :class:`Parameter` for descriptions of parameters. - - :param cls: the argument class to instantiate. This defaults to - :class:`Argument`. - :param param_decls: Passed as positional arguments to the constructor of - ``cls``. - :param attrs: Passed as keyword arguments to the constructor of ``cls``. 
- """ - if cls is None: - cls = Argument - - def decorator(f: FC) -> FC: - _param_memo(f, cls(param_decls, **attrs)) - return f - - return decorator - - -def option( - *param_decls: str, cls: t.Optional[t.Type[Option]] = None, **attrs: t.Any -) -> t.Callable[[FC], FC]: - """Attaches an option to the command. All positional arguments are - passed as parameter declarations to :class:`Option`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Option` instance manually - and attaching it to the :attr:`Command.params` list. - - For the default option class, refer to :class:`Option` and - :class:`Parameter` for descriptions of parameters. - - :param cls: the option class to instantiate. This defaults to - :class:`Option`. - :param param_decls: Passed as positional arguments to the constructor of - ``cls``. - :param attrs: Passed as keyword arguments to the constructor of ``cls``. - """ - if cls is None: - cls = Option - - def decorator(f: FC) -> FC: - _param_memo(f, cls(param_decls, **attrs)) - return f - - return decorator - - -def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: - """Add a ``--yes`` option which shows a prompt before continuing if - not passed. If the prompt is declined, the program will exit. - - :param param_decls: One or more option names. Defaults to the single - value ``"--yes"``. - :param kwargs: Extra arguments are passed to :func:`option`. - """ - - def callback(ctx: Context, param: Parameter, value: bool) -> None: - if not value: - ctx.abort() - - if not param_decls: - param_decls = ("--yes",) - - kwargs.setdefault("is_flag", True) - kwargs.setdefault("callback", callback) - kwargs.setdefault("expose_value", False) - kwargs.setdefault("prompt", "Do you want to continue?") - kwargs.setdefault("help", "Confirm the action without prompting.") - return option(*param_decls, **kwargs) - - -def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: - """Add a ``--password`` option which prompts for a password, hiding - input and asking to enter the value again for confirmation. - - :param param_decls: One or more option names. Defaults to the single - value ``"--password"``. - :param kwargs: Extra arguments are passed to :func:`option`. - """ - if not param_decls: - param_decls = ("--password",) - - kwargs.setdefault("prompt", True) - kwargs.setdefault("confirmation_prompt", True) - kwargs.setdefault("hide_input", True) - return option(*param_decls, **kwargs) - - -def version_option( - version: t.Optional[str] = None, - *param_decls: str, - package_name: t.Optional[str] = None, - prog_name: t.Optional[str] = None, - message: t.Optional[str] = None, - **kwargs: t.Any, -) -> t.Callable[[FC], FC]: - """Add a ``--version`` option which immediately prints the version - number and exits the program. - - If ``version`` is not provided, Click will try to detect it using - :func:`importlib.metadata.version` to get the version for the - ``package_name``. On Python < 3.8, the ``importlib_metadata`` - backport must be installed. - - If ``package_name`` is not provided, Click will try to detect it by - inspecting the stack frames. This will be used to detect the - version, so it must match the name of the installed package. - - :param version: The version number to show. If not provided, Click - will try to detect it. - :param param_decls: One or more option names. Defaults to the single - value ``"--version"``. 
- :param package_name: The package name to detect the version from. If - not provided, Click will try to detect it. - :param prog_name: The name of the CLI to show in the message. If not - provided, it will be detected from the command. - :param message: The message to show. The values ``%(prog)s``, - ``%(package)s``, and ``%(version)s`` are available. Defaults to - ``"%(prog)s, version %(version)s"``. - :param kwargs: Extra arguments are passed to :func:`option`. - :raise RuntimeError: ``version`` could not be detected. - - .. versionchanged:: 8.0 - Add the ``package_name`` parameter, and the ``%(package)s`` - value for messages. - - .. versionchanged:: 8.0 - Use :mod:`importlib.metadata` instead of ``pkg_resources``. The - version is detected based on the package name, not the entry - point name. The Python package name must match the installed - package name, or be passed with ``package_name=``. - """ - if message is None: - message = _("%(prog)s, version %(version)s") - - if version is None and package_name is None: - frame = inspect.currentframe() - f_back = frame.f_back if frame is not None else None - f_globals = f_back.f_globals if f_back is not None else None - # break reference cycle - # https://docs.python.org/3/library/inspect.html#the-interpreter-stack - del frame - - if f_globals is not None: - package_name = f_globals.get("__name__") - - if package_name == "__main__": - package_name = f_globals.get("__package__") - - if package_name: - package_name = package_name.partition(".")[0] - - def callback(ctx: Context, param: Parameter, value: bool) -> None: - if not value or ctx.resilient_parsing: - return - - nonlocal prog_name - nonlocal version - - if prog_name is None: - prog_name = ctx.find_root().info_name - - if version is None and package_name is not None: - metadata: t.Optional[types.ModuleType] - - try: - from importlib import metadata # type: ignore - except ImportError: - # Python < 3.8 - import importlib_metadata as metadata # type: ignore - - try: - version = metadata.version(package_name) # type: ignore - except metadata.PackageNotFoundError: # type: ignore - raise RuntimeError( - f"{package_name!r} is not installed. Try passing" - " 'package_name' instead." - ) from None - - if version is None: - raise RuntimeError( - f"Could not determine the version for {package_name!r} automatically." - ) - - echo( - message % {"prog": prog_name, "package": package_name, "version": version}, - color=ctx.color, - ) - ctx.exit() - - if not param_decls: - param_decls = ("--version",) - - kwargs.setdefault("is_flag", True) - kwargs.setdefault("expose_value", False) - kwargs.setdefault("is_eager", True) - kwargs.setdefault("help", _("Show the version and exit.")) - kwargs["callback"] = callback - return option(*param_decls, **kwargs) - - -def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]: - """Add a ``--help`` option which immediately prints the help page - and exits the program. - - This is usually unnecessary, as the ``--help`` option is added to - each command automatically unless ``add_help_option=False`` is - passed. - - :param param_decls: One or more option names. Defaults to the single - value ``"--help"``. - :param kwargs: Extra arguments are passed to :func:`option`. 
- """ - - def callback(ctx: Context, param: Parameter, value: bool) -> None: - if not value or ctx.resilient_parsing: - return - - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - if not param_decls: - param_decls = ("--help",) - - kwargs.setdefault("is_flag", True) - kwargs.setdefault("expose_value", False) - kwargs.setdefault("is_eager", True) - kwargs.setdefault("help", _("Show this message and exit.")) - kwargs["callback"] = callback - return option(*param_decls, **kwargs) diff --git a/spaces/codertoro/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/codertoro/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/codertoro/gpt-academic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ass.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ass.c deleted file mode 100644 index 5058dc83373cf4da9abf40b0d3326d78c46c3867..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ass.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * SSA/ASS common functions - * Copyright (c) 2010 Aurelien Jacobs - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avcodec.h" -#include "ass.h" -#include "libavutil/avstring.h" -#include "libavutil/bprint.h" -#include "libavutil/common.h" -#include "version.h" - -int ff_ass_subtitle_header_full(AVCodecContext *avctx, - int play_res_x, int play_res_y, - const char *font, int font_size, - int primary_color, int secondary_color, - int outline_color, int back_color, - int bold, int italic, int underline, - int border_style, int alignment) -{ - avctx->subtitle_header = av_asprintf( - "[Script Info]\r\n" - "; Script generated by FFmpeg/Lavc%s\r\n" - "ScriptType: v4.00+\r\n" - "PlayResX: %d\r\n" - "PlayResY: %d\r\n" - "ScaledBorderAndShadow: yes\r\n" - "YCbCr Matrix: None\r\n" - "\r\n" - "[V4+ Styles]\r\n" - - /* ASS (v4+) header */ - "Format: Name, " - "Fontname, Fontsize, " - "PrimaryColour, SecondaryColour, OutlineColour, BackColour, " - "Bold, Italic, Underline, StrikeOut, " - "ScaleX, ScaleY, " - "Spacing, Angle, " - "BorderStyle, Outline, Shadow, " - "Alignment, MarginL, MarginR, MarginV, " - "Encoding\r\n" - - "Style: " - "Default," /* Name */ - "%s,%d," /* Font{name,size} */ - "&H%x,&H%x,&H%x,&H%x," /* {Primary,Secondary,Outline,Back}Colour */ - "%d,%d,%d,0," /* Bold, Italic, Underline, StrikeOut */ - "100,100," /* Scale{X,Y} */ - "0,0," /* Spacing, Angle */ - "%d,1,0," /* BorderStyle, Outline, Shadow */ - "%d,10,10,10," /* Alignment, Margin[LRV] */ - "1\r\n" /* Encoding */ - - "\r\n" - "[Events]\r\n" - "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n", - !(avctx->flags & AV_CODEC_FLAG_BITEXACT) ? 
AV_STRINGIFY(LIBAVCODEC_VERSION) : "", - play_res_x, play_res_y, font, font_size, - primary_color, secondary_color, outline_color, back_color, - -bold, -italic, -underline, border_style, alignment); - - if (!avctx->subtitle_header) - return AVERROR(ENOMEM); - avctx->subtitle_header_size = strlen(avctx->subtitle_header); - return 0; -} - -int ff_ass_subtitle_header(AVCodecContext *avctx, - const char *font, int font_size, - int color, int back_color, - int bold, int italic, int underline, - int border_style, int alignment) -{ - return ff_ass_subtitle_header_full(avctx, - ASS_DEFAULT_PLAYRESX, ASS_DEFAULT_PLAYRESY, - font, font_size, color, color, - back_color, back_color, - bold, italic, underline, - border_style, alignment); -} - -int ff_ass_subtitle_header_default(AVCodecContext *avctx) -{ - return ff_ass_subtitle_header(avctx, ASS_DEFAULT_FONT, - ASS_DEFAULT_FONT_SIZE, - ASS_DEFAULT_COLOR, - ASS_DEFAULT_BACK_COLOR, - ASS_DEFAULT_BOLD, - ASS_DEFAULT_ITALIC, - ASS_DEFAULT_UNDERLINE, - ASS_DEFAULT_BORDERSTYLE, - ASS_DEFAULT_ALIGNMENT); -} - -char *ff_ass_get_dialog(int readorder, int layer, const char *style, - const char *speaker, const char *text) -{ - return av_asprintf("%d,%d,%s,%s,0,0,0,,%s", - readorder, layer, style ? style : "Default", - speaker ? speaker : "", text); -} - -int ff_ass_add_rect2(AVSubtitle *sub, const char *dialog, - int readorder, int layer, const char *style, - const char *speaker, unsigned *nb_rect_allocated) -{ - AVSubtitleRect **rects = sub->rects, *rect; - char *ass_str; - uint64_t new_nb = 0; - - if (sub->num_rects >= UINT_MAX) - return AVERROR(ENOMEM); - - if (nb_rect_allocated && *nb_rect_allocated <= sub->num_rects) { - if (sub->num_rects < UINT_MAX / 17 * 16) { - new_nb = sub->num_rects + sub->num_rects/16 + 1; - } else - new_nb = UINT_MAX; - } else if (!nb_rect_allocated) - new_nb = sub->num_rects + 1; - - if (new_nb) { - rects = av_realloc_array(rects, new_nb, sizeof(*sub->rects)); - if (!rects) - return AVERROR(ENOMEM); - if (nb_rect_allocated) - *nb_rect_allocated = new_nb; - sub->rects = rects; - } - - rect = av_mallocz(sizeof(*rect)); - if (!rect) - return AVERROR(ENOMEM); - rects[sub->num_rects++] = rect; - rect->type = SUBTITLE_ASS; - ass_str = ff_ass_get_dialog(readorder, layer, style, speaker, dialog); - if (!ass_str) - return AVERROR(ENOMEM); - rect->ass = ass_str; - return 0; -} - -int ff_ass_add_rect(AVSubtitle *sub, const char *dialog, - int readorder, int layer, const char *style, - const char *speaker) -{ - return ff_ass_add_rect2(sub, dialog, readorder, layer, style, speaker, NULL); -} - -void ff_ass_decoder_flush(AVCodecContext *avctx) -{ - FFASSDecoderContext *s = avctx->priv_data; - if (!(avctx->flags2 & AV_CODEC_FLAG2_RO_FLUSH_NOOP)) - s->readorder = 0; -} - -void ff_ass_bprint_text_event(AVBPrint *buf, const char *p, int size, - const char *linebreaks, int keep_ass_markup) -{ - const char *p_end = p + size; - - for (; p < p_end && *p; p++) { - - /* forced custom line breaks, not accounted as "normal" EOL */ - if (linebreaks && strchr(linebreaks, *p)) { - av_bprintf(buf, "\\N"); - - /* standard ASS escaping so random characters don't get mis-interpreted - * as ASS */ - } else if (!keep_ass_markup && strchr("{}\\", *p)) { - av_bprintf(buf, "\\%c", *p); - - /* some packets might end abruptly (no \0 at the end, like for example - * in some cases of demuxing from a classic video container), some - * might be terminated with \n or \r\n which we have to remove (for - * consistency with those who haven't), and we also have to deal with - * 
evil cases such as \r at the end of the buffer (and no \0 terminated - * character) */ - } else if (p[0] == '\n') { - /* some stuff left so we can insert a line break */ - if (p < p_end - 1) - av_bprintf(buf, "\\N"); - } else if (p[0] == '\r' && p < p_end - 1 && p[1] == '\n') { - /* \r followed by a \n, we can skip it. We don't insert the \N yet - * because we don't know if it is followed by more text */ - continue; - - /* finally, a sane character */ - } else { - av_bprint_chars(buf, *p, 1); - } - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/audiodsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/audiodsp.c deleted file mode 100644 index eba6e809fd62661ba791787f9e27899cdae787db..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/audiodsp.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include - -#include "libavutil/attributes.h" -#include "libavutil/common.h" -#include "audiodsp.h" - -static inline uint32_t clipf_c_one(uint32_t a, uint32_t mini, - uint32_t maxi, uint32_t maxisign) -{ - if (a > mini) - return mini; - else if ((a ^ (1U << 31)) > maxisign) - return maxi; - else - return a; -} - -static void vector_clipf_c_opposite_sign(float *dst, const float *src, - float *min, float *max, int len) -{ - int i; - uint32_t mini = *(uint32_t *) min; - uint32_t maxi = *(uint32_t *) max; - uint32_t maxisign = maxi ^ (1U << 31); - uint32_t *dsti = (uint32_t *) dst; - const uint32_t *srci = (const uint32_t *) src; - - for (i = 0; i < len; i += 8) { - dsti[i + 0] = clipf_c_one(srci[i + 0], mini, maxi, maxisign); - dsti[i + 1] = clipf_c_one(srci[i + 1], mini, maxi, maxisign); - dsti[i + 2] = clipf_c_one(srci[i + 2], mini, maxi, maxisign); - dsti[i + 3] = clipf_c_one(srci[i + 3], mini, maxi, maxisign); - dsti[i + 4] = clipf_c_one(srci[i + 4], mini, maxi, maxisign); - dsti[i + 5] = clipf_c_one(srci[i + 5], mini, maxi, maxisign); - dsti[i + 6] = clipf_c_one(srci[i + 6], mini, maxi, maxisign); - dsti[i + 7] = clipf_c_one(srci[i + 7], mini, maxi, maxisign); - } -} - -static void vector_clipf_c(float *dst, const float *src, int len, - float min, float max) -{ - int i; - - if (min < 0 && max > 0) { - vector_clipf_c_opposite_sign(dst, src, &min, &max, len); - } else { - for (i = 0; i < len; i += 8) { - dst[i] = av_clipf(src[i], min, max); - dst[i + 1] = av_clipf(src[i + 1], min, max); - dst[i + 2] = av_clipf(src[i + 2], min, max); - dst[i + 3] = av_clipf(src[i + 3], min, max); - dst[i + 4] = av_clipf(src[i + 4], min, max); - dst[i + 5] = av_clipf(src[i + 5], min, max); - dst[i + 6] = av_clipf(src[i + 6], min, max); - dst[i + 7] = av_clipf(src[i + 7], min, max); - } - } -} - -static int32_t scalarproduct_int16_c(const int16_t *v1, const int16_t *v2, - int order) 
-{ - unsigned res = 0; - - while (order--) - res += *v1++ **v2++; - - return res; -} - -static void vector_clip_int32_c(int32_t *dst, const int32_t *src, int32_t min, - int32_t max, unsigned int len) -{ - do { - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - *dst++ = av_clip(*src++, min, max); - len -= 8; - } while (len > 0); -} - -av_cold void ff_audiodsp_init(AudioDSPContext *c) -{ - c->scalarproduct_int16 = scalarproduct_int16_c; - c->vector_clip_int32 = vector_clip_int32_c; - c->vector_clipf = vector_clipf_c; - -#if ARCH_ARM - ff_audiodsp_init_arm(c); -#elif ARCH_PPC - ff_audiodsp_init_ppc(c); -#elif ARCH_RISCV - ff_audiodsp_init_riscv(c); -#elif ARCH_X86 - ff_audiodsp_init_x86(c); -#endif -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.h deleted file mode 100644 index d521b3d263b5b345b4fb1b46e63d2cb6f7920e08..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DYNAMIC_HDR_VIVID_H -#define AVCODEC_DYNAMIC_HDR_VIVID_H - -#include "libavutil/hdr_dynamic_vivid_metadata.h" - -/** - * Parse the user data registered ITU-T T.35 to AVbuffer (AVDynamicHDRVivid). - * @param s A pointer containing the decoded AVDynamicHDRVivid structure. - * @param data The byte array containing the raw ITU-T T.35 data. - * @param size Size of the data array in bytes. - * - * @return 0 if succeed. Otherwise, returns the appropriate AVERROR. - */ -int ff_parse_itu_t_t35_to_dynamic_hdr_vivid(AVDynamicHDRVivid *s, const uint8_t *data, - int size); - -#endif /* AVCODEC_DYNAMIC_HDR_VIVID_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ivi_dsp.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ivi_dsp.c deleted file mode 100644 index d7111565c2f7fc486d095e16eb6b886609a20355..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ivi_dsp.c +++ /dev/null @@ -1,847 +0,0 @@ -/* - * DSP functions for Indeo Video Interactive codecs (Indeo4 and Indeo5) - * - * Copyright (c) 2009-2011 Maxim Poliakovski - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * DSP functions (inverse transforms, motion compensation, wavelet recompositions) - * for Indeo Video Interactive codecs. - */ - -#include -#include "libavutil/common.h" -#include "ivi.h" -#include "ivi_dsp.h" - -void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst, - const ptrdiff_t dst_pitch) -{ - int x, y, indx; - int32_t p0, p1, p2, p3, tmp0, tmp1, tmp2; - int32_t b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4, b2_5, b2_6; - int32_t b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9; - ptrdiff_t pitch, back_pitch; - const short *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr; - const int num_bands = 4; - - /* all bands should have the same pitch */ - pitch = plane->bands[0].pitch; - - /* pixels at the position "y-1" will be set to pixels at the "y" for the 1st iteration */ - back_pitch = 0; - - /* get pointers to the wavelet bands */ - b0_ptr = plane->bands[0].buf; - b1_ptr = plane->bands[1].buf; - b2_ptr = plane->bands[2].buf; - b3_ptr = plane->bands[3].buf; - - for (y = 0; y < plane->height; y += 2) { - - if (y+2 >= plane->height) - pitch= 0; - /* load storage variables with values */ - if (num_bands > 0) { - b0_1 = b0_ptr[0]; - b0_2 = b0_ptr[pitch]; - } - - if (num_bands > 1) { - b1_1 = b1_ptr[back_pitch]; - b1_2 = b1_ptr[0]; - b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch]; - } - - if (num_bands > 2) { - b2_2 = b2_ptr[0]; // b2[x, y ] - b2_3 = b2_2; // b2[x+1,y ] = b2[x,y] - b2_5 = b2_ptr[pitch]; // b2[x ,y+1] - b2_6 = b2_5; // b2[x+1,y+1] = b2[x,y+1] - } - - if (num_bands > 3) { - b3_2 = b3_ptr[back_pitch]; // b3[x ,y-1] - b3_3 = b3_2; // b3[x+1,y-1] = b3[x ,y-1] - b3_5 = b3_ptr[0]; // b3[x ,y ] - b3_6 = b3_5; // b3[x+1,y ] = b3[x ,y ] - b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch]; - b3_9 = b3_8; - } - - for (x = 0, indx = 0; x < plane->width; x+=2, indx++) { - if (x+2 >= plane->width) { - b0_ptr --; - b1_ptr --; - b2_ptr --; - b3_ptr --; - } - - /* some values calculated in the previous iterations can */ - /* be reused in the next ones, so do appropriate copying */ - b2_1 = b2_2; // b2[x-1,y ] = b2[x, y ] - b2_2 = b2_3; // b2[x ,y ] = b2[x+1,y ] - b2_4 = b2_5; // b2[x-1,y+1] = b2[x ,y+1] - b2_5 = b2_6; // b2[x ,y+1] = b2[x+1,y+1] - b3_1 = b3_2; // b3[x-1,y-1] = b3[x ,y-1] - b3_2 = b3_3; // b3[x ,y-1] = b3[x+1,y-1] - b3_4 = b3_5; // b3[x-1,y ] = b3[x ,y ] - b3_5 = b3_6; // b3[x ,y ] = b3[x+1,y ] - b3_7 = b3_8; // vert_HPF(x-1) - b3_8 = b3_9; // vert_HPF(x ) - - p0 = p1 = p2 = p3 = 0; - - /* process the LL-band by applying LPF both vertically and horizontally */ - if (num_bands > 0) { - tmp0 = b0_1; - tmp2 = b0_2; - b0_1 = b0_ptr[indx+1]; - b0_2 = b0_ptr[pitch+indx+1]; - tmp1 = tmp0 + b0_1; - - p0 = tmp0 * 16; - p1 = tmp1 * 8; - p2 = (tmp0 + tmp2) * 8; - p3 = (tmp1 + tmp2 + b0_2) * 4; - } - - /* process the HL-band by applying HPF vertically and LPF horizontally */ - if (num_bands > 1) { - tmp0 = b1_2; - tmp1 = b1_1; - b1_2 = b1_ptr[indx+1]; - b1_1 = b1_ptr[back_pitch+indx+1]; - - tmp2 = tmp1 - tmp0*6 + b1_3; - b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1]; - - p0 += (tmp0 + tmp1) * 8; - p1 += (tmp0 + tmp1 
+ b1_1 + b1_2) * 4; - p2 += tmp2 * 4; - p3 += (tmp2 + b1_3) * 2; - } - - /* process the LH-band by applying LPF vertically and HPF horizontally */ - if (num_bands > 2) { - b2_3 = b2_ptr[indx+1]; - b2_6 = b2_ptr[pitch+indx+1]; - - tmp0 = b2_1 + b2_2; - tmp1 = b2_1 - b2_2*6 + b2_3; - - p0 += tmp0 * 8; - p1 += tmp1 * 4; - p2 += (tmp0 + b2_4 + b2_5) * 4; - p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) * 2; - } - - /* process the HH-band by applying HPF both vertically and horizontally */ - if (num_bands > 3) { - b3_6 = b3_ptr[indx+1]; // b3[x+1,y ] - b3_3 = b3_ptr[back_pitch+indx+1]; // b3[x+1,y-1] - - tmp0 = b3_1 + b3_4; - tmp1 = b3_2 + b3_5; - tmp2 = b3_3 + b3_6; - - b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1]; - - p0 += (tmp0 + tmp1) * 4; - p1 += (tmp0 - tmp1*6 + tmp2) * 2; - p2 += (b3_7 + b3_8) * 2; - p3 += b3_7 - b3_8*6 + b3_9; - } - - /* output four pixels */ - dst[x] = av_clip_uint8((p0 >> 6) + 128); - dst[x+1] = av_clip_uint8((p1 >> 6) + 128); - dst[dst_pitch+x] = av_clip_uint8((p2 >> 6) + 128); - dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128); - }// for x - - dst += dst_pitch << 1; - - back_pitch = -pitch; - - b0_ptr += pitch + 1; - b1_ptr += pitch + 1; - b2_ptr += pitch + 1; - b3_ptr += pitch + 1; - } -} - -void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst, - const ptrdiff_t dst_pitch) -{ - int x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3; - const short *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr; - ptrdiff_t pitch; - - /* all bands should have the same pitch */ - pitch = plane->bands[0].pitch; - - /* get pointers to the wavelet bands */ - b0_ptr = plane->bands[0].buf; - b1_ptr = plane->bands[1].buf; - b2_ptr = plane->bands[2].buf; - b3_ptr = plane->bands[3].buf; - - for (y = 0; y < plane->height; y += 2) { - for (x = 0, indx = 0; x < plane->width; x += 2, indx++) { - /* load coefficients */ - b0 = b0_ptr[indx]; //should be: b0 = (num_bands > 0) ? b0_ptr[indx] : 0; - b1 = b1_ptr[indx]; //should be: b1 = (num_bands > 1) ? b1_ptr[indx] : 0; - b2 = b2_ptr[indx]; //should be: b2 = (num_bands > 2) ? b2_ptr[indx] : 0; - b3 = b3_ptr[indx]; //should be: b3 = (num_bands > 3) ? 
b3_ptr[indx] : 0; - - /* haar wavelet recomposition */ - p0 = (b0 + b1 + b2 + b3 + 2) >> 2; - p1 = (b0 + b1 - b2 - b3 + 2) >> 2; - p2 = (b0 - b1 + b2 - b3 + 2) >> 2; - p3 = (b0 - b1 - b2 + b3 + 2) >> 2; - - /* bias, convert and output four pixels */ - dst[x] = av_clip_uint8(p0 + 128); - dst[x + 1] = av_clip_uint8(p1 + 128); - dst[dst_pitch + x] = av_clip_uint8(p2 + 128); - dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128); - }// for x - - dst += dst_pitch << 1; - - b0_ptr += pitch; - b1_ptr += pitch; - b2_ptr += pitch; - b3_ptr += pitch; - }// for y -} - -/** butterfly operation for the inverse Haar transform */ -#define IVI_HAAR_BFLY(s1, s2, o1, o2, t) \ - t = ((s1) - (s2)) >> 1;\ - o1 = ((s1) + (s2)) >> 1;\ - o2 = (t);\ - -/** inverse 8-point Haar transform */ -#define INV_HAAR8(s1, s5, s3, s7, s2, s4, s6, s8,\ - d1, d2, d3, d4, d5, d6, d7, d8,\ - t0, t1, t2, t3, t4, t5, t6, t7, t8) {\ - t1 = (s1) * 2; t5 = (s5) * 2;\ - IVI_HAAR_BFLY(t1, t5, t1, t5, t0); IVI_HAAR_BFLY(t1, s3, t1, t3, t0);\ - IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\ - IVI_HAAR_BFLY(t3, s4, t3, t4, t0); IVI_HAAR_BFLY(t5, s6, t5, t6, t0);\ - IVI_HAAR_BFLY(t7, s8, t7, t8, t0);\ - d1 = COMPENSATE(t1);\ - d2 = COMPENSATE(t2);\ - d3 = COMPENSATE(t3);\ - d4 = COMPENSATE(t4);\ - d5 = COMPENSATE(t5);\ - d6 = COMPENSATE(t6);\ - d7 = COMPENSATE(t7);\ - d8 = COMPENSATE(t8); } - -/** inverse 4-point Haar transform */ -#define INV_HAAR4(s1, s3, s5, s7, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\ - IVI_HAAR_BFLY(s1, s3, t0, t1, t4);\ - IVI_HAAR_BFLY(t0, s5, t2, t3, t4);\ - d1 = COMPENSATE(t2);\ - d2 = COMPENSATE(t3);\ - IVI_HAAR_BFLY(t1, s7, t2, t3, t4);\ - d3 = COMPENSATE(t2);\ - d4 = COMPENSATE(t3); } - -void ff_ivi_inverse_haar_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i, shift, sp1, sp2, sp3, sp4; - const int32_t *src; - int32_t *dst; - int tmp[64]; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* apply the InvHaar8 to all columns */ -#define COMPENSATE(x) (x) - src = in; - dst = tmp; - for (i = 0; i < 8; i++) { - if (flags[i]) { - /* pre-scaling */ - shift = !(i & 4); - sp1 = src[ 0] * (1 << shift); - sp2 = src[ 8] * (1 << shift); - sp3 = src[16] * (1 << shift); - sp4 = src[24] * (1 << shift); - INV_HAAR8( sp1, sp2, sp3, sp4, - src[32], src[40], src[48], src[56], - dst[ 0], dst[ 8], dst[16], dst[24], - dst[32], dst[40], dst[48], dst[56], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } else - dst[ 0] = dst[ 8] = dst[16] = dst[24] = - dst[32] = dst[40] = dst[48] = dst[56] = 0; - - src++; - dst++; - } -#undef COMPENSATE - - /* apply the InvHaar8 to all rows */ -#define COMPENSATE(x) (x) - src = tmp; - for (i = 0; i < 8; i++) { - if ( !src[0] && !src[1] && !src[2] && !src[3] - && !src[4] && !src[5] && !src[6] && !src[7]) { - memset(out, 0, 8 * sizeof(out[0])); - } else { - INV_HAAR8(src[0], src[1], src[2], src[3], - src[4], src[5], src[6], src[7], - out[0], out[1], out[2], out[3], - out[4], out[5], out[6], out[7], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } - src += 8; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_row_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* apply the InvHaar8 to all rows */ -#define COMPENSATE(x) (x) - for (i = 0; i < 8; i++) { - if ( !in[0] && !in[1] && !in[2] && !in[3] - && !in[4] && !in[5] && !in[6] && !in[7]) { - memset(out, 0, 8 * sizeof(out[0])); - } else { - INV_HAAR8(in[0], in[1], in[2], in[3], - in[4], in[5], in[6], in[7], - out[0], 
out[1], out[2], out[3], - out[4], out[5], out[6], out[7], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } - in += 8; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_col_haar8(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - - /* apply the InvHaar8 to all columns */ -#define COMPENSATE(x) (x) - for (i = 0; i < 8; i++) { - if (flags[i]) { - INV_HAAR8(in[ 0], in[ 8], in[16], in[24], - in[32], in[40], in[48], in[56], - out[0 * pitch], out[1 * pitch], - out[2 * pitch], out[3 * pitch], - out[4 * pitch], out[5 * pitch], - out[6 * pitch], out[7 * pitch], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } else - out[0 * pitch] = out[1 * pitch] = - out[2 * pitch] = out[3 * pitch] = - out[4 * pitch] = out[5 * pitch] = - out[6 * pitch] = out[7 * pitch] = 0; - - in++; - out++; - } -#undef COMPENSATE -} - -void ff_ivi_inverse_haar_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i, shift, sp1, sp2; - const int32_t *src; - int32_t *dst; - int tmp[16]; - int t0, t1, t2, t3, t4; - - /* apply the InvHaar4 to all columns */ -#define COMPENSATE(x) (x) - src = in; - dst = tmp; - for (i = 0; i < 4; i++) { - if (flags[i]) { - /* pre-scaling */ - shift = !(i & 2); - sp1 = src[0] * (1 << shift); - sp2 = src[4] * (1 << shift); - INV_HAAR4( sp1, sp2, src[8], src[12], - dst[0], dst[4], dst[8], dst[12], - t0, t1, t2, t3, t4); - } else - dst[0] = dst[4] = dst[8] = dst[12] = 0; - - src++; - dst++; - } -#undef COMPENSATE - - /* apply the InvHaar8 to all rows */ -#define COMPENSATE(x) (x) - src = tmp; - for (i = 0; i < 4; i++) { - if (!src[0] && !src[1] && !src[2] && !src[3]) { - memset(out, 0, 4 * sizeof(out[0])); - } else { - INV_HAAR4(src[0], src[1], src[2], src[3], - out[0], out[1], out[2], out[3], - t0, t1, t2, t3, t4); - } - src += 4; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_row_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4; - - /* apply the InvHaar4 to all rows */ -#define COMPENSATE(x) (x) - for (i = 0; i < 4; i++) { - if (!in[0] && !in[1] && !in[2] && !in[3]) { - memset(out, 0, 4 * sizeof(out[0])); - } else { - INV_HAAR4(in[0], in[1], in[2], in[3], - out[0], out[1], out[2], out[3], - t0, t1, t2, t3, t4); - } - in += 4; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_col_haar4(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4; - - /* apply the InvHaar8 to all columns */ -#define COMPENSATE(x) (x) - for (i = 0; i < 4; i++) { - if (flags[i]) { - INV_HAAR4(in[0], in[4], in[8], in[12], - out[0 * pitch], out[1 * pitch], - out[2 * pitch], out[3 * pitch], - t0, t1, t2, t3, t4); - } else - out[0 * pitch] = out[1 * pitch] = - out[2 * pitch] = out[3 * pitch] = 0; - - in++; - out++; - } -#undef COMPENSATE -} - -void ff_ivi_dc_haar_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch, - int blk_size) -{ - int x, y; - int16_t dc_coeff; - - dc_coeff = (*in + 0) >> 3; - - for (y = 0; y < blk_size; out += pitch, y++) { - for (x = 0; x < blk_size; x++) - out[x] = dc_coeff; - } -} - -/** butterfly operation for the inverse slant transform */ -#define IVI_SLANT_BFLY(s1, s2, o1, o2, t) \ - t = (s1) - (s2);\ - o1 = (s1) + (s2);\ - o2 = (t);\ - -/** This is a reflection a,b = 1/2, 5/4 for the inverse slant transform */ -#define IVI_IREFLECT(s1, s2, o1, o2, t) \ - t = (((s1) + (s2)*2 + 2) >> 2) + (s1);\ - o2 = (((s1)*2 - (s2) + 2) >> 2) - (s2);\ - o1 = (t);\ - -/** This 
is a reflection a,b = 1/2, 7/8 for the inverse slant transform */ -#define IVI_SLANT_PART4(s1, s2, o1, o2, t) \ - t = (s2) + (((s1)*4 - (s2) + 4) >> 3);\ - o2 = (s1) + ((-(s1) - (s2)*4 + 4) >> 3);\ - o1 = (t);\ - -/** inverse slant8 transform */ -#define IVI_INV_SLANT8(s1, s4, s8, s5, s2, s6, s3, s7,\ - d1, d2, d3, d4, d5, d6, d7, d8,\ - t0, t1, t2, t3, t4, t5, t6, t7, t8) {\ - IVI_SLANT_PART4(s4, s5, t4, t5, t0);\ -\ - IVI_SLANT_BFLY(s1, t5, t1, t5, t0); IVI_SLANT_BFLY(s2, s6, t2, t6, t0);\ - IVI_SLANT_BFLY(s7, s3, t7, t3, t0); IVI_SLANT_BFLY(t4, s8, t4, t8, t0);\ -\ - IVI_SLANT_BFLY(t1, t2, t1, t2, t0); IVI_IREFLECT (t4, t3, t4, t3, t0);\ - IVI_SLANT_BFLY(t5, t6, t5, t6, t0); IVI_IREFLECT (t8, t7, t8, t7, t0);\ - IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\ - IVI_SLANT_BFLY(t5, t8, t5, t8, t0); IVI_SLANT_BFLY(t6, t7, t6, t7, t0);\ - d1 = COMPENSATE(t1);\ - d2 = COMPENSATE(t2);\ - d3 = COMPENSATE(t3);\ - d4 = COMPENSATE(t4);\ - d5 = COMPENSATE(t5);\ - d6 = COMPENSATE(t6);\ - d7 = COMPENSATE(t7);\ - d8 = COMPENSATE(t8);} - -/** inverse slant4 transform */ -#define IVI_INV_SLANT4(s1, s4, s2, s3, d1, d2, d3, d4, t0, t1, t2, t3, t4) {\ - IVI_SLANT_BFLY(s1, s2, t1, t2, t0); IVI_IREFLECT (s4, s3, t4, t3, t0);\ -\ - IVI_SLANT_BFLY(t1, t4, t1, t4, t0); IVI_SLANT_BFLY(t2, t3, t2, t3, t0);\ - d1 = COMPENSATE(t1);\ - d2 = COMPENSATE(t2);\ - d3 = COMPENSATE(t3);\ - d4 = COMPENSATE(t4);} - -void ff_ivi_inverse_slant_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i; - const int32_t *src; - int32_t *dst; - int tmp[64]; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - -#define COMPENSATE(x) (x) - src = in; - dst = tmp; - for (i = 0; i < 8; i++) { - if (flags[i]) { - IVI_INV_SLANT8(src[0], src[8], src[16], src[24], src[32], src[40], src[48], src[56], - dst[0], dst[8], dst[16], dst[24], dst[32], dst[40], dst[48], dst[56], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } else - dst[0] = dst[8] = dst[16] = dst[24] = dst[32] = dst[40] = dst[48] = dst[56] = 0; - - src++; - dst++; - } -#undef COMPENSATE - -#define COMPENSATE(x) (((x) + 1)>>1) - src = tmp; - for (i = 0; i < 8; i++) { - if (!src[0] && !src[1] && !src[2] && !src[3] && !src[4] && !src[5] && !src[6] && !src[7]) { - memset(out, 0, 8*sizeof(out[0])); - } else { - IVI_INV_SLANT8(src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7], - out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } - src += 8; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_inverse_slant_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i; - const int32_t *src; - int32_t *dst; - int tmp[16]; - int t0, t1, t2, t3, t4; - -#define COMPENSATE(x) (x) - src = in; - dst = tmp; - for (i = 0; i < 4; i++) { - if (flags[i]) { - IVI_INV_SLANT4(src[0], src[4], src[8], src[12], - dst[0], dst[4], dst[8], dst[12], - t0, t1, t2, t3, t4); - } else - dst[0] = dst[4] = dst[8] = dst[12] = 0; - - src++; - dst++; - } -#undef COMPENSATE - -#define COMPENSATE(x) (((x) + 1)>>1) - src = tmp; - for (i = 0; i < 4; i++) { - if (!src[0] && !src[1] && !src[2] && !src[3]) { - out[0] = out[1] = out[2] = out[3] = 0; - } else { - IVI_INV_SLANT4(src[0], src[1], src[2], src[3], - out[0], out[1], out[2], out[3], - t0, t1, t2, t3, t4); - } - src += 4; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_dc_slant_2d(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size) -{ - int x, y; - int16_t dc_coeff; - - dc_coeff = (*in + 1) >> 1; - - for (y = 
0; y < blk_size; out += pitch, y++) { - for (x = 0; x < blk_size; x++) - out[x] = dc_coeff; - } -} - -void ff_ivi_row_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - -#define COMPENSATE(x) (((x) + 1)>>1) - for (i = 0; i < 8; i++) { - if (!in[0] && !in[1] && !in[2] && !in[3] && !in[4] && !in[5] && !in[6] && !in[7]) { - memset(out, 0, 8*sizeof(out[0])); - } else { - IVI_INV_SLANT8( in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], - out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } - in += 8; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_dc_row_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size) -{ - int x, y; - int16_t dc_coeff; - - dc_coeff = (*in + 1) >> 1; - - for (x = 0; x < blk_size; x++) - out[x] = dc_coeff; - - out += pitch; - - for (y = 1; y < blk_size; out += pitch, y++) { - for (x = 0; x < blk_size; x++) - out[x] = 0; - } -} - -void ff_ivi_col_slant8(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i, row2, row4, row8; - int t0, t1, t2, t3, t4, t5, t6, t7, t8; - - row2 = pitch << 1; - row4 = pitch << 2; - row8 = pitch << 3; - -#define COMPENSATE(x) (((x) + 1)>>1) - for (i = 0; i < 8; i++) { - if (flags[i]) { - IVI_INV_SLANT8(in[0], in[8], in[16], in[24], in[32], in[40], in[48], in[56], - out[0], out[pitch], out[row2], out[row2 + pitch], out[row4], - out[row4 + pitch], out[row4 + row2], out[row8 - pitch], - t0, t1, t2, t3, t4, t5, t6, t7, t8); - } else { - out[0] = out[pitch] = out[row2] = out[row2 + pitch] = out[row4] = - out[row4 + pitch] = out[row4 + row2] = out[row8 - pitch] = 0; - } - - in++; - out++; - } -#undef COMPENSATE -} - -void ff_ivi_dc_col_slant(const int32_t *in, int16_t *out, ptrdiff_t pitch, int blk_size) -{ - int x, y; - int16_t dc_coeff; - - dc_coeff = (*in + 1) >> 1; - - for (y = 0; y < blk_size; out += pitch, y++) { - out[0] = dc_coeff; - for (x = 1; x < blk_size; x++) - out[x] = 0; - } -} - -void ff_ivi_row_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i; - int t0, t1, t2, t3, t4; - -#define COMPENSATE(x) (((x) + 1)>>1) - for (i = 0; i < 4; i++) { - if (!in[0] && !in[1] && !in[2] && !in[3]) { - memset(out, 0, 4*sizeof(out[0])); - } else { - IVI_INV_SLANT4( in[0], in[1], in[2], in[3], - out[0], out[1], out[2], out[3], - t0, t1, t2, t3, t4); - } - in += 4; - out += pitch; - } -#undef COMPENSATE -} - -void ff_ivi_col_slant4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) -{ - int i, row2; - int t0, t1, t2, t3, t4; - - row2 = pitch << 1; - -#define COMPENSATE(x) (((x) + 1)>>1) - for (i = 0; i < 4; i++) { - if (flags[i]) { - IVI_INV_SLANT4(in[0], in[4], in[8], in[12], - out[0], out[pitch], out[row2], out[row2 + pitch], - t0, t1, t2, t3, t4); - } else { - out[0] = out[pitch] = out[row2] = out[row2 + pitch] = 0; - } - - in++; - out++; - } -#undef COMPENSATE -} - -void ff_ivi_put_pixels_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch, - const uint8_t *flags) -{ - int x, y; - - for (y = 0; y < 8; out += pitch, in += 8, y++) - for (x = 0; x < 8; x++) - out[x] = in[x]; -} - -void ff_ivi_put_dc_pixel_8x8(const int32_t *in, int16_t *out, ptrdiff_t pitch, - int blk_size) -{ - int y; - - out[0] = in[0]; - memset(out + 1, 0, 7*sizeof(out[0])); - out += pitch; - - for (y = 1; y < 8; out += pitch, y++) - memset(out, 0, 8*sizeof(out[0])); -} - -#define IVI_MC_TEMPLATE(size, suffix, OP) \ -static void 
ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, \ - ptrdiff_t dpitch, \ - const int16_t *ref_buf, \ - ptrdiff_t pitch, int mc_type) \ -{ \ - int i, j; \ - const int16_t *wptr; \ -\ - switch (mc_type) { \ - case 0: /* fullpel (no interpolation) */ \ - for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) { \ - for (j = 0; j < size; j++) {\ - OP(buf[j], ref_buf[j]); \ - } \ - } \ - break; \ - case 1: /* horizontal halfpel interpolation */ \ - for (i = 0; i < size; i++, buf += dpitch, ref_buf += pitch) \ - for (j = 0; j < size; j++) \ - OP(buf[j], (ref_buf[j] + ref_buf[j+1]) >> 1); \ - break; \ - case 2: /* vertical halfpel interpolation */ \ - wptr = ref_buf + pitch; \ - for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \ - for (j = 0; j < size; j++) \ - OP(buf[j], (ref_buf[j] + wptr[j]) >> 1); \ - break; \ - case 3: /* vertical and horizontal halfpel interpolation */ \ - wptr = ref_buf + pitch; \ - for (i = 0; i < size; i++, buf += dpitch, wptr += pitch, ref_buf += pitch) \ - for (j = 0; j < size; j++) \ - OP(buf[j], (ref_buf[j] + ref_buf[j+1] + wptr[j] + wptr[j+1]) >> 2); \ - break; \ - } \ -} \ -\ -void ff_ivi_mc_ ## size ##x## size ## suffix(int16_t *buf, const int16_t *ref_buf, \ - ptrdiff_t pitch, int mc_type) \ -{ \ - ivi_mc_ ## size ##x## size ## suffix(buf, pitch, ref_buf, pitch, mc_type); \ -} \ - -#define IVI_MC_AVG_TEMPLATE(size, suffix, OP) \ -void ff_ivi_mc_avg_ ## size ##x## size ## suffix(int16_t *buf, \ - const int16_t *ref_buf, \ - const int16_t *ref_buf2, \ - ptrdiff_t pitch, \ - int mc_type, int mc_type2) \ -{ \ - int16_t tmp[size * size]; \ - int i, j; \ -\ - ivi_mc_ ## size ##x## size ## _no_delta(tmp, size, ref_buf, pitch, mc_type); \ - ivi_mc_ ## size ##x## size ## _delta(tmp, size, ref_buf2, pitch, mc_type2); \ - for (i = 0; i < size; i++, buf += pitch) { \ - for (j = 0; j < size; j++) {\ - OP(buf[j], tmp[i * size + j] >> 1); \ - } \ - } \ -} \ - -#define OP_PUT(a, b) (a) = (b) -#define OP_ADD(a, b) (a) += (b) - -IVI_MC_TEMPLATE(8, _no_delta, OP_PUT) -IVI_MC_TEMPLATE(8, _delta, OP_ADD) -IVI_MC_TEMPLATE(4, _no_delta, OP_PUT) -IVI_MC_TEMPLATE(4, _delta, OP_ADD) -IVI_MC_AVG_TEMPLATE(8, _no_delta, OP_PUT) -IVI_MC_AVG_TEMPLATE(8, _delta, OP_ADD) -IVI_MC_AVG_TEMPLATE(4, _no_delta, OP_PUT) -IVI_MC_AVG_TEMPLATE(4, _delta, OP_ADD) diff --git a/spaces/congsaPfin/Manga-OCR/logs/Burn Rubber on Track with Drift Clash Online Racing Mod APK 1.85 [Free Download].md b/spaces/congsaPfin/Manga-OCR/logs/Burn Rubber on Track with Drift Clash Online Racing Mod APK 1.85 [Free Download].md deleted file mode 100644 index 255d2a574f5d8a2baf96a7ac8c439ce9e6a25911..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Burn Rubber on Track with Drift Clash Online Racing Mod APK 1.85 [Free Download].md +++ /dev/null @@ -1,145 +0,0 @@ - -

    Drift Clash Online Racing 1.85 Mod APK: A Guide for Drift Racing Fans

    -

    If you are a fan of drift racing games, you might have heard of Drift Clash Online Racing, a fun and challenging game with real-time battles and realistic physics. But did you know that there is a modified version of the game that gives you unlimited money and rewards? In this article, we will tell you everything you need to know about Drift Clash Online Racing 1.85 Mod APK, including how to download and install it, what the benefits of using it are, and some tips and tricks for drifting like a pro in the game.

    -

    drift clash online racing 1.85 mod apk


    Download File ……… https://urlca.com/2uO6wR



    -

    What is Drift Clash Online Racing?

    -

    Drift Clash Online Racing is a drift racing game published by AKPublish pty ltd. It is the first drift racing game with real-time battles and realistic physics. You can win most wanted cars, burn tyres on track, play with your friends in free-roam, and enjoy the unique retro style of the game.

    -

    A fun and challenging drift racing game with real-time battles and realistic physics

    -

    In Drift Clash Online Racing, you can battle with other players in real-time and become the drift king. You can also drift on motorcycles, which is a rare feature in drift racing games. The game has a drift points scoring system based on car speed and angle, and you can earn an extra combo for drifting on clipping zones. The game has no drift helpers, steering assistants, or any other aids, so everything depends on your skill and control.

    -

    Retro-style graphics, easy controls, and unique features

    -

    The game has a retro-style graphics that gives it a nostalgic feel. The game also has easy controls that allow you to steer, accelerate, brake, and handbrake with simple taps and swipes. The game also has some unique features, such as motorcycles drifting, clipping zones, different sport cars, and customization options.

    -

    How to play Drift Clash Online Racing

    -

    To play Drift Clash Online Racing, you need to download and install the game from the Google Play Store or the App Store. The game will start with a tutorial that will teach you the basics of the game's controls and mechanics. You can then choose from different modes, such as free-roam, single player, or multiplayer. You can also select from different tracks and cars. Your goal is to drift as much as possible and score more points than your opponents.

    -

    What is Drift Clash Online Racing 1.85 Mod APK?

    -

    Drift Clash Online Racing 1.85 Mod APK is a modified version of the original game that gives you unlimited money and rewards. You can use this mod apk to unlock all the cars, customize them as you like, and enjoy the game without any ads or limitations.

    -

    drift clash online racing hack mod apk
    -drift clash online racing unlimited money mod apk
    -drift clash online racing latest version mod apk
    -drift clash online racing mod apk download for android
    -drift clash online racing mod apk free download
    -drift clash online racing mod apk happymod
    -drift clash online racing mod apk revdl
    -drift clash online racing mod apk rexdl
    -drift clash online racing mod apk android 1
    -drift clash online racing mod apk obb
    -drift clash online racing mod apk offline
    -drift clash online racing mod apk no root
    -drift clash online racing mod apk unlimited coins
    -drift clash online racing mod apk unlimited gems
    -drift clash online racing mod apk unlimited gold
    -drift clash online racing mod apk unlimited nitro
    -drift clash online racing mod apk all cars unlocked
    -drift clash online racing mod apk all tracks unlocked
    -drift clash online racing mod apk premium
    -drift clash online racing mod apk pro
    -drift clash online racing mod apk vip
    -drift clash online racing mod apk mega
    -drift clash online racing mod apk full version
    -drift clash online racing mod apk data
    -drift clash online racing mod apk file
    -drift clash online racing cheat mod apk
    -drift clash online racing hack cheat mod apk
    -drift clash online racing hack tool mod apk
    -drift clash online racing hack version mod apk
    -drift clash online racing hacked game mod apk
    -drift clash online racing cracked mod apk
    -drift clash online racing patched mod apk
    -drift clash online racing unlocked mod apk
    -drift clash online racing updated mod apk
    -drift clash online racing new version mod apk
    -drift clash online racing old version mod apk
    -drift clash online racing original version mod apk
    -drift clash online racing official version mod apk
    -drift clash online racing best version mod apk
    -drift clash online racing latest update mod apk
    -drift clash online racing new update mod apk
    -drift clash online racing old update mod apk
    -drift clash online racing beta version mod apk
    -drift clash online racing alpha version mod apk
    -drift clash online racing real version mod apk
    -drift clash online racing fake version mod apk
    -drift clash online racing working version mod apk
    -drift clash online racing non working version mod apk
    -drift clash online racing tested version mod apk

    -

    A modified version of the original game that gives you unlimited money and rewards

    -

    With Drift Clash Online Racing 1.85 Mod APK, you can get unlimited money and rewards that you can use to buy and upgrade any car you want. You can also get free access to all the tracks and modes in the game. You don't have to worry about ads or in-app purchases, as they are removed in this mod apk.

    -

    How to download and install Drift Clash Online Racing 1.85 Mod APK

    -

    To download and install Drift Clash Online Racing 1.85 Mod APK, you need to follow these steps:

    -
      -
    1. Go to the link and download the mod apk file.
    2. Enable the installation of apps from unknown sources on your device's settings.
    3. Locate the downloaded file and tap on it to start the installation.
    4. Wait for the installation to finish and launch the game.
    5. Enjoy the game with unlimited money and rewards.
    -

    The benefits of using Drift Clash Online Racing 1.85 Mod APK

    -

    By using Drift Clash Online Racing 1.85 Mod APK, you can enjoy the following benefits:

    -
      -
    • You can unlock all the cars in the game, including the rare and exclusive ones.
    • You can customize your car with different rims, colors, stickers, and decals.
    • You can play on any track and mode you want, without any restrictions.
    • You can challenge other players in real-time multiplayer mode, without any lag or connection issues.
    • You can experience the realistic physics and graphics of the game, without any bugs or glitches.
    -

    Tips and Tricks for Drifting Like a Pro in Drift Clash Online Racing

    -

    If you want to improve your drifting skills and score more points in Drift Clash Online Racing, you can follow these tips and tricks:

    -

    Choose the right car for your style and preference

    -

    The game offers a variety of cars, each with different characteristics and performance. You can choose from different categories, such as muscle cars, sports cars, supercars, or motorcycles. You can also test drive each car before buying it, to see how it handles and drifts. You should choose a car that suits your style and preference, whether you like speed, power, or control.

    -

    Master the drifting techniques and scoring system

    -

    The game has a drifting points scoring system based on car speed and angle. The faster and sharper you drift, the more points you get. You can also get extra combo points for drifting on clipping zones, which are marked by yellow lines on the track. You should master the drifting techniques, such as handbrake drifting, counter-steering, throttle control, and weight transfer. You should also practice on different tracks and learn their curves and corners.
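    The game does not publish its exact formula, so as a purely illustrative sketch only, a score that grows with speed and drift angle and gets a combo multiplier on clipping zones might look like the hypothetical Python snippet below; every name and weight in it is invented for illustration.

```python
# Purely illustrative sketch of a drift-score formula.
# The game's real scoring logic is not public; these names and weights are invented.
def drift_points(speed_kmh: float, angle_deg: float, on_clipping_zone: bool) -> float:
    base = speed_kmh * (angle_deg / 90.0)      # faster and sharper drifts score more
    combo = 1.5 if on_clipping_zone else 1.0   # extra combo for drifting on a clipping zone
    return base * combo

# Example: a 120 km/h drift held at a 45-degree angle through a clipping zone
print(drift_points(120, 45, True))  # 90.0
```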

    -

    Customize your car with different rims, colors, stickers, and decals

    -

    The game allows you to customize your car with different rims, colors, stickers, and decals. You can change the appearance of your car according to your taste and personality. You can also create your own unique design and style. Customizing your car can also affect its performance, as some rims and colors can increase its speed or handling. You should experiment with different combinations and see what works best for you.

    Challenge other players in real-time multiplayer mode

    -

    The game has a real-time multiplayer mode, where you can challenge other players from around the world and compete for the drift king title. You can join or create a room, and invite your friends or random players to join. You can also chat with other players and send them emojis. The multiplayer mode is fast and smooth, without any lag or connection issues. You should challenge other players and show off your drifting skills and style.

    -

    Explore different tracks and clipping zones

    -

    The game has different tracks, each with its own layout and scenery. You can choose from urban, industrial, desert, or snow tracks. Each track has different clipping zones, which are marked by yellow lines on the track. You can get extra combo points for drifting on clipping zones, but you have to be careful not to hit the walls or obstacles. You should explore different tracks and clipping zones, and find the best spots to drift and score.

    -

    Reviews and Ratings of Drift Clash Online Racing

    -

    Drift Clash Online Racing is a popular and well-received drift racing game, with over 10 million downloads and a 4.4-star rating on the Google Play Store. The game has also received positive reviews from critics and players alike. Here are some of the reviews and ratings of Drift Clash Online Racing:

    -

    What do other players think about the game?

    -

    Here are some of the comments from the players who have played the game:

    -
    -

    "This game is awesome! The graphics are retro but cool, the physics are realistic, and the multiplayer mode is fun and addictive. I love drifting on motorcycles, it's so unique and challenging. The game is also updated regularly with new cars and tracks. I highly recommend this game to anyone who loves drift racing games."

    -

    "This is one of the best drift racing games I have ever played. The game has everything you need: real-time battles, realistic physics, customization options, different modes, and tracks. The game is also easy to control and play. The only thing I don't like is that there are some ads, but they are not too annoying."

    -

    "This game is amazing! The game has a retro style that reminds me of the old arcade games. The game also has a realistic physics engine that makes drifting feel natural and satisfying. The game also has a lot of cars to choose from, each with its own characteristics and performance. The game also has a multiplayer mode that lets you challenge other players from around the world."

    -
    -

    How does Drift Clash Online Racing compare to other drift racing games?

    -

    Drift Clash Online Racing is one of the best drift racing games available on the market, as it offers a unique and exciting gameplay experience that sets it apart from other drift racing games. Here are some of the features that make Drift Clash Online Racing stand out from other drift racing games:

    Feature | Drift Clash Online Racing | Other Drift Racing Games
    Real-time battles | Yes | No
    Motorcycles drifting | Yes | No
    Clipping zones | Yes | No
    Retro-style graphics | Yes | No
    Realistic physics | Yes | No
    Customization options | Yes | No
    Different modes and tracks | Yes | No
    Easy controls | Yes | No
    Multiplayer mode | Yes | No
    No drift helpers or steering assistants | Yes | No
    No in-app purchases or limitations | Yes (with mod apk) | No
    -

    What are the pros and cons of Drift Clash Online Racing?

    -

    Like any other game, Drift Clash Online Racing has its pros and cons. Here are some of the advantages and disadvantages of playing Drift Clash Online Racing:

    Pros | Cons
    Fun and challenging gameplay | Some ads in the original game
    Real-time battles and multiplayer mode | Some bugs and glitches in the game
    Realistic physics and graphics | Some tracks and cars are locked in the original game
    Customization options and clipping zones | Some cars are hard to control and drift
    Retro-style graphics and easy controls | Some players may not like the retro style or the easy controls
    No drift helpers or steering assistants | Some players may find the game too difficult or frustrating
    No in-app purchases or limitations (with mod apk) | Some players may not like using mod apk or may face security issues
    -

    Conclusion

    -

    Drift Clash Online Racing is a drift racing game that offers a unique and exciting gameplay experience for drift racing fans. The game has real-time battles, realistic physics, customization options, different modes and tracks, retro-style graphics, and easy controls. The game also has a mod apk version that gives you unlimited money and rewards, and removes ads and limitations. The game is fun and challenging, but also has some ads, bugs, glitches, and locked features in the original game. The game is suitable for anyone who loves drift racing games, but also requires skill and control to master the drifting techniques and score more points.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Drift Clash Online Racing:

    -
      -
    1. Q: How can I get more money and rewards in Drift Clash Online Racing?
      A: You can get more money and rewards by winning battles, completing missions, watching ads, or using the mod apk version.
    2. Q: How can I unlock more cars and tracks in Drift Clash Online Racing?
      A: You can unlock more cars and tracks by earning stars, leveling up, or using the mod apk version.
    3. Q: How can I customize my car in Drift Clash Online Racing?
      A: You can customize your car by tapping on the garage icon on the main menu, and then selecting the car you want to customize. You can change the rims, colors, stickers, and decals of your car.
    4. Q: How can I join or create a room in Drift Clash Online Racing?
      A: You can join or create a room by tapping on the multiplayer icon on the main menu, and then selecting the join or create option. You can invite your friends or random players to join your room.
    5. Q: How can I chat with other players in Drift Clash Online Racing?
      A: You can chat with other players by tapping on the chat icon on the top right corner of the screen. You can send text messages or emojis to other players.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Clash of Phoenix APK Mod Everything You Need to Know.md b/spaces/congsaPfin/Manga-OCR/logs/Clash of Phoenix APK Mod Everything You Need to Know.md deleted file mode 100644 index 5b8ea7b04de4a7e09cd02248ec3086d1c130613d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Clash of Phoenix APK Mod Everything You Need to Know.md +++ /dev/null @@ -1,102 +0,0 @@ - -

    Clash of Clans Phoenix APK Mod Download: How to Play with Unlimited Resources and a Stronger Army

    -

    Are you a fan of Clash of Clans, the addictive strategy game where you build your own village, train your army, and fight against other players? Do you want to play with unlimited resources, such as gems, gold, and elixir, and have access to all the troops, buildings, and characters in the game? If yes, then you should try Clash of Clans Phoenix APK Mod, a modified version of the game that offers all these features and more.

    -

    What is Clash of Clans Phoenix APK Mod?

    -

    Clash of Clans Phoenix APK Mod is a modified version of the original Clash of Clans game that has been developed by some fans and enthusiasts. It works quite similarly to the original game, but with some amazing features that make it more fun and convenient to play.

    -

    clash of clans phoenix apk mod download


    Downloadhttps://urlca.com/2uO7kh



    -

    A modified version of the popular strategy game Clash of Clans

    -

    Clash of Clans is one of the most popular and successful strategy games in the world, with over 500 million downloads on Google Play Store. It is a game where you have to build your own village, train your army, and fight against other players online. You can also join clans, chat with other players, participate in clan wars, and complete various missions and challenges.

    -

    Features unlimited resources, troops, buildings, and characters

    -

    The main difference between Clash of Clans Phoenix APK Mod and the original game is that it features unlimited resources, such as gems, gold, and elixir. These are the currencies that you need to upgrade your buildings, train your troops, and buy items in the game. Normally, you have to either pay real money or wait for a long time to get these resources in the original game. But with Clash of Clans Phoenix APK Mod, you can get them instantly and without any limitations.

    -

    Not only that, but Clash of Clans Phoenix APK Mod also features all the troops, buildings, and characters that are available in the original game. You can build your own powerful army with unlimited troops, such as barbarians, archers, giants, wizards, dragons, pekkas, golems, hog riders, miners, electro dragons, wall breakers, healers, goblins, balloons, valkyries, witches, bowlers, lava hounds, ice golems, yetis, super barbarians, super archers, super giants, super wall breakers, super goblins, inferno dragons, sneaky goblins, and headhunters.

    -

    clash of phoenix apk free download
    -clash of clans mod apk unlimited resources
    -clash of phoenix private server download
    -clash of clans phoenix apk latest version
    -clash of clans mod apk with new town hall 15
    -clash of phoenix apk for android and ios
    -clash of clans mod apk plenixclash
    -clash of phoenix apk 2023 update
    -clash of clans mod apk with custom buildings and heroes
    -clash of phoenix apk no root required
    -clash of clans mod apk fast and stable
    -clash of phoenix apk easy to install
    -clash of clans mod apk with clan wars
    -clash of phoenix apk with unlimited gems and gold
    -clash of clans mod apk hope my worlds
    -clash of phoenix apk with new kings and troops
    -clash of clans mod apk with net energy gain
    -clash of phoenix apk secure and safe
    -clash of clans mod apk with creative features
    -clash of phoenix apk user friendly and simple
    -clash of clans mod apk with unlimited elixir and dark elixir
    -clash of phoenix apk with powerful servers and databases
    -clash of clans mod apk with mini sun feature
    -clash of phoenix apk with pvp battles and chat
    -clash of clans mod apk with holy grail fusion experiment
    -clash of phoenix apk with news and updates tab
    -clash of clans mod apk with 30 seconds fusion reaction
    -clash of phoenix apk with multiple devices compatibility
    -clash of clans mod apk with 100 million degrees temperature
    -clash of phoenix apk with easy to use commands
    -clash of clans mod apk with amazing graphics and sound effects
    -clash of phoenix apk with no connection error or lag
    -clash of clans mod apk with 24/7 online support and service
    -clash of phoenix apk with frequently asked questions section
    -clash of clans mod apk with unlimited data and storage space
    -clash of phoenix apk with custom mods and hacks
    -clash of clans mod apk with no ads or in-app purchases
    -clash of phoenix apk with donation option and rewards
    -clash of clans mod apk with unlimited fun and entertainment
    -clash of phoenix apk with regular maintenance and bug fixes

    -

    You can also construct new buildings and defenses in your village, such as town hall, clan castle, barracks, army camp, laboratory, spell factory, gold mine, elixir collector, dark elixir drill, gold storage, elixir storage, dark elixir storage, builder hut, walls, cannon, archer tower, mortar, air defense, wizard tower, hidden tesla, bomb tower, x-bow, inferno tower, eagle artillery, scatter shot, air sweeper, bomb, spring trap, giant bomb, air bomb, seeking air mine, skeleton trap, tornado trap, and builder base.

    -

    You can also unlock and use new characters and heroes in the game, such as the Barbarian King, Archer Queen, Grand Warden, Royal Champion, Battle Machine, Master Builder, and O.T.T.O.

    -

    Works similarly to the original game but with more freedom and customization

    -

    Clash of Clans Phoenix APK Mod works similarly to the original game in terms of gameplay and mechanics. You still have to build your village, train your army, and fight against other players online. You can also join clans, chat with other players, participate in clan wars, and complete various missions and challenges.

    -

    However, with Clash of Clans Phoenix APK Mod, you have more freedom and customization options in the game. You can create your own unique village layout, choose your own army composition and strategy, and experiment with different combinations of troops, buildings, and characters. You can also play the game at your own pace and without any restrictions or limitations.

    -

    Why Download Clash of Clans Phoenix APK Mod?

    -

    There are many reasons why you should download Clash of Clans Phoenix APK Mod if you are a fan of Clash of Clans. Here are some of them:

    -

    To enjoy the game without spending real money or waiting for resources

    -

    One of the main drawbacks of the original game is that it requires you to spend real money or wait for a long time to get enough resources to upgrade your buildings, train your troops, and buy items in the game. This can be frustrating and annoying for many players who want to enjoy the game without spending a fortune or wasting their time.

    -

    With Clash of Clans Phoenix APK Mod, you don't have to worry about this problem anymore. You can get unlimited resources instantly and without any cost. You can upgrade your buildings, train your troops, and buy items as much as you want and whenever you want. You can also skip the waiting time for building and training by using gems. This way, you can enjoy the game more and have more fun.

    -

    To battle against other players with a powerful army and base

    -

    Another reason why you should download Clash of Clans Phoenix APK Mod is that it allows you to battle against other players with a powerful army and base. You can challenge other players online and show them who is the boss. You can also join clan wars and help your clan win more trophies and rewards.

    -

    With Clash of Clans Phoenix APK Mod, you can have access to all the troops, buildings, and characters in the game. You can build your own powerful army with unlimited troops of different types and levels. You can also construct new buildings and defenses in your village to make it stronger and more secure. You can also unlock and use new characters and heroes in the game to boost your army's strength and abilities.

    -

    With such a powerful army and base, you can easily defeat your enemies and dominate the game. You can also have more fun and excitement in the game by trying out different strategies and tactics.

    -

    To explore new possibilities and challenges in the game

    -

    A third reason why you should download Clash of Clans Phoenix APK Mod is that it allows you to explore new possibilities and challenges in the game. You can discover new features and content that are not available in the original game. You can also experience new difficulties and surprises that will keep you on your toes.

    -

    With Clash of Clans Phoenix APK Mod, you can play the game with more freedom and customization options. You can create your own unique village layout, choose your own army composition and strategy, and experiment with different combinations of troops, buildings, and characters. You can also play the game at your own pace and without any restrictions or limitations.

    -

    With such a modified version of the game, you can have more fun and enjoyment in the game by exploring new possibilities and challenges.

    -

    How to Download and Install Clash of Clans Phoenix APK Mod?

    -

    If you are interested in downloading and installing Clash of Clans Phoenix APK Mod on your device, here are the steps that you need to follow:

    -

    Download the APK file from a trusted source

    -

    The first step is to download the APK file of Clash of Clans Phoenix APK Mod from a trusted source. You can search for the file online or use the link provided below. Make sure that you download the file from a reliable and secure website that does not contain any viruses or malware.

    -

    Link: [Clash of Clans Phoenix APK Mod Download]

    -

    Enable unknown sources in your device settings

    -

    The second step is to enable unknown sources in your device settings. This is necessary because Clash of Clans Phoenix APK Mod is not available on the official app stores, such as Google Play Store or Apple App Store. Therefore, you need to allow your device to install apps from unknown sources.

    -

    To do this, go to your device settings and look for the security or privacy option. Then, find the unknown sources option and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Ignore this message and proceed with the installation.

    -

    Open the APK file and follow the instructions

    -

    The third step is to open the APK file that you have downloaded and follow the instructions on the screen. You may need to grant some permissions to the app, such as access to your storage, camera, microphone, etc. Accept these permissions and continue with the installation.

    -

    The installation process may take a few minutes, depending on your device and internet speed. Wait patiently until the installation is complete. You may see a confirmation message that says Clash of Clans Phoenix APK Mod has been successfully installed on your device.

    -

    Launch the game and start playing

    -

    The final step is to launch the game and start playing. You can find the game icon on your home screen or app drawer. Tap on it and open the game. You may need to sign in with your Google account or create a new account to play the game.

    -

    Once you are in the game, you can enjoy all the features and benefits of Clash of Clans Phoenix APK Mod. You can get unlimited resources, troops, buildings, and characters in the game. You can also battle against other players with a powerful army and base. You can also explore new possibilities and challenges in the game.

    -

    Frequently Asked Questions (FAQs) about Clash of Clans Phoenix APK Mod

    -

    Here are some of the most common questions that people ask about Clash of Clans Phoenix APK Mod:

    -

    Is Clash of Clans Phoenix APK Mod safe and secure?

    -

    Yes, Clash of Clans Phoenix APK Mod is safe and secure to use. It does not contain any viruses or malware that can harm your device or data. It also does not require any root or jailbreak access to work. However, you should always download the APK file from a trusted source and enable unknown sources in your device settings before installing it.

    -

    Can I play Clash of Clans Phoenix APK Mod on multiple devices?

    -

    Yes, you can play Clash of Clans Phoenix APK Mod on multiple devices as long as you use the same Google account or create a new account for each device. You can also sync your progress and data across different devices using Google Play Games or Facebook.

    -

    Will I get banned for using Clash of Clans Phoenix APK Mod?

    -

    No, you will not get banned for using Clash of Clans Phoenix APK Mod as long as you do not abuse it or use it for cheating purposes. However, you should be aware that using Clash of Clans Phoenix APK Mod is against the terms and conditions of the original game and may result in some risks or consequences. Therefore, you should use it at your own discretion and responsibility.

    -

    How can I update Clash of Clans Phoenix APK Mod?

    -

    You can update Clash of Clans Phoenix APK Mod by downloading and installing the latest version of the APK file from a trusted source. You should always check for updates regularly to enjoy the latest features and bug fixes in the game.

    -

    Can I join clans and chat with other players in Clash of Clans Phoenix APK Mod?

    -

    Yes, you can join clans and chat with other players in Clash of Clans Phoenix APK Mod as you would in the original game. However, you should be careful not to reveal that you are using a modified version of the game or share any information that may expose your identity or location.

    -

    Conclusion

    -

    Clash of Clans Phoenix APK Mod is a modified version of the original Clash of Clans game that offers unlimited resources, troops, buildings, and characters in the game. It also allows you to battle against other players with a powerful army and base and explore new possibilities and challenges in the game.

    -

    If you are a fan of Clash of Clans and want to play with unlimited resources and a stronger army, you should download and install Clash of Clans Phoenix APK Mod on your device. You can enjoy the game without spending real money or waiting for resources. You can also have more fun and excitement in the game by trying out different strategies and tactics.

    -

    However, you should also be aware that using Clash of Clans Phoenix APK Mod is against the terms and conditions of the original game and may result in some risks or consequences. Therefore, you should use it at your own discretion and responsibility. You should also download the APK file from a trusted source and enable unknown sources in your device settings before installing it.

    -

    We hope that this article has helped you understand what Clash of Clans Phoenix APK Mod is, why you should download it, how to download and install it, and what are the FAQs about it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Cars 2 Movie in Tamil - Enjoy the Racing Adventure of Lightning McQueen and Mater.md b/spaces/congsaPfin/Manga-OCR/logs/Download Cars 2 Movie in Tamil - Enjoy the Racing Adventure of Lightning McQueen and Mater.md deleted file mode 100644 index 4e516d37ed9436bb771886ae015b82fbdda83d19..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Cars 2 Movie in Tamil - Enjoy the Racing Adventure of Lightning McQueen and Mater.md +++ /dev/null @@ -1,128 +0,0 @@ -
    -

    How to Download Cars 2 Movie in Tamil

    -

    Are you a fan of animated movies? Do you love cars, racing, and adventure? Do you want to watch one of the most entertaining and thrilling movies of Pixar? If your answer is yes, then you should definitely watch Cars 2, the sequel to the hit movie Cars. But what if you want to watch it in Tamil, your native language or the language you are learning? Don't worry, we have got you covered. In this article, we will tell you everything you need to know about how to download Cars 2 movie in Tamil. We will explain what is Cars 2 movie and why it is popular, what are the benefits of watching it in Tamil, and how to find and download the Tamil dubbed version online. So, buckle up and get ready for a ride!

    -

    how to download cars 2 movie in tamil


    Downloadhttps://urlca.com/2uO6T4



    -

    What is Cars 2 Movie and Why It is Popular

    -

    Cars 2 is a 2011 American computer-animated spy comedy film produced by Pixar Animation Studios for Walt Disney Pictures. It is the sequel to Cars (2006), the second film in the Cars franchise, and the 12th animated film from the studio. The film was directed by John Lasseter (in his final outing as director of a Pixar film to date), co-directed by Brad Lewis, and produced by Denise Ream, from a screenplay written by Ben Queen, and a story by Lasseter, Lewis, and Dan Fogelman.

    -

    In the film's ensemble voice cast, Owen Wilson, Larry the Cable Guy, Tony Shalhoub, Guido Quaroni, Bonnie Hunt, and John Ratzenberger reprise their roles from the first film. Paul Newman, who voiced Doc Hudson in the previous film, died in September 2008, so his character was written out of the film; George Carlin, who previously voiced Fillmore, died during the same year, and his role was passed to Lloyd Sherr. The returning cast is joined by Michael Caine, Emily Mortimer, John Turturro, Eddie Izzard, and Thomas Kretschmann, who voice the new characters introduced in this film.

    -

    The film follows race car Lightning McQueen (Wilson) and tow truck Mater (Cable Guy), who head to Japan and Europe to compete in the World Grand Prix, but Mater accidentally becomes sidetracked with international espionage, and ends up attempting to uncover a conspiracy led by a mysterious criminal mastermind and his gang which threatens all competitors in the Grand Prix, with the help of British spy Finn McMissile (Caine) and rookie field agent Holley Shiftwell (Mortimer).

    -

    Cars 2 is a movie that appeals to a wide range of audiences, from children to adults, from car enthusiasts to spy fans, from comedy lovers to action seekers. The movie is full of humor, adventure, suspense, and heart. It also showcases the beauty and diversity of different cultures and locations around the world, such as Tokyo, Paris, London, and Italy.

    -

    Cars 2 received mixed reviews from critics, who praised its animation, voice acting, music, and action sequences, but criticized its plot, humor, characters, and lack of emotional depth. However, the film was a box office success, grossing over $562 million worldwide against a budget of $200 million. It was nominated for the Golden Globe Award for Best Animated Feature Film, but it was notably the first Pixar feature not to receive an Academy Award nomination for Best Animated Feature.

    -

    How to watch Cars 2 movie online in Tamil
    -Cars 2 Tamil dubbed animation movie download
    -Best sites to download Cars 2 movie in Tamil
    -Cars 2 full movie in Tamil free download
    -How to stream Cars 2 movie on Disney+ Hotstar in Tamil
    -Cars 2 Tamil dubbed movie watch online
    -Download Cars 2 movie in Tamil HD quality
    -Cars 2 movie Tamil dubbed free download
    -How to get Cars 2 movie subtitles in Tamil
    -Cars 2 movie download in Tamilrockers
    -Cars 2 movie Tamil dubbed YouTube video
    -How to download Cars 2 movie in Tamil using torrent
    -Cars 2 movie in Tamil language download
    -Cars 2 movie review in Tamil
    -How to download Cars 2 movie soundtrack in Tamil
    -Cars 2 movie cast and crew in Tamil
    -Cars 2 movie trivia and facts in Tamil
    -How to download Cars 2 movie wallpapers in Tamil
    -Cars 2 movie behind the scenes in Tamil
    -Cars 2 movie quotes and dialogues in Tamil
    -How to download Cars 2 movie games in Tamil
    -Cars 2 movie characters and voice actors in Tamil
    -Cars 2 movie plot summary in Tamil
    -How to download Cars 2 movie comics in Tamil
    -Cars 2 movie awards and nominations in Tamil
    -How to download Cars 2 movie sequel in Tamil
    -Cars 2 movie box office collection in Tamil
    -How to download Cars 2 movie spin-off in Tamil
    -Cars 2 movie fan art and memes in Tamil
    -How to download Cars 2 movie merchandise in Tamil
    -Cars 2 movie rating and feedback in Tamil
    -How to download Cars 2 movie bonus features in Tamil
    -Cars 2 movie release date and time in Tamil
    -How to download Cars 2 movie trailer in Tamil
    -Cars 2 movie director and producer in Tamil
    -How to download Cars 2 movie script in Tamil
    -Cars 2 movie genre and theme in Tamil
    -How to download Cars 2 movie coloring pages in Tamil
    -Cars 2 movie songs and lyrics in Tamil
    -How to download Cars 2 movie stickers and emojis in Tamil

    -

    What are the Benefits of Watching It in Tamil

    -

    Watching Cars 2 in Tamil can have many benefits for you, whether you are a native speaker of Tamil or a learner of the language. Here are some of the benefits you can enjoy by watching Cars 2 in Tamil:

    -
      -
    • Enhance your enjoyment and understanding of the movie: Watching Cars 2 in Tamil can make you feel more connected and immersed in the story and the characters. You can appreciate the jokes, dialogues, expressions, and emotions better in your own language or the language you are familiar with. You can also catch the subtle nuances and references that may be lost in translation or dubbing. You can also avoid the distraction and inconvenience of reading subtitles or listening to unfamiliar voices.
    • Learn a new language and culture: Watching Cars 2 in Tamil can help you learn a new language and culture in a fun and easy way. You can pick up new words, phrases, idioms, and slang from the movie. You can also learn about the grammar, pronunciation, accent, and intonation of Tamil. You can also get exposed to the culture, history, traditions, values, and customs of Tamil people. You can also compare and contrast the similarities and differences between Tamil and other languages and cultures.
    • Support the local dubbing industry and artists: Watching Cars 2 in Tamil can help you support the local dubbing industry and artists who work hard to bring you quality entertainment in your language. You can appreciate their talent, skill, and creativity in adapting and delivering the movie in Tamil. You can also encourage them to continue their work and improve their standards. You can also show your respect and gratitude to them for making the movie accessible and enjoyable for you.
    -

    How to Find and Download the Tamil Dubbed Version Online

    -

    Now that you know what is Cars 2 movie and why it is popular, and what are the benefits of watching it in Tamil, you may be wondering how to find and download the Tamil dubbed version online. Well, there are two ways to do that: the legal and ethical way, and the illegal and risky way. Let's see what they are:

    -

    The legal and ethical way to download Cars 2 movie in Tamil

    -

    The legal and ethical way to download Cars 2 movie in Tamil is to use the official platforms and websites that offer Cars 2 movie in Tamil. These platforms have obtained the rights and permissions from the original creators and distributors of the movie to provide it in different languages, including Tamil. Some of these platforms are:

    Platform | Subscription Fee | Quality | Availability
    Disney+ Hotstar | Rs. 299 per month or Rs. 1499 per year | HD | Available
    YouTube Movies | Rs. 100 per rental or Rs. 490 per purchase | HD | Available
    iTunes | $19.99 per purchase | HD | Limited availability
    Google Play Movies & TV | $3.99 per rental or $19.99 per purchase | HD | Limited availability
    Amazon Prime Video | $12.99 per month or $119 per year | HD | Limited availability
    -

    The advantages of using these platforms are:

    -
      -
    • They are legal and ethical: You can watch Cars 2 movie in Tamil without breaking any laws or violating any rights. You can also support the original creators and distributors of the movie by paying them for their work.
    • They are safe and secure: You can watch Cars 2 movie in Tamil without exposing your device or data to any viruses, malware, or hackers. You can also enjoy the movie without any interruptions, ads, or pop-ups.
    • They are high-quality and reliable: You can watch Cars 2 movie in Tamil in high-definition and with clear sound. You can also access the movie anytime and anywhere, as long as you have an internet connection and a compatible device.
    -

    The disadvantages of using these platforms are:

    -
      -
    • They are expensive and limited: You may have to pay a subscription fee or a rental/purchase fee to watch Cars 2 movie in Tamil. You may also have to deal with geo-restrictions, availability issues, or compatibility issues depending on the platform and your location.
    • They are not always updated and accurate: You may not find the latest or the best version of Cars 2 movie in Tamil on these platforms. You may also encounter errors, glitches, or delays in the streaming or downloading process.
    • They are not always satisfying and enjoyable: You may not like the quality or the style of the dubbing of Cars 2 movie in Tamil on these platforms. You may also miss some of the original features or extras of the movie, such as subtitles, commentary, or bonus scenes.
    -

    The illegal and risky way to download Cars 2 movie in Tamil

    -

    The illegal and risky way to download Cars 2 movie in Tamil is to use the unofficial sources and websites that offer Cars 2 movie in Tamil. These sources have not obtained the rights and permissions from the original creators and distributors of the movie to provide it in different languages, including Tamil. They are usually involved in piracy, torrenting, or streaming of copyrighted content. Some of these sources are:

    Source | URL
    Tamilrockers |
    Tamilyogi |
    Moviesda |
    Isaimini |
    Kuttymovies |
    -

    The dangers, drawbacks, and consequences of using these sources are:

    -
      -
    • They are illegal and unethical: You can watch Cars 2 movie in Tamil by breaking laws and violating rights. You can also harm the original creators and distributors of the movie by depriving them of their deserved income.
    • They are unsafe and insecure: You can watch Cars 2 movie in Tamil by exposing your device or data to viruses, malware, or hackers. You can also suffer from interruptions, ads, or pop-ups that may ruin your experience.
    • They are low-quality and unreliable: You can watch Cars 2 movie in Tamil in poor-definition and with distorted sound. You can also face difficulties in accessing, streaming, or downloading the movie due to network issues, server issues, or legal issues.
    -

    The reasons to avoid these sources and respect the intellectual property rights of the creators are:

    -
      -
    • They are unfair and disrespectful: You can watch Cars 2 movie in Tamil by disrespecting the hard work and creativity of the people who made it. You can also discourage them from making more movies like this in the future.
    • They are harmful and detrimental: You can watch Cars 2 movie in Tamil by harming yourself and others. You can also damage your device, data, reputation, or even face legal actions or penalties.
    • They are unnecessary and avoidable: You can watch Cars 2 movie in Tamil by using other alternatives that are legal, ethical, safe, secure, high-quality, and reliable. You can also enjoy the movie more by watching it in its original language or with subtitles.
    -

    Conclusion

    -

    In conclusion, Cars 2 is a great movie that you can watch in Tamil for various reasons. It is a fun, exciting, and colorful movie that has a lot to offer for different audiences. It is also a movie that can help you learn a new language and culture, enhance your enjoyment and understanding of the movie, and support the local dubbing industry and artists. However, you should be careful about how to find and download the Tamil dubbed version online. You should use the legal and ethical ways, such as the official platforms and websites that offer Cars 2 movie in Tamil. You should avoid the illegal and risky ways, such as the unofficial sources and websites that offer Cars 2 movie in Tamil. By doing so, you can watch Cars 2 movie in Tamil without any problems or regrets. We hope this article has helped you learn how to download Cars 2 movie in Tamil. We recommend you to watch Cars 2 movie in Tamil and have a great time. Thank you for reading this article and happy watching!

    -

    FAQs

    -

    Here are some of the frequently asked questions about how to download Cars 2 movie in Tamil:

    -
      -
    1. Q: Is Cars 2 movie available in Tamil on Netflix?
      A: No, Cars 2 movie is not available in Tamil on Netflix. However, you can watch it in English or with subtitles on Netflix.
    2. Q: How can I watch Cars 2 movie in Tamil for free?
      A: You can watch Cars 2 movie in Tamil for free by using some of the official platforms and websites that offer free trials or discounts, such as Disney+ Hotstar or YouTube Movies. However, you should be aware of the terms and conditions of these platforms and cancel your subscription before the trial period ends.
    3. Q: What are some of the best Tamil dubbed movies to watch?
      A: Some of the best Tamil dubbed movies to watch are The Lion King, Frozen, Avatar, The Avengers, Harry Potter, The Lord of the Rings, and The Matrix.
    4. Q: How can I improve my Tamil language skills by watching movies?
      A: You can improve your Tamil language skills by watching movies by following these tips:
      • Choose movies that match your level of proficiency and interest.
      • Watch movies with subtitles or captions in Tamil or English.
      • Repeat the dialogues or phrases that you like or want to learn.
      • Write down the new words or expressions that you encounter and look up their meanings.
      • Practice speaking or writing about the movie in Tamil.
    5. Q: How can I find more articles like this one?
      A: You can find more articles like this one by using Microsoft Bing search. Just type in your topic or query and click on the search button. You will get a list of relevant and reliable articles that you can read and enjoy.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Building Your Dream Town with Township Offline Game APK for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy Building Your Dream Town with Township Offline Game APK for Android.md deleted file mode 100644 index 5924286b96d1313377dd91afc25cb350e38371f3..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Building Your Dream Town with Township Offline Game APK for Android.md +++ /dev/null @@ -1,143 +0,0 @@ -
    -

    Township Offline Game APK: A Unique Blend of City-Building and Farming

    -

    Do you love city-building games? Do you enjoy farming games? If you answered yes to both questions, then you will love Township Offline Game APK. This is a game that combines both genres into one fun and addictive experience.

    -

    township offline game apk


    DOWNLOAD >>>>> https://urlca.com/2uO5JH



    -

    Township Offline Game APK is developed by Playrix, a leading mobile game developer that also created popular titles such as Gardenscapes, Homescapes, Fishdom, and more. The game has over 100 million downloads on Google Play Store and has received positive reviews from critics and players alike.

    -

    In this game, you can build your dream town from scratch. You can harvest crops at your farms, process them at your facilities, and sell goods to develop your town. You can trade with exotic countries and unlock new items. You can open restaurants, cinemas, and other community buildings to give life in your town special flavor. You can also interact with other players and join a co-op to cooperate and compete with them.

    -

    But what if you don't have an internet connection or you want to save your data usage? Don't worry, you can still play Township Offline Game APK without any problem. In this article, we will show you how to download and install Township Offline Game APK on your device. We will also explain how to play the game in offline mode and what the pros and cons of doing so are. Finally, we will answer some frequently asked questions that you may have about playing the game offline.

    -

    township offline game apk download free
    -township offline game apk mod unlimited money
    -township offline game apk latest version
    -township offline game apk for android
    -township offline game apk no internet
    -township offline game apk without wifi
    -township offline game apk hack
    -township offline game apk full version
    -township offline game apk old version
    -township offline game apk update
    -township offline game apk play store
    -township offline game apk pure
    -township offline game apk 2023
    -township offline game apk revdl
    -township offline game apk rexdl
    -township offline game apk obb
    -township offline game apk data
    -township offline game apk mirror
    -township offline game apk uptodown
    -township offline game apk apkpure
    -township offline game apk android 1
    -township offline game apk android oyun club
    -township offline game apk andropalace
    -township offline game apk appvn
    -township offline game apk blackmod
    -township offline game apk bluestacks
    -township offline game apk cheat
    -township offline game apk cracked
    -township offline game apk file
    -township offline game apk free shopping
    -township offline game apk google drive
    -township offline game apk happymod
    -township offline game apk highly compressed
    -township offline game apk install
    -township offline game apk ios
    -township offline game apk lenov.ru
    -township offline game apk mob.org
    -township offline game apk new version
    -township offline game apk original
    -township offline game apk pc
    -township offline game apk platinmods
    -township offline game apk pro
    -township offline game apk premium
    -township offline game apk qooapp
    -township offline game apk unlimited cash and coins
    -township offline game apk vip mod menu

    -

    How to Download and Install Township Offline Game APK

    -

    If you want to play Township Offline Game APK, you need to download and install the APK file on your device. An APK file is an Android Package file that contains all the files and code needed to run an app on your device. However, before you can install an APK file, you need to enable unknown sources on your device. Here are the steps to do so:

    -

    Step 1: Enable Unknown Sources on Your Device

    -

    Unknown sources are sources that are not verified by Google or the device manufacturer. By default, your device does not allow installation of apps from unknown sources for security reasons. However, you can change this setting by following these steps:

    -
      -
    • Go to your device's Settings and tap on Security or Privacy.
    • Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.
    • A warning message will pop up. Read it carefully and tap on OK or Allow.
    -

    Now you have enabled unknown sources on your device and you can install APK files from other sources than the Google Play Store.

    -

    Step 2: Download the APK File from a Trusted Source

    -

    The next step is to download the APK file of Township Offline Game APK from a trusted source. There are many websites that offer APK files for download, but not all of them are safe and reliable. Some of them may contain malware or viruses that can harm your device or steal your personal information. Therefore, you should be careful when choosing a website to download the APK file from.

    -

    One of the websites that we recommend is [APKPure]. This is a reputable website that provides original and pure APK files for various apps and games. You can download the APK file of Township Offline Game APK from this website by following these steps:

    -
      -
    • Open your browser and go to [APKPure].
    • -
    • Type Township Offline Game APK in the search bar and hit enter.
    • -
    • Select the app from the search results and tap on Download APK.
    • -
    • Wait for the download to complete and save the file in a location that you can easily access.
    • -
    -

    You have now downloaded the APK file of Township Offline Game APK from a trusted source.

    -

    Step 3: Locate and Install the APK File on Your Device

    -

    The final step is to locate and install the APK file on your device. You can do this by following these steps:

    -
      -
    • Open your file manager app and find the folder where you saved the APK file.
    • Tap on the APK file and a prompt will appear. Tap on Install or Next.
    • Wait for the installation to finish and tap on Open or Done.
    -

    You have now installed Township Offline Game APK on your device and you can start playing it offline.
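    If you prefer to install from a computer instead of tapping the file on the phone, sideloading the same APK with Android's adb tool is a common alternative. The snippet below is only a minimal sketch: it assumes adb is installed and on your PATH, USB debugging is enabled on the device, and it uses "township.apk" as a placeholder file name rather than the actual download name.

```python
# Minimal sketch: sideload an APK from a computer using adb.
# Assumes adb is installed and on PATH, USB debugging is enabled,
# and the device is connected. "township.apk" is a placeholder file name.
import subprocess

result = subprocess.run(
    ["adb", "install", "-r", "township.apk"],  # -r replaces the app if it is already installed
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```

    Either way, the end result is the same as in the steps above: the game icon appears on your device and you can launch it normally.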

    How to Play Township Offline Game APK

    -

    Now that you have downloaded and installed Township Offline Game APK on your device, you can start playing it offline. The game is very easy to play and has a lot of features and activities to keep you entertained. Here are some of the things you can do in the game:

    -

    Build Your Dream Town

    -

    The main goal of the game is to build your dream town from scratch. You can design and create your own town by building and upgrading various structures, such as houses, factories, farms, and more. You can also customize your town with different decorations, such as flowers, trees, statues, fountains, and more. You can also build landmarks from famous cities around the world, such as the Eiffel Tower, the Statue of Liberty, the Big Ben, and more.

    -

    As you build your town, you will also attract new citizens who will live and work in your town. You can see them walking around, shopping, working, and enjoying their lives. You can also interact with them and fulfill their requests and wishes. You can also name your town and change its appearance according to your preference.

    -

    Harvest Crops and Process Them at Your Facilities

    -

    Another important aspect of the game is farming. You can grow and harvest different crops at your farms, such as wheat, corn, carrots, tomatoes, and more. You can also raise animals, such as cows, chickens, pigs, and more. You can collect various products from your crops and animals, such as milk, eggs, wool, and more.

    -

    You can also process these products at your facilities, such as bakeries, dairies, sugar factories, and more. You can make various goods from these products, such as bread, cheese, juice, candy, and more. You can then sell these goods to earn money and experience points. You can also use these goods to complete orders from your customers or to trade with other countries.

    -

    Trade with Exotic Countries and Unlock New Items

    -

    One of the fun features of the game is trading with exotic countries. You can use the airport and the harbor to trade with other countries, such as China, Egypt, France, Japan, and more. You can send and receive various items from these countries, such as silk, spices, wine, sushi, and more.

    -

    By trading with these countries, you can also unlock new items that you can use in your town. For example, you can unlock new animals, such as pandas, camels, kangaroos, and more. You can also unlock new decorations, such as pagodas, pyramids, windmills, and more. You can also unlock new landmarks from these countries that you can build in your town.

    -

    Open Restaurants, Cinemas, and Other Community Buildings

    -

    To make your town more lively and attractive to your citizens and visitors alike, you can open restaurants, cinemas, and other community buildings that provide services and entertainment to them. You can choose from a variety of options, such as cafes, pizzerias, sushi bars, movie theaters, museums, and more. You can also upgrade these buildings to increase their capacity and quality.

    -

    By opening these buildings, you can also earn more money and experience points. You can also attract more tourists who will visit your town and spend money on your goods and services. You can also see how happy and satisfied your citizens and visitors are by checking their happiness level and feedback.

    -

    Interact with Other Players and Join a Co-op

    -

    If you want to play with other players, you can connect with them via social media or in-game chat. You can add them as friends and visit their towns to see how they are doing. You can also help them by sending them gifts or completing their requests. You can also receive help from them in return.

    -

    You can also join or create a co-op to cooperate and compete with other players. A co-op is a group of players who share a common chat room and a common goal. You can chat with your co-op members and exchange tips and strategies. You can also participate in co-op events and tasks that require teamwork and coordination. You can also compete with other co-ops in the leaderboard and win rewards.

    Pros and Cons of Township Offline Game APK

    -

    Playing Township Offline Game APK has its pros and cons. Depending on your preferences and needs, you may find the offline mode more suitable or less enjoyable than the online mode. Here are some of the pros and cons of playing the game offline:

    -

    Pros

    -
      -
    • You can save your data usage. Playing the game offline means you don't need to use your mobile data or Wi-Fi to access the game. This can help you save your data usage and avoid extra charges from your service provider.
    • -
    • You can avoid ads and in-app purchases. Playing the game offline means you don't have to deal with annoying ads that pop up every now and then. You also don't have to worry about spending real money on in-app purchases that may tempt you to buy more coins, cash, or other items.
    • -
    • You can play anytime and anywhere. Playing the game offline means you don't need to have an internet connection to play the game. You can play the game anytime and anywhere you want, whether you are at home, at work, on the road, or on a plane.
    • -
    -

    Cons

    -
      -
    • You may miss out on updates and new features. Playing the game offline means you don't get to enjoy the latest updates and new features that the developers add to the game. You may miss out on new items, events, tasks, co-ops, and more that make the game more fun and exciting.
    • -
    • You may not be able to interact with other players or join a co-op. Playing the game offline means you don't get to connect with other players who play the game online. You may not be able to chat with them, visit their towns, help them, or receive help from them. You may also not be able to join or create a co-op to cooperate and compete with other players.
    • -
    • You may risk losing your progress if you uninstall or switch devices. Playing the game offline means you don't get to sync your progress with the cloud server. This means that if you uninstall the game or switch devices, you may lose all your progress and have to start over from scratch.
    • -

    Frequently Asked Questions about Township Offline Game APK

    -

    Here are some of the frequently asked questions that users may have about Township Offline Game APK and their answers:

Q: How can I update the game offline?
A: To update the game offline, you need to download and install the latest version of the APK file from a trusted source. You can follow the same steps as mentioned above to do so. However, you should make sure that you back up your progress before updating the game offline, as you may lose it if something goes wrong during the installation.

Q: How can I backup and restore my progress offline?
A: To back up your progress offline, you need to use a file manager app to copy the data folder of the game from your device's internal storage to an external storage, such as an SD card or a USB drive. The data folder of the game is usually located in Android/data/com.playrix.township or Android/obb/com.playrix.township. To restore your progress offline, you need to copy the data folder back to your device's internal storage and overwrite the existing files (see the sketch right after this FAQ for a computer-based alternative).

Q: How can I switch between online and offline modes?
A: To switch between online and offline modes, you need to have an internet connection and tap on the settings icon in the top right corner of the game screen. Then, tap on the switch button next to Online Mode. If you switch from online to offline mode, you will be asked to confirm your choice and warned that you will not be able to access some features of the game. If you switch from offline to online mode, you will be asked to log in with your Facebook or Google account and sync your progress with the cloud server.

Q: Can I play Township Offline Game APK on PC?
A: Yes, you can play Township Offline Game APK on PC using an Android emulator. An Android emulator is software that allows you to run Android apps and games on your PC. There are many Android emulators available for PC, such as BlueStacks, NoxPlayer, LDPlayer, and more. You can download and install any of these emulators on your PC and then download and install Township Offline Game APK on them using the same steps as mentioned above.

Q: Is Township Offline Game APK safe and legal?
A: Township Offline Game APK is safe and legal as long as you download it from a trusted source and use it for personal and non-commercial purposes. However, you should be aware that playing the game offline may violate the terms of service of Playrix, the developer of the game. Playrix may not support or endorse the use of Township Offline Game APK and may not be responsible for any issues or damages that may arise from using it.
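The backup answer above describes copying the game's data folder with a file manager on the phone itself. If you would rather keep the backup on a computer, a rough adb-based equivalent could look like the sketch below. The folder names come from the FAQ; the local backup folder is a placeholder, and on Android 11 or newer the Android/data folder may not be readable this way, so treat it as an illustration only.

```python
# Minimal sketch: backing up and restoring the game's data and obb folders
# over adb. Folder names follow the FAQ above; the local backup folder is a
# placeholder, and access to Android/data may be blocked on newer Android
# versions, so this is an illustration rather than a guaranteed method.
import subprocess
from pathlib import Path

PACKAGE = "com.playrix.township"        # folder name cited in the FAQ
BACKUP_ROOT = Path("township_backup")   # placeholder local folder

# Local sub-folder -> directory on the device that it mirrors.
LOCATIONS = {
    "data": f"/sdcard/Android/data/{PACKAGE}",
    "obb": f"/sdcard/Android/obb/{PACKAGE}",
}

def backup() -> None:
    for name, remote in LOCATIONS.items():
        local = BACKUP_ROOT / name
        local.mkdir(parents=True, exist_ok=True)
        # 'adb pull' copies the directory tree from the phone to the computer.
        subprocess.run(["adb", "pull", remote, str(local)], check=False)

def restore() -> None:
    for name, remote in LOCATIONS.items():
        local = BACKUP_ROOT / name / PACKAGE
        parent = remote.rsplit("/", 1)[0]
        # 'adb push' copies the saved tree back to its parent folder on the phone.
        subprocess.run(["adb", "push", str(local), parent], check=False)

if __name__ == "__main__":
    backup()  # call restore() instead to copy the backup back to the phone
```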
    -

    Conclusion

    -

    Township Offline Game APK is a unique blend of city-building and farming that allows you to build your dream town and enjoy various activities without an internet connection. You can download and install Township Offline Game APK on your device by following the steps in this article. You can also learn how to play the game offline mode and what are the pros and cons of doing so. You can also find answers to some frequently asked questions that you may have about the game offline mode.

    -

    If you are looking for a fun and relaxing game that you can play anytime and anywhere, then Township Offline Game APK is a great choice for you. You can unleash your creativity and imagination as you design and create your own town. You can also experience different cultures and lifestyles as you trade with exotic countries and unlock new items. You can also have fun with other players and join a co-op if you want to play online mode.

    -

    So what are you waiting for? Download Township Offline Game APK today and start building your dream town!

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Experience the Best of My Talking Angela 2 with MOD APK Version.md b/spaces/congsaPfin/Manga-OCR/logs/Experience the Best of My Talking Angela 2 with MOD APK Version.md deleted file mode 100644 index ca2ce0d870248a470db4fd057376bacf5517fc9c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Experience the Best of My Talking Angela 2 with MOD APK Version.md +++ /dev/null @@ -1,84 +0,0 @@ - -

    My Talking Angela 2 Mod APK: A Fun and Interactive Game for Cat Lovers

    -

    If you are a fan of cute and adorable cats, you will love My Talking Angela 2, a virtual pet game that lets you adopt, raise, and play with your own Angela the cat. You can customize her appearance, dress her up, take her to different places, and interact with her in various ways. You can also enjoy mini-games, activities, and surprises with Angela and her friends.

    -

my talking angela 2 mod apk


    DOWNLOAD →→→ https://urlca.com/2uOfTG



    -

    But what if you want to have more fun and freedom in the game? What if you want to access all the features and items without spending real money? Well, you can do that with My Talking Angela 2 Mod APK, a modified version of the game that gives you unlimited money, diamonds, outfits, accessories, and more. In this article, we will tell you more about this amazing game and how you can download and install it on your device.

    -

    What is My Talking Angela 2?

    -

    My Talking Angela 2 is a sequel to the popular My Talking Angela game, which has over 500 million downloads on Google Play Store. It is developed by Outfit7 Limited, the same company behind other hit games like My Talking Tom, My Talking Tom Friends, and Talking Tom Gold Run.

    -

    My Talking Angela 2 is a virtual pet game that allows you to adopt a baby Angela and take care of her as she grows up. You can feed her, bathe her, brush her teeth, put her to bed, and watch her develop her personality and skills. You can also play with her, talk to her, sing with her, dance with her, and make her happy.

    -

    my talking angela 2 mod apk unlimited money
    -my talking angela 2 mod apk download for android
    -my talking angela 2 mod apk latest version
    -my talking angela 2 mod apk free shopping
    -my talking angela 2 mod apk hack
    -my talking angela 2 mod apk revdl
    -my talking angela 2 mod apk offline
    -my talking angela 2 mod apk no ads
    -my talking angela 2 mod apk vip unlocked
    -my talking angela 2 mod apk rexdl
    -my talking angela 2 mod apk happymod
    -my talking angela 2 mod apk unlimited diamonds
    -my talking angela 2 mod apk android 1
    -my talking angela 2 mod apk all unlocked
    -my talking angela 2 mod apk unlimited coins
    -my talking angela 2 mod apk online
    -my talking angela 2 mod apk pure
    -my talking angela 2 mod apk full version
    -my talking angela 2 mod apk unlimited everything
    -my talking angela 2 mod apk new update
    -my talking angela 2 mod apk obb
    -my talking angela 2 mod apk old version
    -my talking angela 2 mod apk premium
    -my talking angela 2 mod apk unlimited stars
    -my talking angela 2 mod apk mega
    -my talking angela 2 mod apk data
    -my talking angela 2 mod apk pro
    -my talking angela 2 mod apk unlimited hearts
    -my talking angela 2 mod apk mirror
    -my talking angela 2 mod apk original

    -

    Features of My Talking Angela 2

    -

    My Talking Angela 2 has many features that make it an enjoyable and interactive game for cat lovers. Here are some of them:

    -

    Customize your Angela

    -

    You can make your Angela look unique by choosing from hundreds of outfits, hairstyles, makeup, accessories, and stickers. You can also change the color of her eyes, fur, clothes, and more. You can mix and match different items to create your own style for your Angela.

    -

    Explore different locations

    -

    You can take your Angela to different locations in the game world, such as the city, the beach, the forest, the desert, and the snow. Each location has its own theme, scenery, music, and activities. You can also decorate your home with various furniture and items to make it cozy and comfortable.

    -

    Play mini-games and activities

    -

    You can have fun with your Angela by playing mini-games and activities with her. You can play piano, guitar, drums, or saxophone with her in the music studio. You can bake cakes, cookies, pies, or cupcakes with her in the kitchen. You can do yoga, aerobics, or dance with her in the gym. You can also play puzzles, memory games, coloring games, or arcade games with her on your phone.

    -

    Interact with Angela and other characters

    -

    You can interact with your Angela in various ways by tapping, swiping, or shaking your device. You can pet her, tickle her, poke her, or annoy her. You can also talk to her using voice or text chat. She will respond to you with cute expressions and sounds. You can also meet other characters in the game world, such as Tom, Hank, G inger, Ben, and Becca. You can chat with them, play with them, or prank them.

    -

    Benefits of My Talking Angela 2 Mod APK

    -

    My Talking Angela 2 is a free game that you can download and play on your device. However, some features and items in the game require real money to unlock or purchase. For example, you need coins and diamonds to buy outfits, accessories, furniture, and stickers. You also need to watch ads or complete tasks to earn rewards or bonuses.

    -

    If you want to enjoy the game without any limitations or interruptions, you can use My Talking Angela 2 Mod APK, a modified version of the game that gives you many benefits, such as:

    -

    Unlimited money and diamonds

    -

    With My Talking Angela 2 Mod APK, you will have unlimited coins and diamonds in your account. You can use them to buy anything you want in the game without worrying about running out of money. You can also upgrade your home, wardrobe, and phone with ease.

    -

    Unlocked outfits and accessories

    -

    With My Talking Angela 2 Mod APK, you will have access to all the outfits and accessories in the game. You can dress up your Angela in any style you like, from casual to glamorous, from sporty to elegant. You can also change her look anytime you want without spending any money.

    -

    No ads and pop-ups

    -

    With My Talking Angela 2 Mod APK, you will not see any ads or pop-ups in the game. You can play the game smoothly and peacefully without any distractions or annoyances. You can also save your data and battery life by avoiding unnecessary ads.

    -

    How to download and install My Talking Angela 2 Mod APK

    -

    If you are interested in downloading and installing My Talking Angela 2 Mod APK on your device, you can follow these simple steps:

    -

    Step 1: Download the APK file from a trusted source

    -

    You can search for My Talking Angela 2 Mod APK on Google or any other search engine. You will find many websites that offer the APK file for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information.

    -

    To avoid any risks, you should download the APK file from a trusted source that has positive reviews and ratings from other users. You can also check the file size, version, and permissions before downloading it.
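One concrete way to follow this advice is to compare the file you downloaded against a checksum published by the source you trust. The snippet below is only a sketch of that idea: the download URL and the expected SHA-256 value are placeholders, not real values for this game.

```python
# Minimal sketch: downloading an APK and checking its SHA-256 hash against a
# value published by a trusted source. The URL and hash below are placeholders.
import hashlib
import urllib.request

APK_URL = "https://example.com/my-talking-angela-2-mod.apk"  # placeholder URL
EXPECTED_SHA256 = "0" * 64                                   # placeholder hash
OUT_FILE = "angela2_mod.apk"

def sha256_of(path: str) -> str:
    """Hash the file in 1 MB chunks so large APKs do not need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

def main() -> None:
    urllib.request.urlretrieve(APK_URL, OUT_FILE)  # download the APK file
    actual = sha256_of(OUT_FILE)
    if actual != EXPECTED_SHA256:
        raise SystemExit(f"Checksum mismatch ({actual}) - do not install this file.")
    print("Checksum matches the published value; the download is intact.")

if __name__ == "__main__":
    main()
```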

    -

    Step 2: Enable unknown sources on your device

    -

    Before you can install the APK file on your device, you need to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than Google Play Store. To enable unknown sources on your device, you need to follow these steps:

    -
      -
    • Go to Settings on your device.
    • -
    • Tap on Security or Privacy.
    • -
    • Find and enable Unknown Sources or Install Unknown Apps.
    • -
    • Confirm your choice by tapping OK or Allow.
    • -
    -

    Step 3: Install the APK file and launch the game

    -

    After you have enabled unknown sources on your device, you can install the APK file by following these steps:

    -
      -
    • Locate the APK file on your device using a file manager app or your browser.
    • -
    • Tap on the APK file and select Install.
    • -
    • Wait for the installation process to finish.
    • -
    • Tap on Open or Launch to start the game.
    • -
    -

    Congratulations! You have successfully installed My Talking Angela 2 Mod APK on your device. Now you can enjoy the game with unlimited money, diamonds, outfits, accessories, and no ads.

    -

    Conclusion

    -

    My Talking Angela 2 is a fun and interactive game for cat lovers who want to adopt, raise, and play with their own virtual pet. You can customize your Angela's appearance, take her to different locations, play mini-games and activities with her, and interact with her and other characters in various ways. You can also use My Talking Angela 2 Mod APK to get unlimited money, diamonds, outfits, accessories, and no ads in the game. You can download and install it on your device by following the steps we have provided in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

-

FAQs

Q: Is My Talking Angela 2 Mod APK safe to use?
A: Yes, My Talking Angela 2 Mod APK is safe to use as long as you download it from a trusted source and enable unknown sources on your device. However, you should always be careful when installing apps from sources other than the Google Play Store and scan them with an antivirus app before opening them.

Q: Can I play My Talking Angela 2 Mod APK online with other players?
A: No, My Talking Angela 2 Mod APK is an offline game that does not require an internet connection to play. You can play it anytime and anywhere you want, without worrying about data usage or network issues.

Q: Can I update My Talking Angela 2 Mod APK to the latest version?
A: Yes, you can update My Talking Angela 2 Mod APK to the latest version by downloading and installing the new APK file from the same source you got the previous one. However, you may lose your progress and data if you uninstall the old version before installing the new one. To avoid this, you can back up your data using a cloud service or a file manager app.

Q: How can I restore my purchases in My Talking Angela 2 Mod APK?
A: You do not need to restore your purchases in My Talking Angela 2 Mod APK because you already have unlimited money and diamonds in your account. You can buy anything you want in the game without spending any real money.

Q: How can I contact the developer of My Talking Angela 2 Mod APK?
A: You can contact the developer of My Talking Angela 2 Mod APK by visiting their official website or social media pages. You can also send them an email or leave a review on their app page. However, they may not respond to your queries or complaints because they are not affiliated with Outfit7 Limited, the original developer of My Talking Angela 2.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Improve Your Photography Skills with GCam Nikita 2.0 APK and Config for All Android Phones.md b/spaces/congsaPfin/Manga-OCR/logs/Improve Your Photography Skills with GCam Nikita 2.0 APK and Config for All Android Phones.md deleted file mode 100644 index e3b3359a0a90572f9019e2edfa8e7962315bd0ee..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Improve Your Photography Skills with GCam Nikita 2.0 APK and Config for All Android Phones.md +++ /dev/null @@ -1,95 +0,0 @@ -
    -

    Download Nikita 2.0 APK: A Powerful and Versatile Camera App for Android

    -

    If you are looking for a camera app that can enhance your photography skills and produce stunning photos on your Android device, you should try Nikita 2.0 APK. This is a modified version of Google Camera (Gcam) that works on most Android phones, not just Google Pixel devices. It offers many features and options that can help you capture amazing shots in any situation.

    -

    download nikita 2.0 apk


    Download Zip ✪✪✪ https://urlca.com/2uOcUq



    -

    What is Nikita 2.0 APK?

    -

    Nikita 2.0 APK is a camera app developed by Nikita, a well-known Gcam modder. It is based on Google Camera 7.4, but it has many improvements and additions from the previous versions, such as 6.3. It uses advanced algorithms and artificial intelligence to process images and videos, resulting in high-quality output.

    -

    Features of Nikita 2.0 APK

    -

    Nikita 2.0 APK has many features that make it one of the best camera apps for Android. Here are some of them:

    -

    HDR+ and Night Sight

    -

    HDR+ is a feature that combines multiple exposures to create a balanced image with rich details and colors. Night Sight is a feature that allows you to take clear and bright photos in low-light conditions without using flash. Both features are available on Nikita 2.0 APK, and they can improve your photos significantly.

    -

    Portrait Mode and Astrophotography

    -

    Portrait Mode is a feature that blurs the background of your subject, creating a bokeh effect that makes your photos look more professional. Astrophotography is a feature that lets you capture the stars and the night sky with long exposure and noise reduction. Both features are available on Nikita 2.0 APK, and they can make your photos more artistic and impressive.

    -

    Video Stabilization and Slow Motion

    -

    Video Stabilization is a feature that reduces the shakiness and blurriness of your videos, making them smoother and sharper. Slow Motion is a feature that lets you record videos at a high frame rate and play them back at a slower speed, creating a dramatic effect that highlights every movement. Both features are available on Nikita 2.0 APK, and they can make your videos more fun and exciting.

    -

    Config Files and Customization

    -

    Config Files are files that contain settings and parameters for different devices, modes, and scenarios. They can help you optimize the performance of Nikita 2.0 APK on your device, as well as customize it to your preferences. You can download config files from various sources online, or create your own using the app's settings menu.

    -

    How to Download and Install Nikita 2.0 APK?

    -

    If you want to download and install Nikita 2.0 APK on your Android device, you need to follow these steps:

    -

    Step 1: Download the APK file and the config file

    -

    You can download the latest version of Nikita 2.0 APK from this link: https://www.celsoazevedo.com/files/android/google-camera/dev-nikita/. You can also download the config file for your device from this link: https://www.celsoazevedo.com/files/android/google-camera/f/configs-nikita-02/. Make sure you save both files in a folder that you can easily access.

    -

    download nikita gcam apk
    -download nikita google camera apk
    -download nikita 2.0 gcam mod apk
    -download nikita 2.0 google camera mod apk
    -download nikita 2.0 apk for android
    -download nikita 2.0 apk latest version
    -download nikita 2.0 apk + config
    -download nikita 2.0 apk + config file
    -download nikita 2.0 apk + config terbaru
    -download nikita 2.0 apk + config update
    -download nikita 2.0 apk for oneplus 5/5t
    -download nikita 2.0 apk for oneplus devices
    -download nikita 2.0 apk for samsung devices
    -download nikita 2.0 apk for xiaomi devices
    -download nikita 2.0 apk for realme devices
    -download nikita 2.0 apk for oppo devices
    -download nikita 2.0 apk for vivo devices
    -download nikita 2.0 apk for huawei devices
    -download nikita 2.0 apk for asus devices
    -download nikita 2.0 apk for nokia devices
    -download nikita 2.0 apk from celso azevedo website
    -download nikita 2.0 apk from gudangapp website
    -download nikita 2.0 apk from steadfast marine website
    -download nikita 2.0 apk from metodegames website
    -download nikita 2.0 apk from teknolalat website
    -how to download nikita 2.0 apk on android phone
    -how to install nikita 2.0 apk on android phone
    -how to use nikita 2.0 apk on android phone
    -how to update nikita 2.0 apk on android phone
    -how to uninstall nikita 2.0 apk on android phone
    -what is nikita 2.0 apk and why you should download it
    -what are the features of nikita 2.0 apk and how to use them
    -what are the benefits of using nikita 2.0 apk over other camera apps
    -what are the drawbacks of using nikita 2.0 apk and how to fix them
    -what are the best settings for nikita 2.0 apk and how to apply them
    -where to find the best configs for nikita 2.0 apk and how to load them
    -where to get the latest updates for nikita 2.0 apk and how to install them
    -where to get support for nikita 2.0 apk and how to contact the developer
    -where to get feedback for nikita 2.0 apk and how to share your experience
    -where to get more information about nikita 2.0 apk and how to learn more about it

    -

    Step 2: Enable unknown sources on your device

    -

    Before you can install Nikita 2.0 APK, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to grant permission to your browser or file manager to install apps.

    -

    Step 3: Install the APK file and copy the config file to the Gcam folder

    -

    Now, you can install Nikita 2.0 APK by tapping on the APK file and following the instructions on the screen. Once the installation is complete, you need to copy the config file to the Gcam folder on your device. To do this, go to the folder where you saved the config file and select it. Then, go to the Gcam folder (usually located in Internal Storage > Gcam) and paste it there.
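If the config file is still on your computer, you can also copy it into the Gcam folder with adb instead of a file manager. This is a sketch only: the config file name is made up, and the exact folder name can differ between Gcam ports, so use whatever folder your version of the app actually reads.

```python
# Minimal sketch: copying a Gcam config file to the phone with adb. The config
# file name is a placeholder, and "/sdcard/Gcam" simply mirrors the folder
# mentioned in this step - some Gcam ports expect a different folder name.
import subprocess

CONFIG_FILE = "nikita20_myphone.xml"  # placeholder config file name
REMOTE_DIR = "/sdcard/Gcam"           # folder mentioned in the step above

# Create the folder on the phone if needed, then copy the file into it.
subprocess.run(["adb", "shell", "mkdir", "-p", REMOTE_DIR], check=True)
subprocess.run(["adb", "push", CONFIG_FILE, f"{REMOTE_DIR}/{CONFIG_FILE}"], check=True)
print("Config copied - load it in the app as described in Step 4.")
```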

    -

    Step 4: Launch the app and load the config file

    -

    Finally, you can launch Nikita 2.0 APK by tapping on its icon on your home screen or app drawer. To load the config file, you need to double tap on the black area next to the shutter button. Then, select the config file from the list and tap on Restore. This will apply the settings and parameters of the config file to the app.

    -

    How to Use Nikita 2.0 APK?

    -

    Nikita 2.0 APK is easy to use, but it also has many options and features that you can explore and experiment with. Here are some tips and tricks for taking better photos with Nikita 2.0 APK:

    -

    Tips and Tricks for Taking Better Photos with Nikita 2.0 APK

    -

    Adjust the exposure and white balance

    -

    Exposure and white balance are two important factors that affect the brightness and color of your photos. You can adjust them manually by tapping on the screen and dragging the sliders that appear. You can also use the Auto mode or choose from different presets such as Sunny, Cloudy, Fluorescent, etc.

    -

    Use the grid and level indicators

    -

    The grid and level indicators are useful tools that can help you compose your photos better. The grid divides the screen into nine equal parts, which can help you follow the rule of thirds or other composition techniques. The level indicator shows you if your phone is tilted or not, which can help you avoid crooked or distorted photos.

    -

    Experiment with different modes and settings

    -

    Nikita 2.0 APK has many modes and settings that you can try out, such as HDR+, Night Sight, Portrait Mode, Astrophotography, Video Stabilization, Slow Motion, etc. Each mode has its own advantages and disadvantages, depending on the situation and your preference. You can also change various settings such as resolution, frame rate, quality, etc., to suit your needs.

    -

    Edit your photos with the built-in editor or other apps

    -

    Nikita 2.0 APK has a built-in editor that lets you crop, rotate, adjust, filter, and share your photos easily. You can access it by swiping up from the bottom of the screen after taking a photo. You can also use other apps such as Snapseed, Lightroom, Photoshop Express, etc., to edit your photos further.

    -

    Conclusion

    -

Nikita 2.0 APK is a powerful and versatile camera app for Android that can help you take amazing photos and videos with your device. It has many features and options that can enhance your photography skills and creativity. You can download it from this link: https://www.celsoazevedo.com/files/android/google-camera/dev-nikita/. You can also download a config file for your device from this link: https://www.celsoazevedo.com/files/android/google-camera/f/configs-nikita-02/. You can also follow the steps and tips in this article to download, install, and use Nikita 2.0 APK effectively. We hope you enjoy using this app and taking amazing photos and videos with it.
    -

    FAQs

    -

    Here are some frequently asked questions about Nikita 2.0 APK:

    -
      -
    • Is Nikita 2.0 APK safe to use?
    • -

      Yes, Nikita 2.0 APK is safe to use, as long as you download it from a trusted source such as the link we provided. It does not contain any malware or viruses that can harm your device or data.

      -
    • Does Nikita 2.0 APK work on all Android devices?
    • -

      No, Nikita 2.0 APK does not work on all Android devices. It works best on devices that have a Snapdragon processor and support Camera2 API. You can check if your device is compatible by using an app such as Camera2 API Probe.

      -
    • What is the difference between Nikita 2.0 APK and other Gcam mods?
    • -

      Nikita 2.0 APK is one of the many Gcam mods that are available online. Each mod has its own features, settings, and performance, depending on the developer and the device. Nikita 2.0 APK is known for its stability, compatibility, and versatility, as well as its many improvements and additions from the previous versions.

      -
    • How can I update Nikita 2.0 APK?
    • -

      You can update Nikita 2.0 APK by downloading the latest version from the same link we provided and installing it over the old version. You do not need to uninstall the old version or delete the config file.

      -
    • How can I contact the developer of Nikita 2.0 APK?
    • -

      You can contact the developer of Nikita 2.0 APK by joining his Telegram group: https://t.me/nikgapps. You can also follow him on Twitter: https://twitter.com/nikgapps. You can ask him questions, give feedback, report bugs, or request features.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/LokiCraft 2023 Explore Craft and Survive in a Sandbox World.md b/spaces/congsaPfin/Manga-OCR/logs/LokiCraft 2023 Explore Craft and Survive in a Sandbox World.md deleted file mode 100644 index 668ee5eff1888c3cb3acdb1c56ce48e36b094de1..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/LokiCraft 2023 Explore Craft and Survive in a Sandbox World.md +++ /dev/null @@ -1,112 +0,0 @@ - -

    LokiCraft 2023 APK Download: A Guide for Android Users

    -

    If you are a fan of crafting and building games, you might have heard of LokiCraft, a popular sandbox game that lets you create your own pixel world. But did you know that there is a new version of the game called LokiCraft 2023 APK? This is a free update that adds new features, modes, and environments to the game, making it more fun and exciting than ever. In this article, we will tell you everything you need to know about LokiCraft 2023 APK, including its features, how to download and install it on your Android device, and some tips and tricks for playing it. Read on to find out more!

    -

    lokicraft 2023 apk download


Download File: https://urlca.com/2uO4zG



    -

    Features of LokiCraft 2023 APK

    -

    LokiCraft 2023 APK is a game that combines crafting, building, exploration, and survival in a pixelated world. You can use your imagination and creativity to create anything you want, from cities and villages to castles and fortresses. You can also explore different environments, such as forests, deserts, mountains, caves, and oceans. You can play in different modes, such as creative mode, where you have unlimited resources and no enemies, or survival mode, where you have to gather resources, craft tools and weapons, fight enemies, and survive challenges. Here are some of the features of LokiCraft 2023 APK that make it stand out from other games:

    -
      -
    • Crafting and building in a pixel world: You can use various blocks and items to create your own structures and designs. You can also customize your character with different skins and outfits. You can use powerful weapons and armor to protect yourself from dangers.
    • -
    • Exploring different modes and environments: You can choose between two modes: creative mode, where you can build anything you want without limits or threats, or survival mode, where you have to collect resources, craft items, fight enemies, and survive challenges. You can also explore different environments, such as forests, deserts, mountains, caves, and oceans. You can find unique animals and monsters that are only available in this game.
    • -
    • Fighting enemies and surviving challenges: You can encounter various enemies in survival mode, such as zombies, spiders, skeletons, creepers, dragons, etc. You have to use your weapons and skills to defeat them. You also have to face different challenges, such as hunger, thirst, weather conditions, etc. You have to find food and water sources, build shelters and defenses, and use fire to keep warm.
    • -
    -

    How to Download and Install LokiCraft 2023 APK on Android

    -

    If you want to play LokiCraft 2023 APK on your Android device, you have to download and install the APK file from a reliable source. An APK file is an Android application package that contains all the files needed to run an app on your device. However, since LokiCraft 2023 APK is not available on the Google Play Store or any other official app store, you have to download it from a third-party website or platform. Here are the steps you need to follow to download and install LokiCraft 2023 APK on your Android device:

    -
      -
    1. Step 1: Find a reliable source for the APK file: You have to be careful when downloading APK files from unknown sources, as some of them may contain viruses or malware that can harm your device or steal your data. You have to find a trustworthy website or platform that offers the latest version of LokiCraft 2023 APK. You can use a search engine or a review site to find the best option for you. For example, you can use [this link] to download LokiCraft 2023 APK from a reputable source.
    2. -
    3. Step 2: Enable unknown sources on your device: Before you can install the APK file, you have to enable the option to allow unknown sources on your device. This means that you can install apps that are not from the Google Play Store or any other official app store. To do this, you have to go to your device settings, then security, then unknown sources, and toggle the switch to on. You may see a warning message that tells you about the risks of installing apps from unknown sources, but you can ignore it if you trust the source of the APK file.
    4. -
    5. Step 3: Download and install the APK file: After you have enabled unknown sources, you can download the APK file from the source you have chosen. You can use your browser or a file manager app to locate the file on your device. Then, you can tap on the file and follow the instructions to install it. You may see a pop-up message that asks for your permission to install the app, but you can grant it if you trust the source of the APK file.
    6. -
    7. Step 4: Launch the game and enjoy: Once you have installed the APK file, you can launch the game by tapping on its icon on your home screen or app drawer. You can then enjoy playing LokiCraft 2023 APK on your Android device. You can also update the game whenever there is a new version available from the same source.
    8. -
    -

    Tips and Tricks for Playing LokiCraft 2023 APK

    -

    LokiCraft 2023 APK is a game that requires creativity, strategy, and skill. You have to use your imagination and logic to create your own pixel world and survive in it. You also have to face various enemies and challenges that will test your abilities and knowledge. Here are some tips and tricks that will help you play LokiCraft 2023 APK better:

    -
      -
    • Tip 1: Use the creative mode to practice and experiment: If you are new to LokiCraft 2023 APK or crafting and building games in general, you may want to start with the creative mode. This mode gives you unlimited resources and no enemies, so you can build anything you want without any restrictions or threats. You can use this mode to practice your skills, experiment with different blocks and items, and learn how the game works. You can also use this mode to create amazing structures and designs that you can show off to your friends.
    • -
    • Tip 2: Collect resources and craft tools and weapons: If you are playing in survival mode, you have to collect resources and craft tools and weapons to survive. You can find resources such as wood, stone, iron, gold, diamond, etc. by mining, chopping, digging, etc. You can use these resources to craft tools such as pickaxes, axes, shovels, etc. that will help you gather more resources faster and easier. You can also use these resources to craft weapons such as swords, bows, arrows, etc. that will help you fight enemies and defend yourself.
    • -
    • Tip 3: Build shelters and defenses to protect yourself: Another important aspect of survival mode is building shelters and defenses to protect yourself from enemies and challenges. You have to build shelters such as houses, cabins, tents, etc. that will provide you with a safe place to rest, store your items, and craft more items. You also have to build defenses such as walls, fences, traps, turrets, etc. that will prevent enemies from entering your shelter or attacking you.
    • -
    • Tip 4: Explore the map and find hidden treasures: One of the most fun parts of LokiCraft 2023 APK is exploring the map and finding hidden treasures. The map is huge and diverse, with different environments such as forests, deserts, mountains, caves, and oceans. You can explore these environments and discover new animals and monsters that are only available in this game. You can also find hidden treasures such as chests, dungeons, temples, etc. that contain valuable items such as coins, gems, weapons, armor, etc.
    • -
    -

Pros and Cons of LokiCraft 2023 APK

Pros

• Free: You can download and play LokiCraft 2023 APK for free, without any subscription or in-app purchases. You can enjoy the game without spending any money.
• Fun: LokiCraft 2023 APK is a fun and addictive game that will keep you entertained for hours. You can create your own pixel world and explore different environments and modes. You can also play with your friends online or offline.
• Customizable: LokiCraft 2023 APK is a customizable game that lets you choose your own character, skin, outfit, weapon, armor, etc. You can also modify the game settings and options to suit your preferences and needs.
• Offline mode: LokiCraft 2023 APK has an offline mode that lets you play the game without an internet connection. You can play the game anywhere and anytime, without worrying about data usage or network availability.

Cons

• Graphics: The graphics of LokiCraft 2023 APK are pixelated and low-quality, which may not appeal to some players who prefer more realistic and high-definition graphics. The game may look outdated and dull compared to other games.
• Bugs: LokiCraft 2023 APK may have some bugs and glitches that can affect the gameplay and performance of the game. The game may crash, freeze, lag, or not load properly on some devices. The game may also have some errors and issues that need to be fixed.
• Ads: LokiCraft 2023 APK may have some ads that can interrupt the gameplay and annoy the players. The ads may pop up randomly or frequently, which can be distracting and frustrating. The ads may also consume data and battery power.
• Compatibility issues: LokiCraft 2023 APK may not be compatible with some devices or operating systems. The game may not work or run smoothly on some devices, especially older or low-end ones. The game may also require a certain amount of storage space and memory to function properly.
    -

    Conclusion

    -

    LokiCraft 2023 APK is a game that lets you craft, build, explore, and survive in a pixelated world. You can play in different modes and environments, and create anything you want with your imagination and creativity. You can also fight enemies and challenges, and collect resources and items. LokiCraft 2023 APK is a free, fun, addictive, customizable, and offline game that you can download and play on your Android device. However, the game also has some drawbacks, such as graphics, bugs, ads, and compatibility issues. You have to weigh the pros and cons of the game before you decide to download and play it. We hope this article has helped you learn more about LokiCraft 2023 APK and how to download and install it on your Android device. If you have any questions or feedback, please feel free to leave a comment below.

    -

    Frequently Asked Questions

    -

    Here are some of the frequently asked questions about LokiCraft 2023 APK:

    -
      -
    1. What is the difference between LokiCraft 2023 APK and LokiCraft?
    2. -

      LokiCraft 2023 APK is a new version of LokiCraft that adds new features, modes, and environments to the game. It is a free update that improves the gameplay and performance of the game.

      -

      lokicraft 2023 apk free download
      -lokicraft 2023 apk latest version
      -lokicraft 2023 apk mod unlimited resources
      -lokicraft 2023 apk for android tv
      -lokicraft 2023 apk offline installer
      -lokicraft 2023 apk update new features
      -lokicraft 2023 apk download for pc windows
      -lokicraft 2023 apk full unlocked premium
      -lokicraft 2023 apk no ads ad-free
      -lokicraft 2023 apk hack cheats codes
      -lokicraft 2023 apk review ratings
      -lokicraft 2023 apk gameplay screenshots
      -lokicraft 2023 apk tips tricks guide
      -lokicraft 2023 apk best settings optimization
      -lokicraft 2023 apk how to install tutorial
      -lokicraft 2023 apk download link mirror
      -lokicraft 2023 apk alternative similar apps
      -lokicraft 2023 apk compatible devices list
      -lokicraft 2023 apk file size mb
      -lokicraft 2023 apk developer contact support
      -lokicraft 2023 apk new crafting game
      -lokicraft 2023 apk world craft sandbox
      -lokicraft 2023 apk block crafting building
      -lokicraft 2023 apk crafting and building 2023
      -lokicraft 2023 apk loki crafting games
      -lokicraft 2023 apk loki craft 3d free exploration
      -lokicraft 2023 apk survivalcraft free mode
      -lokicraft 2023 apk powerful loki craft weapon and armor
      -lokicraft 2023 apk cool graphics super realistic water ultra shader
      -lokicraft 2023 apk best building simulator crafting and building game
      -lokicraft 2023 apk survive at night zombies monsters
      -lokicraft 2023 apk unlimited resource to build with plus the ability to fly
      -lokicraft 2023 apk variety of animals sheep horse wolf chicken fish cow rat steer
      -lokicraft 2023 apk be creative in your own generated infinity world
      -lokicraft 2023 apk world generated in real time crafting of building city build craft exploration
      -lokicraft 2023 apk craftsman explore building tools and building blocks for the construction of houses and their equipment
      -lokicraft 2023 apk craftsman survival craft the necessary items for survival and protect yourself from wild predators and night zombies
      -lokicraft 2023 apk amazing crafting and building game in the style of craft game world craft dream island
      -lokicraft 2023 apk loki craft and building free game
      -lokicraft 2023 apk loki craft and building this is the most important thing in the game craftsman exploration the craftsman explore building

      -
    3. Is LokiCraft 2023 APK safe to download and install?
    4. -

      LokiCraft 2023 APK is safe to download and install if you get it from a reliable source. However, since it is not available on the Google Play Store or any other official app store, you have to be careful when downloading it from unknown sources. You have to make sure that the source is trustworthy and does not contain any viruses or malware that can harm your device or steal your data.

      -
    5. How do I update LokiCraft 2023 APK?
    6. -

      You can update LokiCraft 2023 APK whenever there is a new version available from the same source where you downloaded it. You have to follow the same steps as downloading and installing the game, but make sure that you delete the old version first before installing the new one.

      -
    7. Can I play LokiCraft 2023 APK with my friends?
    8. -

      Yes, you can play LokiCraft 2023 APK with your friends online or offline. You can join or create a multiplayer server where you can chat, cooperate, or compete with your friends. You can also play offline with your friends by using a local network or a hotspot.

      -
    9. What are some alternatives to LokiCraft 2023 APK?
    10. -

      If you are looking for some alternatives to LokiCraft 2023 APK, you may want to try some of these games:

      -
        -
      • Minecraft: This is the most popular and famous crafting and building game that inspired LokiCraft and many other games. You can create your own world and explore different modes and environments. You can also play with millions of players online or offline.
      • -
      • Roblox: This is a game platform that lets you create and play various games in different genres and themes. You can also customize your character and join a community of millions of players online.
      • -
      • Terraria: This is a game that combines crafting, building, exploration, and adventure in a 2D pixelated world. You can discover hundreds of items, enemies, bosses, biomes, and events. You can also play with your friends online or offline.
      • -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/2012 End Of The World Movie Hindi Dubbed Free 307.md b/spaces/contluForse/HuggingGPT/assets/2012 End Of The World Movie Hindi Dubbed Free 307.md deleted file mode 100644 index a4c6a3318398cdcc95bc4b388f6f044e73fc82d1..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/2012 End Of The World Movie Hindi Dubbed Free 307.md +++ /dev/null @@ -1,32 +0,0 @@ -

      2012 end of the world movie hindi dubbed free 307


Download Zip: https://ssurll.com/2uzxNM



      -
      -IMDB info: - -A: - -I'm going to have to go with Interstellar. - -From the movie synopsis, it sounds like it has a lot in common with the plot of your movie, but I'll explain. - -Asteroid LISE is on a collision course with Earth and while a team of specialists attempt to divert it, an even more unusual spaceship approaches the atmosphere of our planet. - -It looks like at the very end, there's a scene that hints that there's something on board that has great worth to the character, and there's also a hint that things go wrong on the ship. - -"Professor Brand agrees to allow the Air Force to mine the ship in exchange for a chance to be the first in history to board an alien vessel. When Brand takes the ship into orbit to use its tractor beam to capture the ship, a deadly attack from the ship's forces on the asteroid means Brand must decide to either save the ship or Earth. - -All of these are close matches to what your movie has. - -Radiolabelled antibodies: from early labelling techniques to nanotechnology. - -Antibodies are ideal agents for the selective targeting of tumour cells. However, limitations in terms of potency, selectivity and applicability have long remained a problem in the development of these reagents. Over the last decade, significant research efforts have been put into improving the pharmacokinetics and safety profiles of antibody-based therapies, with an overall positive impact. However, since the introduction of radiometal-conjugated antibody fragments in the 1990s, it has become increasingly apparent that the new technologies have further enhanced the potential of these agents. This review highlights the recent progress made towards the development of antibody-based therapeutics as well as the future prospects of such immunoconjugates.Q: - -Configure maven jar plugin to release Artifactory before packaging jar - -I'm currently trying to set up a clean Artifactory server and a clean maven project. I'm able to have the maven project build and test without problems, but when i'm trying to deploy the jar to the artifactory server it fails. - -The error i get is the following: - -Caused by: java.lang.Exception: Artifact not found or not a file:build-info.properties,build.number,build.type,build.version,commit. 4fefd39f24
      -
      -
      -

      diff --git a/spaces/contluForse/HuggingGPT/assets/Download Internet Download Manager 6.11 Full Crack.md b/spaces/contluForse/HuggingGPT/assets/Download Internet Download Manager 6.11 Full Crack.md deleted file mode 100644 index c65122aab26b1911b37b5b791c8e07f50c1ee288..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download Internet Download Manager 6.11 Full Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

      download internet download manager 6.11 full crack


      Download Zip » https://ssurll.com/2uzw5U



      - - 3cee63e6c2
      -
      -
      -

      diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/dla.py b/spaces/cooelf/Multimodal-CoT/timm/models/dla.py deleted file mode 100644 index f6e4dd285db53cd547ecb1f913219890517e3c00..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/dla.py +++ /dev/null @@ -1,443 +0,0 @@ -""" Deep Layer Aggregation and DLA w/ Res2Net -DLA original adapted from Official Pytorch impl at: -DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 - -Res2Net additions from: https://github.com/gasvn/Res2Net/ -Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 -""" -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import create_classifier -from .registry import register_model - -__all__ = ['DLA'] - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'base_layer.0', 'classifier': 'fc', - **kwargs - } - - -default_cfgs = { - 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'), - 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'), - 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'), - 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'), - 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'), - 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'), - 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'), - 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'), - 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'), - 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'), - 'dla60_res2net': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), - 'dla60_res2next': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), -} - - -class DlaBasic(nn.Module): - """DLA Basic""" - - def __init__(self, inplanes, planes, stride=1, dilation=1, **_): - super(DlaBasic, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d( - planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) - self.bn2 = nn.BatchNorm2d(planes) - self.stride = stride - - def forward(self, x, shortcut=None): - if shortcut is None: - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - out += shortcut - out = self.relu(out) - - return out - - -class DlaBottleneck(nn.Module): - """DLA/DLA-X Bottleneck""" - expansion = 2 - - def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): - super(DlaBottleneck, self).__init__() - self.stride = stride - mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) - mid_planes = mid_planes // self.expansion - - 
self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(mid_planes) - self.conv2 = nn.Conv2d( - mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, - bias=False, dilation=dilation, groups=cardinality) - self.bn2 = nn.BatchNorm2d(mid_planes) - self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(outplanes) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, shortcut=None): - if shortcut is None: - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - out += shortcut - out = self.relu(out) - - return out - - -class DlaBottle2neck(nn.Module): - """ Res2Net/Res2NeXT DLA Bottleneck - Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py - """ - expansion = 2 - - def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): - super(DlaBottle2neck, self).__init__() - self.is_first = stride > 1 - self.scale = scale - mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) - mid_planes = mid_planes // self.expansion - self.width = mid_planes - - self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(mid_planes * scale) - - num_scale_convs = max(1, scale - 1) - convs = [] - bns = [] - for _ in range(num_scale_convs): - convs.append(nn.Conv2d( - mid_planes, mid_planes, kernel_size=3, stride=stride, - padding=dilation, dilation=dilation, groups=cardinality, bias=False)) - bns.append(nn.BatchNorm2d(mid_planes)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - if self.is_first: - self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) - - self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(outplanes) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, shortcut=None): - if shortcut is None: - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - spx = torch.split(out, self.width, 1) - spo = [] - for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): - sp = spx[i] if i == 0 or self.is_first else sp + spx[i] - sp = conv(sp) - sp = bn(sp) - sp = self.relu(sp) - spo.append(sp) - if self.scale > 1: - spo.append(self.pool(spx[-1]) if self.is_first else spx[-1]) - out = torch.cat(spo, 1) - - out = self.conv3(out) - out = self.bn3(out) - - out += shortcut - out = self.relu(out) - - return out - - -class DlaRoot(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, shortcut): - super(DlaRoot, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - self.shortcut = shortcut - - def forward(self, *x): - children = x - x = self.conv(torch.cat(x, 1)) - x = self.bn(x) - if self.shortcut: - x += children[0] - x = self.relu(x) - - return x - - -class DlaTree(nn.Module): - def __init__(self, levels, block, in_channels, out_channels, stride=1, - dilation=1, cardinality=1, base_width=64, - level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): - super(DlaTree, self).__init__() - if root_dim == 0: - root_dim = 2 * out_channels - if level_root: - root_dim += in_channels - self.downsample = nn.MaxPool2d(stride, stride=stride) 
if stride > 1 else nn.Identity() - self.project = nn.Identity() - cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) - if levels == 1: - self.tree1 = block(in_channels, out_channels, stride, **cargs) - self.tree2 = block(out_channels, out_channels, 1, **cargs) - if in_channels != out_channels: - # NOTE the official impl/weights have project layers in levels > 1 case that are never - # used, I've moved the project layer here to avoid wasted params but old checkpoints will - # need strict=False while loading. - self.project = nn.Sequential( - nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), - nn.BatchNorm2d(out_channels)) - else: - cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) - self.tree1 = DlaTree( - levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) - self.tree2 = DlaTree( - levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) - if levels == 1: - self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) - self.level_root = level_root - self.root_dim = root_dim - self.levels = levels - - def forward(self, x, shortcut=None, children=None): - children = [] if children is None else children - bottom = self.downsample(x) - shortcut = self.project(bottom) - if self.level_root: - children.append(bottom) - x1 = self.tree1(x, shortcut) - if self.levels == 1: - x2 = self.tree2(x1) - x = self.root(x2, x1, *children) - else: - children.append(x1) - x = self.tree2(x1, children=children) - return x - - -class DLA(nn.Module): - def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, - cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, - drop_rate=0.0, global_pool='avg'): - super(DLA, self).__init__() - self.channels = channels - self.num_classes = num_classes - self.cardinality = cardinality - self.base_width = base_width - self.drop_rate = drop_rate - assert output_stride == 32 # FIXME support dilation - - self.base_layer = nn.Sequential( - nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), - nn.BatchNorm2d(channels[0]), - nn.ReLU(inplace=True)) - self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) - self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) - cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) - self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) - self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) - self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) - self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) - self.feature_info = [ - dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level - dict(num_chs=channels[1], reduction=2, module='level1'), - dict(num_chs=channels[2], reduction=4, module='level2'), - dict(num_chs=channels[3], reduction=8, module='level3'), - dict(num_chs=channels[4], reduction=16, module='level4'), - dict(num_chs=channels[5], reduction=32, module='level5'), - ] - - self.num_features = channels[-1] - self.global_pool, self.fc = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) - self.flatten = nn.Flatten(1) if global_pool else nn.Identity() - - for m in self.modules(): - if 
isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): - modules = [] - for i in range(convs): - modules.extend([ - nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, - padding=dilation, bias=False, dilation=dilation), - nn.BatchNorm2d(planes), - nn.ReLU(inplace=True)]) - inplanes = planes - return nn.Sequential(*modules) - - def get_classifier(self): - return self.fc - - def reset_classifier(self, num_classes, global_pool='avg'): - self.num_classes = num_classes - self.global_pool, self.fc = create_classifier( - self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) - self.flatten = nn.Flatten(1) if global_pool else nn.Identity() - - def forward_features(self, x): - x = self.base_layer(x) - x = self.level0(x) - x = self.level1(x) - x = self.level2(x) - x = self.level3(x) - x = self.level4(x) - x = self.level5(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.global_pool(x) - if self.drop_rate > 0.: - x = F.dropout(x, p=self.drop_rate, training=self.training) - x = self.fc(x) - x = self.flatten(x) - return x - - -def _create_dla(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - DLA, variant, pretrained, - default_cfg=default_cfgs[variant], - pretrained_strict=False, - feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), - **kwargs) - - -@register_model -def dla60_res2net(pretrained=False, **kwargs): - model_kwargs = dict( - levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), - block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) - return _create_dla('dla60_res2net', pretrained, **model_kwargs) - - -@register_model -def dla60_res2next(pretrained=False,**kwargs): - model_kwargs = dict( - levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), - block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) - return _create_dla('dla60_res2next', pretrained, **model_kwargs) - - -@register_model -def dla34(pretrained=False, **kwargs): # DLA-34 - model_kwargs = dict( - levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], - block=DlaBasic, **kwargs) - return _create_dla('dla34', pretrained, **model_kwargs) - - -@register_model -def dla46_c(pretrained=False, **kwargs): # DLA-46-C - model_kwargs = dict( - levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], - block=DlaBottleneck, **kwargs) - return _create_dla('dla46_c', pretrained, **model_kwargs) - - -@register_model -def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C - model_kwargs = dict( - levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], - block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) - return _create_dla('dla46x_c', pretrained, **model_kwargs) - - -@register_model -def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C - model_kwargs = dict( - levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], - block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) - return _create_dla('dla60x_c', pretrained, **model_kwargs) - - -@register_model -def dla60(pretrained=False, **kwargs): # DLA-60 - model_kwargs = dict( - levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, **kwargs) - return _create_dla('dla60', pretrained, **model_kwargs) - - -@register_model -def dla60x(pretrained=False, 
**kwargs): # DLA-X-60 - model_kwargs = dict( - levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) - return _create_dla('dla60x', pretrained, **model_kwargs) - - -@register_model -def dla102(pretrained=False, **kwargs): # DLA-102 - model_kwargs = dict( - levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, shortcut_root=True, **kwargs) - return _create_dla('dla102', pretrained, **model_kwargs) - - -@register_model -def dla102x(pretrained=False, **kwargs): # DLA-X-102 - model_kwargs = dict( - levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) - return _create_dla('dla102x', pretrained, **model_kwargs) - - -@register_model -def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 - model_kwargs = dict( - levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) - return _create_dla('dla102x2', pretrained, **model_kwargs) - - -@register_model -def dla169(pretrained=False, **kwargs): # DLA-169 - model_kwargs = dict( - levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], - block=DlaBottleneck, shortcut_root=True, **kwargs) - return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py deleted file mode 100644 index 83dd15c3df1d9f40baf0091a373fa224532c9ddd..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py +++ /dev/null @@ -1,17 +0,0 @@ -import torch -import torch.nn as nn - -class DepthWiseSeperableConv(nn.Module): - def __init__(self, in_dim, out_dim, *args, **kwargs): - super().__init__() - if 'groups' in kwargs: - # ignoring groups for Depthwise Sep Conv - del kwargs['groups'] - - self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs) - self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1) - - def forward(self, x): - out = self.depthwise(x) - out = self.pointwise(out) - return out \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/__init__.py deleted file mode 100644 index 52e4b48d383a84a055dcd7f6236f6e8e58eab924..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .base_module import BaseModule, ModuleList, Sequential -from .base_runner import BaseRunner -from .builder import RUNNERS, build_runner -from .checkpoint import (CheckpointLoader, _load_checkpoint, - _load_checkpoint_with_prefix, load_checkpoint, - load_state_dict, save_checkpoint, weights_to_cpu) -from .default_constructor import DefaultRunnerConstructor -from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info, - init_dist, master_only) -from .epoch_based_runner import EpochBasedRunner, Runner -from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model -from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook, - DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook, - Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, - GradientCumulativeOptimizerHook, Hook, IterTimerHook, - LoggerHook, LrUpdaterHook, MlflowLoggerHook, - NeptuneLoggerHook, OptimizerHook, PaviLoggerHook, - SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook, - WandbLoggerHook) -from .iter_based_runner import IterBasedRunner, IterLoader -from .log_buffer import LogBuffer -from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS, - DefaultOptimizerConstructor, build_optimizer, - build_optimizer_constructor) -from .priority import Priority, get_priority -from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed - -__all__ = [ - 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer', - 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', - 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook', - 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', - 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook', - 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict', - 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority', - 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict', - 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS', - 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer', - 'build_optimizer_constructor', 'IterLoader', 'set_random_seed', - 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook', - 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads', - 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule', - '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential', - 'ModuleList', 'GradientCumulativeOptimizerHook', - 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor' -] diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/gc_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/gc_head.py deleted file mode 100644 index 6342811f67e4affac7886c8fc745a28abcc32c55..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmseg/models/decode_heads/gc_head.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from annotator.mmpkg.mmcv.cnn import ContextBlock - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class GCHead(FCNHead): - """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. - - This head is the implementation of `GCNet - `_. - - Args: - ratio (float): Multiplier of channels ratio. Default: 1/4. - pooling_type (str): The pooling type of context aggregation. - Options are 'att', 'avg'. Default: 'avg'. 
- fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. Default: ('channel_add',) - """ - - def __init__(self, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) - self.ratio = ratio - self.pooling_type = pooling_type - self.fusion_types = fusion_types - self.gc_block = ContextBlock( - in_channels=self.channels, - ratio=self.ratio, - pooling_type=self.pooling_type, - fusion_types=self.fusion_types) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.gc_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/matcher.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/matcher.py deleted file mode 100644 index 4dba337a0f99ccd394931f52b063c8fb575bafbd..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/modeling/matcher.py +++ /dev/null @@ -1,212 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/matcher.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -""" -Modules to compute the matching cost and solve the corresponding LSAP. -""" -import torch -import torch.nn.functional as F -from scipy.optimize import linear_sum_assignment -from torch import nn -from torch.cuda.amp import autocast -import numpy as np - -# from annotator.oneformer.detectron2.projects.point_rend.point_features import point_sample - - -def linear_sum_assignment_with_nan(cost_matrix): - cost_matrix = np.asarray(cost_matrix) - nan = np.isnan(cost_matrix).any() - nan_all = np.isnan(cost_matrix).all() - empty = cost_matrix.size == 0 - - if not empty: - if nan_all: - print('Matrix contains all NaN values!') - elif nan: - print('Matrix contains NaN values!') - - if nan_all: - cost_matrix = np.empty(shape=(0, 0)) - elif nan: - cost_matrix[np.isnan(cost_matrix)] = 100 - - return linear_sum_assignment(cost_matrix) - -def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets) - denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :] - loss = 1 - (numerator + 1) / (denominator + 1) - return loss - - -batch_dice_loss_jit = torch.jit.script( - batch_dice_loss -) # type: torch.jit.ScriptModule - - -def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor): - """ - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. 
Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - Returns: - Loss tensor - """ - hw = inputs.shape[1] - - pos = F.binary_cross_entropy_with_logits( - inputs, torch.ones_like(inputs), reduction="none" - ) - neg = F.binary_cross_entropy_with_logits( - inputs, torch.zeros_like(inputs), reduction="none" - ) - - loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum( - "nc,mc->nm", neg, (1 - targets) - ) - - return loss / hw - - -batch_sigmoid_ce_loss_jit = torch.jit.script( - batch_sigmoid_ce_loss -) # type: torch.jit.ScriptModule - - -class HungarianMatcher(nn.Module): - """This class computes an assignment between the targets and the predictions of the network - - For efficiency reasons, the targets don't include the no_object. Because of this, in general, - there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, - while the others are un-matched (and thus treated as non-objects). - """ - - def __init__(self, cost_class: float = 1, cost_mask: float = 1, - cost_dice: float = 1, num_points: int = 0): - """Creates the matcher - - Params: - cost_class: This is the relative weight of the classification error in the matching cost - cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost - cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost - """ - super().__init__() - self.cost_class = cost_class - self.cost_mask = cost_mask - self.cost_dice = cost_dice - - assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs cant be 0" - - self.num_points = num_points - - @torch.no_grad() - def memory_efficient_forward(self, outputs, targets): - """More memory-friendly matching""" - bs, num_queries = outputs["pred_logits"].shape[:2] - - indices = [] - - # Iterate through batch size - for b in range(bs): - out_prob = outputs["pred_logits"][b].softmax(-1) # [num_queries, num_classes] - tgt_ids = targets[b]["labels"] - - # Compute the classification cost. Contrary to the loss, we don't use the NLL, - # but approximate it in 1 - proba[target class]. - # The 1 is a constant that doesn't change the matching, it can be ommitted. - cost_class = -out_prob[:, tgt_ids] - - out_mask = outputs["pred_masks"][b] # [num_queries, H_pred, W_pred] - # gt masks are already padded when preparing target - tgt_mask = targets[b]["masks"].to(out_mask) - - out_mask = out_mask[:, None] - tgt_mask = tgt_mask[:, None] - # all masks share the same set of points for efficient matching! 
- point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device) - # get gt labels - tgt_mask = point_sample( - tgt_mask, - point_coords.repeat(tgt_mask.shape[0], 1, 1), - align_corners=False, - ).squeeze(1) - - out_mask = point_sample( - out_mask, - point_coords.repeat(out_mask.shape[0], 1, 1), - align_corners=False, - ).squeeze(1) - - with autocast(enabled=False): - out_mask = out_mask.float() - tgt_mask = tgt_mask.float() - # Compute the focal loss between masks - cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask) - # Compute the dice loss betwen masks - cost_dice = batch_dice_loss(out_mask, tgt_mask) - - # Final cost matrix - C = ( - self.cost_mask * cost_mask - + self.cost_class * cost_class - + self.cost_dice * cost_dice - ) - C = C.reshape(num_queries, -1).cpu() - - indices.append(linear_sum_assignment_with_nan(C)) - - return [ - (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) - for i, j in indices - ] - - @torch.no_grad() - def forward(self, outputs, targets): - """Performs the matching - - Params: - outputs: This is a dict that contains at least these entries: - "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits - "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks - - targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: - "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth - objects in the target) containing the class labels - "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks - - Returns: - A list of size batch_size, containing tuples of (index_i, index_j) where: - - index_i is the indices of the selected predictions (in order) - - index_j is the indices of the corresponding selected targets (in order) - For each batch element, it holds: - len(index_i) = len(index_j) = min(num_queries, num_target_boxes) - """ - - return self.memory_efficient_forward(outputs, targets) - - def __repr__(self, _repr_indent=4): - head = "Matcher " + self.__class__.__name__ - body = [ - "cost_class: {}".format(self.cost_class), - "cost_mask: {}".format(self.cost_mask), - "cost_dice: {}".format(self.cost_dice), - ] - lines = [head] + [" " * _repr_indent + line for line in body] - return "\n".join(lines) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/chase_db1.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/chase_db1.py deleted file mode 100644 index 8bc29bea14704a4407f83474610cbc3bef32c708..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/datasets/chase_db1.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ChaseDB1Dataset(CustomDataset): - """Chase_db1 dataset. - - In segmentation map annotation for Chase_db1, 0 stands for background, - which is included in 2 categories. ``reduce_zero_label`` is fixed to False. - The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '_1stHO.png'. 
- """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(ChaseDB1Dataset, self).__init__( - img_suffix='.png', - seg_map_suffix='_1stHO.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/hypersim.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/hypersim.py deleted file mode 100644 index 4334198971830200f72ea2910d03f4c7d6a43334..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/data/hypersim.py +++ /dev/null @@ -1,138 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -# File author: Shariq Farooq Bhat - -import glob -import os - -import h5py -import numpy as np -import torch -from PIL import Image -from torch.utils.data import DataLoader, Dataset -from torchvision import transforms - - -def hypersim_distance_to_depth(npyDistance): - intWidth, intHeight, fltFocal = 1024, 768, 886.81 - - npyImageplaneX = np.linspace((-0.5 * intWidth) + 0.5, (0.5 * intWidth) - 0.5, intWidth).reshape( - 1, intWidth).repeat(intHeight, 0).astype(np.float32)[:, :, None] - npyImageplaneY = np.linspace((-0.5 * intHeight) + 0.5, (0.5 * intHeight) - 0.5, - intHeight).reshape(intHeight, 1).repeat(intWidth, 1).astype(np.float32)[:, :, None] - npyImageplaneZ = np.full([intHeight, intWidth, 1], fltFocal, np.float32) - npyImageplane = np.concatenate( - [npyImageplaneX, npyImageplaneY, npyImageplaneZ], 2) - - npyDepth = npyDistance / np.linalg.norm(npyImageplane, 2, 2) * fltFocal - return npyDepth - - -class ToTensor(object): - def __init__(self): - # self.normalize = transforms.Normalize( - # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.normalize = lambda x: x - self.resize = transforms.Resize((480, 640)) - - def __call__(self, sample): - image, depth = sample['image'], sample['depth'] - image = self.to_tensor(image) - image = self.normalize(image) - depth = self.to_tensor(depth) - - image = self.resize(image) - - return {'image': image, 'depth': depth, 'dataset': "hypersim"} - - def to_tensor(self, pic): - - if isinstance(pic, np.ndarray): - img = torch.from_numpy(pic.transpose((2, 0, 1))) - return img - - # # handle PIL Image - if pic.mode == 'I': - img = torch.from_numpy(np.array(pic, np.int32, copy=False)) - elif pic.mode == 'I;16': - img = torch.from_numpy(np.array(pic, np.int16, copy=False)) - else: - img = torch.ByteTensor( - torch.ByteStorage.from_buffer(pic.tobytes())) - # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK - if pic.mode == 'YCbCr': - nchannel = 3 - elif pic.mode == 'I;16': - nchannel = 1 - else: - nchannel = len(pic.mode) - img = img.view(pic.size[1], pic.size[0], nchannel) - - img = img.transpose(0, 1).transpose(0, 2).contiguous() - if isinstance(img, torch.ByteTensor): - return img.float() - else: - return img - - -class HyperSim(Dataset): - def __init__(self, data_dir_root): - # image paths are of the form //images/scene_cam_#_final_preview/*.tonemap.jpg - # depth paths are of the form //images/scene_cam_#_final_preview/*.depth_meters.hdf5 - self.image_files = glob.glob(os.path.join( - data_dir_root, '*', 'images', 'scene_cam_*_final_preview', '*.tonemap.jpg')) - self.depth_files = [r.replace("_final_preview", "_geometry_hdf5").replace( - ".tonemap.jpg", ".depth_meters.hdf5") for r in self.image_files] - self.transform = ToTensor() - - def __getitem__(self, idx): - image_path = self.image_files[idx] - depth_path = self.depth_files[idx] - - image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0 - - # depth from hdf5 - depth_fd = h5py.File(depth_path, "r") - # in meters (Euclidean distance) - distance_meters = np.array(depth_fd['dataset']) - depth = hypersim_distance_to_depth( - distance_meters) # in meters (planar depth) - - # depth[depth > 8] = -1 - depth = depth[..., None] - - sample = dict(image=image, depth=depth) - sample = self.transform(sample) - - if idx == 0: - print(sample["image"].shape) - - return sample - - def __len__(self): - return len(self.image_files) - - -def get_hypersim_loader(data_dir_root, batch_size=1, **kwargs): - dataset = HyperSim(data_dir_root) - return DataLoader(dataset, batch_size, **kwargs) 
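
A quick aside on the ZoeDepth Hypersim loader deleted just above: `hypersim_distance_to_depth` converts the ray-distance maps stored in Hypersim's `*.depth_meters.hdf5` files into planar depth using the dataset's fixed 1024x768 resolution and 886.81px focal length, and `get_hypersim_loader` simply wraps the `HyperSim` dataset in a plain `DataLoader` whose batches carry `image` (resized to 480x640 by `ToTensor`) and `depth` tensors. The sketch below is only an illustration of how that code was typically driven; the import path and the data directory are assumptions, not something recorded in this diff.

```
# Minimal usage sketch (assumed import path and a placeholder data root).
import numpy as np
from annotator.zoe.zoedepth.data.hypersim import (
    get_hypersim_loader,
    hypersim_distance_to_depth,
)

# Sanity-check the distance -> planar-depth conversion: for a constant
# 5 m ray distance, planar depth peaks near 5 m at the image centre and
# falls off towards the corners, so it never exceeds the input distance.
distance = np.full((768, 1024), 5.0, dtype=np.float32)
depth = hypersim_distance_to_depth(distance)
assert depth.shape == (768, 1024)
assert 0.0 < depth.min() and depth.max() <= 5.0

# Iterate the loader exactly as a training script would (requires a local
# Hypersim download at the placeholder path below).
loader = get_hypersim_loader("/data/hypersim", batch_size=2, shuffle=False)
for batch in loader:
    # image: (B, 3, 480, 640) after ToTensor/Resize; depth: (B, 1, 768, 1024)
    print(batch["image"].shape, batch["depth"].shape, batch["dataset"])
    break
```
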
diff --git a/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py b/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py deleted file mode 100644 index 7b766af057b9c052388aceb152b0191fa2e4ea25..0000000000000000000000000000000000000000 --- a/spaces/cownclown/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py +++ /dev/null @@ -1,48 +0,0 @@ -from .render import Render - -GLUT = None - -class CamRender(Render): - def __init__(self, width=1600, height=1200, name='Cam Renderer', - program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False): - Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl) - self.camera = None - - if not egl: - global GLUT - import OpenGL.GLUT as GLUT - GLUT.glutDisplayFunc(self.display) - GLUT.glutKeyboardFunc(self.keyboard) - - def set_camera(self, camera): - self.camera = camera - self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix() - - def keyboard(self, key, x, y): - # up - eps = 1 - # print(key) - if key == b'w': - self.camera.center += eps * self.camera.direction - elif key == b's': - self.camera.center -= eps * self.camera.direction - if key == b'a': - self.camera.center -= eps * self.camera.right - elif key == b'd': - self.camera.center += eps * self.camera.right - if key == b' ': - self.camera.center += eps * self.camera.up - elif key == b'x': - self.camera.center -= eps * self.camera.up - elif key == b'i': - self.camera.near += 0.1 * eps - self.camera.far += 0.1 * eps - elif key == b'o': - self.camera.near -= 0.1 * eps - self.camera.far -= 0.1 * eps - - self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix() - - def show(self): - if GLUT is not None: - GLUT.glutMainLoop() diff --git a/spaces/cscan/CodeFormer/CodeFormer/README.md b/spaces/cscan/CodeFormer/CodeFormer/README.md deleted file mode 100644 index 65810cdf4ce36d8ba152de80df00fa4c8802ee81..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/README.md +++ /dev/null @@ -1,123 +0,0 @@ -

      - -

      - -## Towards Robust Blind Face Restoration with Codebook Lookup Transformer - -[Paper](https://arxiv.org/abs/2206.11253) | [Project Page](https://shangchenzhou.com/projects/CodeFormer/) | [Video](https://youtu.be/d3VDpkXlueI) - - -google colab logo [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/sczhou/codeformer) ![visitors](https://visitor-badge.glitch.me/badge?page_id=sczhou/CodeFormer) - -[Shangchen Zhou](https://shangchenzhou.com/), [Kelvin C.K. Chan](https://ckkelvinchan.github.io/), [Chongyi Li](https://li-chongyi.github.io/), [Chen Change Loy](https://www.mmlab-ntu.com/person/ccloy/) - -S-Lab, Nanyang Technological University - - - - -:star: If CodeFormer is helpful to your images or projects, please help star this repo. Thanks! :hugs: - -### Update - -- **2022.09.09**: Integrated to :rocket: [Replicate](https://replicate.com/). Try out online demo! [![Replicate](https://img.shields.io/badge/Demo-%F0%9F%9A%80%20Replicate-blue)](https://replicate.com/sczhou/codeformer) -- **2022.09.04**: Add face upsampling `--face_upsample` for high-resolution AI-created face enhancement. -- **2022.08.23**: Some modifications on face detection and fusion for better AI-created face enhancement. -- **2022.08.07**: Integrate [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) to support background image enhancement. -- **2022.07.29**: Integrate new face detectors of `['RetinaFace'(default), 'YOLOv5']`. -- **2022.07.17**: Add Colab demo of CodeFormer. google colab logo -- **2022.07.16**: Release inference code for face restoration. :blush: -- **2022.06.21**: This repo is created. - -### TODO -- [ ] Add checkpoint for face inpainting -- [ ] Add training code and config files -- [x] ~~Add background image enhancement~~ - -#### Face Restoration - - - - -#### Face Color Enhancement and Restoration - - - -#### Face Inpainting - - - - - -### Dependencies and Installation - -- Pytorch >= 1.7.1 -- CUDA >= 10.1 -- Other required packages in `requirements.txt` -``` -# git clone this repository -git clone https://github.com/sczhou/CodeFormer -cd CodeFormer - -# create new anaconda env -conda create -n codeformer python=3.8 -y -conda activate codeformer - -# install python dependencies -pip3 install -r requirements.txt -python basicsr/setup.py develop -``` - - -### Quick Inference - -##### Download Pre-trained Models: -Download the facelib pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1b_3qwrzY_kTQh0-SnBoGBgOrJ_PLZSKm?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EvDxR7FcAbZMp_MA9ouq7aQB8XTppMb3-T0uGZ_2anI2mg?e=DXsJFo)] to the `weights/facelib` folder. You can manually download the pretrained models OR download by runing the following command. -``` -python scripts/download_pretrained_models.py facelib -``` - -Download the CodeFormer pretrained models from [[Google Drive](https://drive.google.com/drive/folders/1CNNByjHDFt0b95q54yMVp6Ifo5iuU6QS?usp=sharing) | [OneDrive](https://entuedu-my.sharepoint.com/:f:/g/personal/s200094_e_ntu_edu_sg/EoKFj4wo8cdIn2-TY2IV6CYBhZ0pIG4kUOeHdPR_A5nlbg?e=AO8UN9)] to the `weights/CodeFormer` folder. You can manually download the pretrained models OR download by runing the following command. -``` -python scripts/download_pretrained_models.py CodeFormer -``` - -##### Prepare Testing Data: -You can put the testing images in the `inputs/TestWhole` folder. 
If you would like to test on cropped and aligned faces, you can put them in the `inputs/cropped_faces` folder. - - -##### Testing on Face Restoration: -``` -# For cropped and aligned faces -python inference_codeformer.py --w 0.5 --has_aligned --test_path [input folder] - -# For the whole images -# Add '--bg_upsampler realesrgan' to enhance the background regions with Real-ESRGAN -# Add '--face_upsample' to further upsample restorated face with Real-ESRGAN -python inference_codeformer.py --w 0.7 --test_path [input folder] -``` - -NOTE that *w* is in [0, 1]. Generally, smaller *w* tends to produce a higher-quality result, while larger *w* yields a higher-fidelity result. - -The results will be saved in the `results` folder. - -### Citation -If our work is useful for your research, please consider citing: - - @article{zhou2022codeformer, - author = {Zhou, Shangchen and Chan, Kelvin C.K. and Li, Chongyi and Loy, Chen Change}, - title = {Towards Robust Blind Face Restoration with Codebook Lookup TransFormer}, - journal = {arXiv preprint arXiv:2206.11253}, - year = {2022} - } - -### License - -Creative Commons License
      This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. - -### Acknowledgement - -This project is based on [BasicSR](https://github.com/XPixelGroup/BasicSR). We also borrow some codes from [Unleashing Transformers](https://github.com/samb-t/unleashing-transformers), [YOLOv5-face](https://github.com/deepcam-cn/yolov5-face), and [FaceXLib](https://github.com/xinntao/facexlib). Thanks for their awesome works. - -### Contact -If you have any question, please feel free to reach me out at `shangchenzhou@gmail.com`. \ No newline at end of file diff --git a/spaces/cvlab/zero123-live/ldm/data/coco.py b/spaces/cvlab/zero123-live/ldm/data/coco.py deleted file mode 100644 index 5e5e27e6ec6a51932f67b83dd88533cb39631e26..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/ldm/data/coco.py +++ /dev/null @@ -1,253 +0,0 @@ -import os -import json -import albumentations -import numpy as np -from PIL import Image -from tqdm import tqdm -from torch.utils.data import Dataset -from abc import abstractmethod - - -class CocoBase(Dataset): - """needed for (image, caption, segmentation) pairs""" - def __init__(self, size=None, dataroot="", datajson="", onehot_segmentation=False, use_stuffthing=False, - crop_size=None, force_no_crop=False, given_files=None, use_segmentation=True,crop_type=None): - self.split = self.get_split() - self.size = size - if crop_size is None: - self.crop_size = size - else: - self.crop_size = crop_size - - assert crop_type in [None, 'random', 'center'] - self.crop_type = crop_type - self.use_segmenation = use_segmentation - self.onehot = onehot_segmentation # return segmentation as rgb or one hot - self.stuffthing = use_stuffthing # include thing in segmentation - if self.onehot and not self.stuffthing: - raise NotImplemented("One hot mode is only supported for the " - "stuffthings version because labels are stored " - "a bit different.") - - data_json = datajson - with open(data_json) as json_file: - self.json_data = json.load(json_file) - self.img_id_to_captions = dict() - self.img_id_to_filepath = dict() - self.img_id_to_segmentation_filepath = dict() - - assert data_json.split("/")[-1] in [f"captions_train{self.year()}.json", - f"captions_val{self.year()}.json"] - # TODO currently hardcoded paths, would be better to follow logic in - # cocstuff pixelmaps - if self.use_segmenation: - if self.stuffthing: - self.segmentation_prefix = ( - f"data/cocostuffthings/val{self.year()}" if - data_json.endswith(f"captions_val{self.year()}.json") else - f"data/cocostuffthings/train{self.year()}") - else: - self.segmentation_prefix = ( - f"data/coco/annotations/stuff_val{self.year()}_pixelmaps" if - data_json.endswith(f"captions_val{self.year()}.json") else - f"data/coco/annotations/stuff_train{self.year()}_pixelmaps") - - imagedirs = self.json_data["images"] - self.labels = {"image_ids": list()} - for imgdir in tqdm(imagedirs, desc="ImgToPath"): - self.img_id_to_filepath[imgdir["id"]] = os.path.join(dataroot, imgdir["file_name"]) - self.img_id_to_captions[imgdir["id"]] = list() - pngfilename = imgdir["file_name"].replace("jpg", "png") - if self.use_segmenation: - self.img_id_to_segmentation_filepath[imgdir["id"]] = os.path.join( - self.segmentation_prefix, pngfilename) - if given_files is not None: - if pngfilename in given_files: - self.labels["image_ids"].append(imgdir["id"]) - else: - self.labels["image_ids"].append(imgdir["id"]) - - capdirs = self.json_data["annotations"] - for capdir in tqdm(capdirs, 
desc="ImgToCaptions"): - # there are in average 5 captions per image - #self.img_id_to_captions[capdir["image_id"]].append(np.array([capdir["caption"]])) - self.img_id_to_captions[capdir["image_id"]].append(capdir["caption"]) - - self.rescaler = albumentations.SmallestMaxSize(max_size=self.size) - if self.split=="validation": - self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) - else: - # default option for train is random crop - if self.crop_type in [None, 'random']: - self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) - else: - self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) - self.preprocessor = albumentations.Compose( - [self.rescaler, self.cropper], - additional_targets={"segmentation": "image"}) - if force_no_crop: - self.rescaler = albumentations.Resize(height=self.size, width=self.size) - self.preprocessor = albumentations.Compose( - [self.rescaler], - additional_targets={"segmentation": "image"}) - - @abstractmethod - def year(self): - raise NotImplementedError() - - def __len__(self): - return len(self.labels["image_ids"]) - - def preprocess_image(self, image_path, segmentation_path=None): - image = Image.open(image_path) - if not image.mode == "RGB": - image = image.convert("RGB") - image = np.array(image).astype(np.uint8) - if segmentation_path: - segmentation = Image.open(segmentation_path) - if not self.onehot and not segmentation.mode == "RGB": - segmentation = segmentation.convert("RGB") - segmentation = np.array(segmentation).astype(np.uint8) - if self.onehot: - assert self.stuffthing - # stored in caffe format: unlabeled==255. stuff and thing from - # 0-181. to be compatible with the labels in - # https://github.com/nightrome/cocostuff/blob/master/labels.txt - # we shift stuffthing one to the right and put unlabeled in zero - # as long as segmentation is uint8 shifting to right handles the - # latter too - assert segmentation.dtype == np.uint8 - segmentation = segmentation + 1 - - processed = self.preprocessor(image=image, segmentation=segmentation) - - image, segmentation = processed["image"], processed["segmentation"] - else: - image = self.preprocessor(image=image,)['image'] - - image = (image / 127.5 - 1.0).astype(np.float32) - if segmentation_path: - if self.onehot: - assert segmentation.dtype == np.uint8 - # make it one hot - n_labels = 183 - flatseg = np.ravel(segmentation) - onehot = np.zeros((flatseg.size, n_labels), dtype=np.bool) - onehot[np.arange(flatseg.size), flatseg] = True - onehot = onehot.reshape(segmentation.shape + (n_labels,)).astype(int) - segmentation = onehot - else: - segmentation = (segmentation / 127.5 - 1.0).astype(np.float32) - return image, segmentation - else: - return image - - def __getitem__(self, i): - img_path = self.img_id_to_filepath[self.labels["image_ids"][i]] - if self.use_segmenation: - seg_path = self.img_id_to_segmentation_filepath[self.labels["image_ids"][i]] - image, segmentation = self.preprocess_image(img_path, seg_path) - else: - image = self.preprocess_image(img_path) - captions = self.img_id_to_captions[self.labels["image_ids"][i]] - # randomly draw one of all available captions per image - caption = captions[np.random.randint(0, len(captions))] - example = {"image": image, - #"caption": [str(caption[0])], - "caption": caption, - "img_path": img_path, - "filename_": img_path.split(os.sep)[-1] - } - if self.use_segmenation: - example.update({"seg_path": seg_path, 'segmentation': segmentation}) - return example - - 
-class CocoImagesAndCaptionsTrain2017(CocoBase): - """returns a pair of (image, caption)""" - def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,): - super().__init__(size=size, - dataroot="data/coco/train2017", - datajson="data/coco/annotations/captions_train2017.json", - onehot_segmentation=onehot_segmentation, - use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop) - - def get_split(self): - return "train" - - def year(self): - return '2017' - - -class CocoImagesAndCaptionsValidation2017(CocoBase): - """returns a pair of (image, caption)""" - def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False, - given_files=None): - super().__init__(size=size, - dataroot="data/coco/val2017", - datajson="data/coco/annotations/captions_val2017.json", - onehot_segmentation=onehot_segmentation, - use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop, - given_files=given_files) - - def get_split(self): - return "validation" - - def year(self): - return '2017' - - - -class CocoImagesAndCaptionsTrain2014(CocoBase): - """returns a pair of (image, caption)""" - def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,crop_type='random'): - super().__init__(size=size, - dataroot="data/coco/train2014", - datajson="data/coco/annotations2014/annotations/captions_train2014.json", - onehot_segmentation=onehot_segmentation, - use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop, - use_segmentation=False, - crop_type=crop_type) - - def get_split(self): - return "train" - - def year(self): - return '2014' - -class CocoImagesAndCaptionsValidation2014(CocoBase): - """returns a pair of (image, caption)""" - def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False, - given_files=None,crop_type='center',**kwargs): - super().__init__(size=size, - dataroot="data/coco/val2014", - datajson="data/coco/annotations2014/annotations/captions_val2014.json", - onehot_segmentation=onehot_segmentation, - use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop, - given_files=given_files, - use_segmentation=False, - crop_type=crop_type) - - def get_split(self): - return "validation" - - def year(self): - return '2014' - -if __name__ == '__main__': - with open("data/coco/annotations2014/annotations/captions_val2014.json", "r") as json_file: - json_data = json.load(json_file) - capdirs = json_data["annotations"] - import pudb; pudb.set_trace() - #d2 = CocoImagesAndCaptionsTrain2014(size=256) - d2 = CocoImagesAndCaptionsValidation2014(size=256) - print("constructed dataset.") - print(f"length of {d2.__class__.__name__}: {len(d2)}") - - ex2 = d2[0] - # ex3 = d3[0] - # print(ex1["image"].shape) - print(ex2["image"].shape) - # print(ex3["image"].shape) - # print(ex1["segmentation"].shape) - print(ex2["caption"].__class__.__name__) diff --git a/spaces/cvlab/zero123-live/ldm/modules/losses/contperceptual.py b/spaces/cvlab/zero123-live/ldm/modules/losses/contperceptual.py deleted file mode 100644 index 672c1e32a1389def02461c0781339681060c540e..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/ldm/modules/losses/contperceptual.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import torch.nn as nn - -from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
- - -class LPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_loss="hinge"): - - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - self.kl_weight = kl_weight - self.pixel_weight = pixelloss_weight - self.perceptual_loss = LPIPS().eval() - self.perceptual_weight = perceptual_weight - # output log variance - self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm - ).apply(weights_init) - self.discriminator_iter_start = disc_start - self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, inputs, reconstructions, posteriors, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", - weights=None): - rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - - nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar - weighted_nll_loss = nll_loss - if weights is not None: - weighted_nll_loss = weights*nll_loss - weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] - nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - kl_loss = posteriors.kl() - kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - if self.disc_factor > 0.0: - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = torch.tensor(0.0) - else: - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), - "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): 
torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss, log - diff --git a/spaces/danterivers/music-generation-samples/audiocraft/utils/export.py b/spaces/danterivers/music-generation-samples/audiocraft/utils/export.py deleted file mode 100644 index b513b52267f7bf5aae09282c15b0a2e20c8a8fee..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/audiocraft/utils/export.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Utility to export a training checkpoint to a lightweight release checkpoint. -""" - -from pathlib import Path -import typing as tp - -from omegaconf import OmegaConf, DictConfig -import torch - - -def _clean_lm_cfg(cfg: DictConfig): - OmegaConf.set_struct(cfg, False) - # This used to be set automatically in the LM solver, need a more robust solution - # for the future. - cfg['transformer_lm']['card'] = 2048 - cfg['transformer_lm']['n_q'] = 4 - # Experimental params no longer supported. 
- bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', - 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] - for name in bad_params: - del cfg['transformer_lm'][name] - OmegaConf.set_struct(cfg, True) - return cfg - - -def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['ema']['state']['model'], - 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file - - -def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['fsdp_best_state']['model'], - 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])) - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file diff --git a/spaces/datasciencedojo/Chatbot/README.md b/spaces/datasciencedojo/Chatbot/README.md deleted file mode 100644 index a3da5e720132f9833d0b25213c6e05ecad17d56b..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Chatbot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chatbot -emoji: ⚡ -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/datasciencedojo/Describe-Dataset/README.md b/spaces/datasciencedojo/Describe-Dataset/README.md deleted file mode 100644 index e277b215ceeda4a5ad4be265c50e13d14e94436d..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Describe-Dataset/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Describe Dataset -emoji: 💩 -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/__init__.py deleted file mode 100644 index 29fb3561e4f2dc9d3a764e756439c0dea2c9897a..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/anyio/__init__.py +++ /dev/null @@ -1,169 +0,0 @@ -from __future__ import annotations - -__all__ = ( - "maybe_async", - "maybe_async_cm", - "run", - "sleep", - "sleep_forever", - "sleep_until", - "current_time", - "get_all_backends", - "get_cancelled_exc_class", - "BrokenResourceError", - "BrokenWorkerProcess", - "BusyResourceError", - "ClosedResourceError", - "DelimiterNotFound", - "EndOfStream", - "ExceptionGroup", - "IncompleteRead", - "TypedAttributeLookupError", - "WouldBlock", - "AsyncFile", - "Path", - "open_file", - "wrap_file", - "aclose_forcefully", - "open_signal_receiver", - "connect_tcp", - "connect_unix", - "create_tcp_listener", - "create_unix_listener", - "create_udp_socket", - "create_connected_udp_socket", - "getaddrinfo", - "getnameinfo", - "wait_socket_readable", - "wait_socket_writable", - "create_memory_object_stream", - "run_process", - "open_process", - "create_lock", - "CapacityLimiter", - 
"CapacityLimiterStatistics", - "Condition", - "ConditionStatistics", - "Event", - "EventStatistics", - "Lock", - "LockStatistics", - "Semaphore", - "SemaphoreStatistics", - "create_condition", - "create_event", - "create_semaphore", - "create_capacity_limiter", - "open_cancel_scope", - "fail_after", - "move_on_after", - "current_effective_deadline", - "TASK_STATUS_IGNORED", - "CancelScope", - "create_task_group", - "TaskInfo", - "get_current_task", - "get_running_tasks", - "wait_all_tasks_blocked", - "run_sync_in_worker_thread", - "run_async_from_thread", - "run_sync_from_thread", - "current_default_worker_thread_limiter", - "create_blocking_portal", - "start_blocking_portal", - "typed_attribute", - "TypedAttributeSet", - "TypedAttributeProvider", -) - -from typing import Any - -from ._core._compat import maybe_async, maybe_async_cm -from ._core._eventloop import ( - current_time, - get_all_backends, - get_cancelled_exc_class, - run, - sleep, - sleep_forever, - sleep_until, -) -from ._core._exceptions import ( - BrokenResourceError, - BrokenWorkerProcess, - BusyResourceError, - ClosedResourceError, - DelimiterNotFound, - EndOfStream, - ExceptionGroup, - IncompleteRead, - TypedAttributeLookupError, - WouldBlock, -) -from ._core._fileio import AsyncFile, Path, open_file, wrap_file -from ._core._resources import aclose_forcefully -from ._core._signals import open_signal_receiver -from ._core._sockets import ( - connect_tcp, - connect_unix, - create_connected_udp_socket, - create_tcp_listener, - create_udp_socket, - create_unix_listener, - getaddrinfo, - getnameinfo, - wait_socket_readable, - wait_socket_writable, -) -from ._core._streams import create_memory_object_stream -from ._core._subprocesses import open_process, run_process -from ._core._synchronization import ( - CapacityLimiter, - CapacityLimiterStatistics, - Condition, - ConditionStatistics, - Event, - EventStatistics, - Lock, - LockStatistics, - Semaphore, - SemaphoreStatistics, - create_capacity_limiter, - create_condition, - create_event, - create_lock, - create_semaphore, -) -from ._core._tasks import ( - TASK_STATUS_IGNORED, - CancelScope, - create_task_group, - current_effective_deadline, - fail_after, - move_on_after, - open_cancel_scope, -) -from ._core._testing import ( - TaskInfo, - get_current_task, - get_running_tasks, - wait_all_tasks_blocked, -) -from ._core._typedattr import TypedAttributeProvider, TypedAttributeSet, typed_attribute - -# Re-exported here, for backwards compatibility -# isort: off -from .to_thread import current_default_worker_thread_limiter, run_sync_in_worker_thread -from .from_thread import ( - create_blocking_portal, - run_async_from_thread, - run_sync_from_thread, - start_blocking_portal, -) - -# Re-export imports so they look like they live directly in this package -key: str -value: Any -for key, value in list(locals().items()): - if getattr(value, "__module__", "").startswith("anyio."): - value.__module__ = __name__ diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py deleted file mode 100644 index 1ce161bfa117df1632b507d161f0dd4abb633bcc..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py +++ /dev/null @@ -1,143 +0,0 @@ -"""Helpers for manipulating 2D points and vectors in COLR table.""" - -from math import copysign, 
cos, hypot, isclose, pi -from fontTools.misc.roundTools import otRound - - -def _vector_between(origin, target): - return (target[0] - origin[0], target[1] - origin[1]) - - -def _round_point(pt): - return (otRound(pt[0]), otRound(pt[1])) - - -def _unit_vector(vec): - length = hypot(*vec) - if length == 0: - return None - return (vec[0] / length, vec[1] / length) - - -_CIRCLE_INSIDE_TOLERANCE = 1e-4 - - -# The unit vector's X and Y components are respectively -# U = (cos(α), sin(α)) -# where α is the angle between the unit vector and the positive x axis. -_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984 - - -def _rounding_offset(direction): - # Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector. - # We divide the unit circle in 8 equal slices oriented towards the cardinal - # (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we - # map one of the possible cases: -1, 0, +1 for either X and Y coordinate. - # E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or - # (-1.0, 0.0) if it's pointing West, etc. - uv = _unit_vector(direction) - if not uv: - return (0, 0) - - result = [] - for uv_component in uv: - if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD: - # unit vector component near 0: direction almost orthogonal to the - # direction of the current axis, thus keep coordinate unchanged - result.append(0) - else: - # nudge coord by +/- 1.0 in direction of unit vector - result.append(copysign(1.0, uv_component)) - return tuple(result) - - -class Circle: - def __init__(self, centre, radius): - self.centre = centre - self.radius = radius - - def __repr__(self): - return f"Circle(centre={self.centre}, radius={self.radius})" - - def round(self): - return Circle(_round_point(self.centre), otRound(self.radius)) - - def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE): - dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre)) - return ( - isclose(outer_circle.radius, dist, rel_tol=_CIRCLE_INSIDE_TOLERANCE) - or outer_circle.radius > dist - ) - - def concentric(self, other): - return self.centre == other.centre - - def move(self, dx, dy): - self.centre = (self.centre[0] + dx, self.centre[1] + dy) - - -def round_start_circle_stable_containment(c0, r0, c1, r1): - """Round start circle so that it stays inside/outside end circle after rounding. - - The rounding of circle coordinates to integers may cause an abrupt change - if the start circle c0 is so close to the end circle c1's perimiter that - it ends up falling outside (or inside) as a result of the rounding. - To keep the gradient unchanged, we nudge it in the right direction. 
- - See: - https://github.com/googlefonts/colr-gradients-spec/issues/204 - https://github.com/googlefonts/picosvg/issues/158 - """ - start, end = Circle(c0, r0), Circle(c1, r1) - - inside_before_round = start.inside(end) - - round_start = start.round() - round_end = end.round() - inside_after_round = round_start.inside(round_end) - - if inside_before_round == inside_after_round: - return round_start - elif inside_after_round: - # start was outside before rounding: we need to push start away from end - direction = _vector_between(round_end.centre, round_start.centre) - radius_delta = +1.0 - else: - # start was inside before rounding: we need to push start towards end - direction = _vector_between(round_start.centre, round_end.centre) - radius_delta = -1.0 - dx, dy = _rounding_offset(direction) - - # At most 2 iterations ought to be enough to converge. Before the loop, we - # know the start circle didn't keep containment after normal rounding; thus - # we continue adjusting by -/+ 1.0 until containment is restored. - # Normal rounding can at most move each coordinates -/+0.5; in the worst case - # both the start and end circle's centres and radii will be rounded in opposite - # directions, e.g. when they move along a 45 degree diagonal: - # c0 = (1.5, 1.5) ===> (2.0, 2.0) - # r0 = 0.5 ===> 1.0 - # c1 = (0.499, 0.499) ===> (0.0, 0.0) - # r1 = 2.499 ===> 2.0 - # In this example, the relative distance between the circles, calculated - # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and - # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both - # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these - # moves cover twice that distance, which is enough to restore containment. - max_attempts = 2 - for _ in range(max_attempts): - if round_start.concentric(round_end): - # can't move c0 towards c1 (they are the same), so we change the radius - round_start.radius += radius_delta - assert round_start.radius >= 0 - else: - round_start.move(dx, dy) - if inside_before_round == round_start.inside(round_end): - break - else: # likely a bug - raise AssertionError( - f"Rounding circle {start} " - f"{'inside' if inside_before_round else 'outside'} " - f"{end} failed after {max_attempts} attempts!" 
- ) - - return round_start diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py deleted file mode 100644 index 5369c3097187b6929df58e93284199a1729ea275..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py +++ /dev/null @@ -1,26 +0,0 @@ -from fontTools.pens.basePen import BasePen - - -__all__ = ["CocoaPen"] - - -class CocoaPen(BasePen): - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - from AppKit import NSBezierPath - - path = NSBezierPath.bezierPath() - self.path = path - - def _moveTo(self, p): - self.path.moveToPoint_(p) - - def _lineTo(self, p): - self.path.lineToPoint_(p) - - def _curveToOne(self, p1, p2, p3): - self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) - - def _closePath(self): - self.path.closePath() diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-aef3869a.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-aef3869a.css deleted file mode 100644 index a1f402a49e82009fd7eafa923615d67793b8751c..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-aef3869a.css +++ /dev/null @@ -1 +0,0 @@ -td.svelte-xrr240.svelte-xrr240{width:45%}td.svelte-xrr240.svelte-xrr240:last-child{width:10%;text-align:right}.file-preview-holder.svelte-xrr240.svelte-xrr240{overflow-x:auto}.file-preview.svelte-xrr240.svelte-xrr240{width:var(--size-full);max-height:var(--size-60);overflow-y:auto;color:var(--body-text-color)}.file.svelte-xrr240.svelte-xrr240{width:var(--size-full)}.file.svelte-xrr240>.svelte-xrr240{padding:var(--size-1) var(--size-2-5)}.download.svelte-xrr240.svelte-xrr240:hover{text-decoration:underline}.download.svelte-xrr240>a.svelte-xrr240{color:var(--link-text-color)}.download.svelte-xrr240>a.svelte-xrr240:hover{color:var(--link-text-color-hover)}.download.svelte-xrr240>a.svelte-xrr240:visited{color:var(--link-text-color-visited)}.download.svelte-xrr240>a.svelte-xrr240:active{color:var(--link-text-color-active)}.selectable.svelte-xrr240.svelte-xrr240{cursor:pointer} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/module-3b9777eb.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/module-3b9777eb.js deleted file mode 100644 index b08c3837a31dab83a06c5ae906867658c7d9b171..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/module-3b9777eb.js +++ /dev/null @@ -1,9 +0,0 @@ -import{a6 as sn}from"./index-39fce9e2.js";const vr=e=>t=>{const n=e(t);return t.add(n),n},_r=e=>(t,n)=>(e.set(t,n),n),Lt=Number.MAX_SAFE_INTEGER===void 0?9007199254740991:Number.MAX_SAFE_INTEGER,an=536870912,Pt=an*2,Er=(e,t)=>n=>{const r=t.get(n);let o=r===void 0?n.size:rLt)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;n.has(o);)o=Math.floor(Math.random()*Lt);return e(n,o)},cn=new WeakMap,yr=_r(cn),un=Er(yr,cn),Ar=vr(un),br=e=>typeof 
e.start=="function",xt=new WeakMap,Cr=e=>({...e,connect:({call:t})=>async()=>{const{port1:n,port2:r}=new MessageChannel,o=await t("connect",{port:n},[n]);return xt.set(r,o),r},disconnect:({call:t})=>async n=>{const r=xt.get(n);if(r===void 0)throw new Error("The given port is not connected.");await t("disconnect",{portId:r})},isSupported:({call:t})=>()=>t("isSupported")}),Je=new WeakMap,Tr=e=>{if(Je.has(e))return Je.get(e);const t=new Map;return Je.set(e,t),t},Mr=e=>{const t=Cr(e);return n=>{const r=Tr(n);n.addEventListener("message",({data:a})=>{const{id:c}=a;if(c!==null&&r.has(c)){const{reject:u,resolve:d}=r.get(c);r.delete(c),a.error===void 0?d(a.result):u(new Error(a.error.message))}}),br(n)&&n.start();const o=(a,c=null,u=[])=>new Promise((d,l)=>{const m=un(r);r.set(m,{reject:l,resolve:d}),c===null?n.postMessage({id:m,method:a},u):n.postMessage({id:m,method:a,params:c},u)}),s=(a,c,u=[])=>{n.postMessage({id:null,method:a,params:c},u)};let i={};for(const[a,c]of Object.entries(t))i={...i,[a]:c({call:o,notify:s})};return{...i}}},Ut=new Set,Nr=Mr({encode:({call:e})=>async(t,n)=>{const r=await e("encode",{encoderId:t,timeslice:n});return Ut.delete(t),r},instantiate:({call:e})=>async(t,n)=>{const r=Ar(Ut),o=await e("instantiate",{encoderId:r,mimeType:t,sampleRate:n});return{encoderId:r,port:o}},register:({call:e})=>t=>e("register",{port:t},[t])}),Or=e=>{const t=new Worker(e);return Nr(t)},Rr=`(()=>{var e={775:function(e,t,r){!function(e,t,r,n){"use strict";var o=function(e,t){return void 0===t?e:t.reduce((function(e,t){if("capitalize"===t){var o=e.charAt(0).toUpperCase(),s=e.slice(1);return"".concat(o).concat(s)}return"dashify"===t?r(e):"prependIndefiniteArticle"===t?"".concat(n(e)," ").concat(e):e}),e)},s=function(e){var t=e.name+e.modifiers.map((function(e){return"\\\\.".concat(e,"\\\\(\\\\)")})).join("");return new RegExp("\\\\$\\\\{".concat(t,"}"),"g")},a=function(e,r){for(var n=/\\\${([^.}]+)((\\.[^(]+\\(\\))*)}/g,a=[],i=n.exec(e);null!==i;){var c={modifiers:[],name:i[1]};if(void 0!==i[3])for(var u=/\\.[^(]+\\(\\)/g,l=u.exec(i[2]);null!==l;)c.modifiers.push(l[0].slice(1,-2)),l=u.exec(i[2]);a.push(c),i=n.exec(e)}var d=a.reduce((function(e,n){return e.map((function(e){return"string"==typeof e?e.split(s(n)).reduce((function(e,s,a){return 0===a?[s]:n.name in r?[].concat(t(e),[o(r[n.name],n.modifiers),s]):[].concat(t(e),[function(e){return o(e[n.name],n.modifiers)},s])}),[]):[e]})).reduce((function(e,r){return[].concat(t(e),t(r))}),[])}),[e]);return function(e){return d.reduce((function(r,n){return[].concat(t(r),"string"==typeof n?[n]:[n(e)])}),[]).join("")}},i=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=void 0===e.code?void 0:a(e.code,t),n=void 0===e.message?void 0:a(e.message,t);function o(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},o=arguments.length>1?arguments[1]:void 0,s=void 0===o&&(t instanceof Error||void 0!==t.code&&"Exception"===t.code.slice(-9))?{cause:t,missingParameters:{}}:{cause:o,missingParameters:t},a=s.cause,i=s.missingParameters,c=void 0===n?new Error:new Error(n(i));return null!==a&&(c.cause=a),void 0!==r&&(c.code=r(i)),void 0!==e.status&&(c.status=e.status),c}return o};e.compile=i}(t,r(106),r(881),r(507))},881:e=>{"use strict";e.exports=(e,t)=>{if("string"!=typeof e)throw new TypeError("expected a string");return e.trim().replace(/([a-z])([A-Z])/g,"$1-$2").replace(/\\W/g,(e=>/[À-ž]/.test(e)?e:"-")).replace(/^-+|-+$/g,"").replace(/-{2,}/g,(e=>t&&t.condense?"-":e)).toLowerCase()}},107:function(e,t){!function(e){"use 
strict";var t=function(e){return function(t){var r=e(t);return t.add(r),r}},r=function(e){return function(t,r){return e.set(t,r),r}},n=void 0===Number.MAX_SAFE_INTEGER?9007199254740991:Number.MAX_SAFE_INTEGER,o=536870912,s=2*o,a=function(e,t){return function(r){var a=t.get(r),i=void 0===a?r.size:an)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;r.has(i);)i=Math.floor(Math.random()*n);return e(r,i)}},i=new WeakMap,c=r(i),u=a(c,i),l=t(u);e.addUniqueNumber=l,e.generateUniqueNumber=u}(t)},507:e=>{var t=function(e){var t,r,n=/\\w+/.exec(e);if(!n)return"an";var o=(r=n[0]).toLowerCase(),s=["honest","hour","hono"];for(t in s)if(0==o.indexOf(s[t]))return"an";if(1==o.length)return"aedhilmnorsx".indexOf(o)>=0?"an":"a";if(r.match(/(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]/))return"an";var a=[/^e[uw]/,/^onc?e\\b/,/^uni([^nmd]|mo)/,/^u[bcfhjkqrst][aeiou]/];for(t=0;t=0?"an":"a":"aeiou".indexOf(o[0])>=0||o.match(/^y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)/)?"an":"a"};void 0!==e.exports?e.exports=t:window.indefiniteArticle=t},768:e=>{e.exports=function(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r{var n=r(768);e.exports=function(e){if(Array.isArray(e))return n(e)},e.exports.__esModule=!0,e.exports.default=e.exports},642:e=>{e.exports=function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)},e.exports.__esModule=!0,e.exports.default=e.exports},344:e=>{e.exports=function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")},e.exports.__esModule=!0,e.exports.default=e.exports},106:(e,t,r)=>{var n=r(907),o=r(642),s=r(906),a=r(344);e.exports=function(e){return n(e)||o(e)||s(e)||a()},e.exports.__esModule=!0,e.exports.default=e.exports},906:(e,t,r)=>{var n=r(768);e.exports=function(e,t){if(e){if("string"==typeof e)return n(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?n(e,t):void 0}},e.exports.__esModule=!0,e.exports.default=e.exports}},t={};function r(n){var o=t[n];if(void 0!==o)return o.exports;var s=t[n]={exports:{}};return e[n].call(s.exports,s,s.exports,r),s.exports}(()=>{"use strict";var e=r(775);const t=-32603,n=-32602,o=-32601,s=(0,e.compile)({message:'The requested method called "\${method}" is not supported.',status:o}),a=(0,e.compile)({message:'The handler of the method called "\${method}" returned no required result.',status:t}),i=(0,e.compile)({message:'The handler of the method called "\${method}" returned an unexpected result.',status:t}),c=(0,e.compile)({message:'The specified parameter called "portId" with the given value "\${portId}" does not identify a port connected to this worker.',status:n});var u=r(107);const l=new Map,d=(e,t,r)=>({...t,connect:r=>{let{port:n}=r;n.start();const o=e(n,t),s=(0,u.generateUniqueNumber)(l);return l.set(s,(()=>{o(),n.close(),l.delete(s)})),{result:s}},disconnect:e=>{let{portId:t}=e;const r=l.get(t);if(void 0===r)throw c({portId:t.toString()});return r(),{result:null}},isSupported:async()=>{if(await new Promise((e=>{const t=new ArrayBuffer(0),{port1:r,port2:n}=new MessageChannel;r.onmessage=t=>{let{data:r}=t;return e(null!==r)},n.postMessage(t,[t])}))){const e=r();return{result:e instanceof 
Promise?await e:e}}return{result:!1}}}),f=function(e,t){const r=d(f,t,arguments.length>2&&void 0!==arguments[2]?arguments[2]:()=>!0),n=((e,t)=>async r=>{let{data:{id:n,method:o,params:c}}=r;const u=t[o];try{if(void 0===u)throw s({method:o});const t=void 0===c?u():u(c);if(void 0===t)throw a({method:o});const r=t instanceof Promise?await t:t;if(null===n){if(void 0!==r.result)throw i({method:o})}else{if(void 0===r.result)throw i({method:o});const{result:t,transferables:s=[]}=r;e.postMessage({id:n,result:t},s)}}catch(t){const{message:r,status:o=-32603}=t;e.postMessage({error:{code:o,message:r},id:n})}})(e,r);return e.addEventListener("message",n),()=>e.removeEventListener("message",n)},p=e=>{e.onmessage=null,e.close()},m=new WeakMap,h=new WeakMap,g=(e=>{const t=(r=e,{...r,connect:e=>{let{call:t}=e;return async()=>{const{port1:e,port2:r}=new MessageChannel,n=await t("connect",{port:e},[e]);return m.set(r,n),r}},disconnect:e=>{let{call:t}=e;return async e=>{const r=m.get(e);if(void 0===r)throw new Error("The given port is not connected.");await t("disconnect",{portId:r})}},isSupported:e=>{let{call:t}=e;return()=>t("isSupported")}});var r;return e=>{const r=(e=>{if(h.has(e))return h.get(e);const t=new Map;return h.set(e,t),t})(e);e.addEventListener("message",(e=>{let{data:t}=e;const{id:n}=t;if(null!==n&&r.has(n)){const{reject:e,resolve:o}=r.get(n);r.delete(n),void 0===t.error?o(t.result):e(new Error(t.error.message))}})),(e=>"function"==typeof e.start)(e)&&e.start();const n=function(t){let n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null,o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return new Promise(((s,a)=>{const i=(0,u.generateUniqueNumber)(r);r.set(i,{reject:a,resolve:s}),null===n?e.postMessage({id:i,method:t},o):e.postMessage({id:i,method:t,params:n},o)}))},o=function(t,r){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];e.postMessage({id:null,method:t,params:r},n)};let s={};for(const[e,r]of Object.entries(t))s={...s,[e]:r({call:n,notify:o})};return{...s}}})({characterize:e=>{let{call:t}=e;return()=>t("characterize")},encode:e=>{let{call:t}=e;return(e,r)=>t("encode",{recordingId:e,timeslice:r})},record:e=>{let{call:t}=e;return async(e,r,n)=>{await t("record",{recordingId:e,sampleRate:r,typedArrays:n},n.map((e=>{let{buffer:t}=e;return t})))}}}),v=async(e,t)=>{const r=g(t),n=await r.characterize(),o=n.toString();if(e.has(o))throw new Error("There is already an encoder stored which handles exactly the same mime types.");return e.set(o,[n,r]),n},w=new Map,x=(e=>t=>{const r=e.get(t);if(void 0===r)throw new Error("There was no instance of an encoder stored with the given id.");return r})(w),y=((e,t)=>r=>{const n=t(r);return e.delete(r),n})(w,x),M=new Map,b=((e,t)=>r=>{const[n,o,s,a]=t(r);return s?new Promise((t=>{o.onmessage=s=>{let{data:i}=s;0===i.length?(e(o),t(n.encode(r,null))):n.record(r,a,i)}})):n.encode(r,null)})(p,y),E=(e=>t=>{for(const[r,n]of Array.from(e.values()))if(r.test(t))return n;throw new Error("There is no encoder registered which could handle the given mimeType.")})(M),A=((e,t,r)=>(n,o,s)=>{if(t.has(n))throw new Error('There is already an encoder registered with an id called "'.concat(n,'".'));const a=r(o),{port1:i,port2:c}=new MessageChannel,u=[a,i,!0,s];return t.set(n,u),i.onmessage=t=>{let{data:r}=t;0===r.length?(e(i),u[2]=!1):a.record(n,s,r.map((e=>"number"==typeof e?new Float32Array(e):e)))},c})(p,w,E),I=(e=>(t,r)=>{const[n]=e(t);return n.encode(t,r)})(x);f(self,{encode:async e=>{let{encoderId:t,timeslice:r}=e;const 
n=null===r?await b(t):await I(t,r);return{result:n,transferables:n}},instantiate:e=>{let{encoderId:t,mimeType:r,sampleRate:n}=e;const o=A(t,r,n);return{result:o,transferables:[o]}},register:async e=>{let{port:t}=e;return{result:await v(M,t)}}})})()})();`,Ir=new Blob([Rr],{type:"application/javascript; charset=utf-8"}),ln=URL.createObjectURL(Ir),pt=Or(ln),Se=pt.encode,dn=pt.instantiate,Sr=pt.register;URL.revokeObjectURL(ln);const kr=e=>(t,n)=>{if(e===null)throw new Error("A native BlobEvent could not be created.");return new e(t,n)},Lr=(e,t)=>(n,r,o)=>{const s=[];let i=r,a=0;for(;aclass{constructor(r=null){this._listeners=new WeakMap,this._nativeEventTarget=r===null?e():r}addEventListener(r,o,s){if(o!==null){let i=this._listeners.get(o);i===void 0&&(i=t(this,o),typeof o=="function"&&this._listeners.set(o,i)),this._nativeEventTarget.addEventListener(r,i,s)}}dispatchEvent(r){return this._nativeEventTarget.dispatchEvent(r)}removeEventListener(r,o,s){const i=o===null?void 0:this._listeners.get(o);this._nativeEventTarget.removeEventListener(r,i===void 0?null:i,s)}},xr=e=>()=>{if(e===null)throw new Error("A native EventTarget could not be created.");return e.document.createElement("p")},mt=(e="")=>{try{return new DOMException(e,"InvalidModificationError")}catch(t){return t.code=13,t.message=e,t.name="InvalidModificationError",t}},Ur=()=>{try{return new DOMException("","InvalidStateError")}catch(e){return e.code=11,e.name="InvalidStateError",e}},Wr=e=>{if(e!==null&&e.BlobEvent!==void 0&&e.MediaStream!==void 0&&(e.MediaRecorder===void 0||e.MediaRecorder.isTypeSupported!==void 0)){if(e.MediaRecorder===void 0)return Promise.resolve(!0);const t=e.document.createElement("canvas");if(t.getContext("2d"),typeof t.captureStream!="function")return Promise.resolve(!1);const n=t.captureStream();return Promise.all([new Promise(r=>{const o="audio/webm";try{const s=new e.MediaRecorder(n,{mimeType:o});s.addEventListener("dataavailable",({data:i})=>r(i.type===o)),s.start(),setTimeout(()=>s.stop(),10)}catch(s){r(s.name==="NotSupportedError")}}),new Promise(r=>{const o=new e.MediaRecorder(n);o.addEventListener("error",s=>{r("error"in s&&s.error!==null&&typeof s.error=="object"&&"name"in s.error&&s.error.name!=="UnknownError"),o.stop()}),o.start(),n.removeTrack(n.getVideoTracks()[0])})]).then(r=>r.every(o=>o))}return Promise.resolve(!1)},Br=(e,t,n,r,o,s,i)=>class extends s{constructor(c,u={}){const{mimeType:d}=u;if(i!==null&&(d===void 0||i.isTypeSupported!==void 0&&i.isTypeSupported(d))){const l=e(i,c,u);super(l),this._internalMediaRecorder=l}else if(d!==void 0&&o.some(l=>l.test(d)))super(),i!==null&&i.isTypeSupported!==void 0&&i.isTypeSupported("audio/webm;codecs=pcm")?this._internalMediaRecorder=r(this,i,c,d):this._internalMediaRecorder=n(this,c,d);else throw i!==null&&e(i,c,u),t();this._ondataavailable=null,this._onerror=null,this._onpause=null,this._onresume=null,this._onstart=null,this._onstop=null}get mimeType(){return this._internalMediaRecorder.mimeType}get ondataavailable(){return this._ondataavailable===null?this._ondataavailable:this._ondataavailable[0]}set ondataavailable(c){if(this._ondataavailable!==null&&this.removeEventListener("dataavailable",this._ondataavailable[1]),typeof c=="function"){const u=c.bind(this);this.addEventListener("dataavailable",u),this._ondataavailable=[c,u]}else this._ondataavailable=null}get onerror(){return this._onerror===null?this._onerror:this._onerror[0]}set onerror(c){if(this._onerror!==null&&this.removeEventListener("error",this._onerror[1]),typeof c=="function"){const 
u=c.bind(this);this.addEventListener("error",u),this._onerror=[c,u]}else this._onerror=null}get onpause(){return this._onpause===null?this._onpause:this._onpause[0]}set onpause(c){if(this._onpause!==null&&this.removeEventListener("pause",this._onpause[1]),typeof c=="function"){const u=c.bind(this);this.addEventListener("pause",u),this._onpause=[c,u]}else this._onpause=null}get onresume(){return this._onresume===null?this._onresume:this._onresume[0]}set onresume(c){if(this._onresume!==null&&this.removeEventListener("resume",this._onresume[1]),typeof c=="function"){const u=c.bind(this);this.addEventListener("resume",u),this._onresume=[c,u]}else this._onresume=null}get onstart(){return this._onstart===null?this._onstart:this._onstart[0]}set onstart(c){if(this._onstart!==null&&this.removeEventListener("start",this._onstart[1]),typeof c=="function"){const u=c.bind(this);this.addEventListener("start",u),this._onstart=[c,u]}else this._onstart=null}get onstop(){return this._onstop===null?this._onstop:this._onstop[0]}set onstop(c){if(this._onstop!==null&&this.removeEventListener("stop",this._onstop[1]),typeof c=="function"){const u=c.bind(this);this.addEventListener("stop",u),this._onstop=[c,u]}else this._onstop=null}get state(){return this._internalMediaRecorder.state}pause(){return this._internalMediaRecorder.pause()}resume(){return this._internalMediaRecorder.resume()}start(c){return this._internalMediaRecorder.start(c)}stop(){return this._internalMediaRecorder.stop()}static isTypeSupported(c){return i!==null&&i.isTypeSupported!==void 0&&i.isTypeSupported(c)||o.some(u=>u.test(c))}},Dr=e=>e!==null&&e.BlobEvent!==void 0?e.BlobEvent:null,Vr=(e,t)=>(n,r,o)=>{const s=[],i=new WeakMap,a=new WeakMap,c=new n(r,o),u=new WeakMap;let d=!0;return c.addEventListener=(l=>(m,g,w)=>{let f=g;return typeof g=="function"&&(m==="dataavailable"?(f=p=>{setTimeout(()=>{if(d&&c.state==="inactive")s.push(p.data);else{if(s.length>0){const h=p.data;Object.defineProperty(p,"data",{value:new Blob([...s,h],{type:h.type})}),s.length=0}g.call(c,p)}})},i.set(g,f)):m==="error"?(f=p=>{p.error===void 0?g.call(c,new ErrorEvent("error",{error:e()})):p instanceof ErrorEvent?g.call(c,p):g.call(c,new ErrorEvent("error",{error:p.error}))},a.set(g,f)):m==="stop"&&(f=p=>{d=!1,setTimeout(()=>g.call(c,p))},u.set(g,f))),l.call(c,m,f,w)})(c.addEventListener),c.dispatchEvent=(l=>m=>{let g;setTimeout(()=>{g=d,d=!1});const w=l.call(c,m);return setTimeout(()=>d=g),w})(c.dispatchEvent),c.removeEventListener=(l=>(m,g,w)=>{let f=g;if(typeof g=="function"){if(m==="dataavailable"){const p=i.get(g);p!==void 0&&(f=p)}else if(m==="error"){const p=a.get(g);p!==void 0&&(f=p)}else if(m==="stop"){const p=u.get(g);p!==void 0&&(f=p)}}return l.call(c,m,f,w)})(c.removeEventListener),c.start=(l=>m=>{if(o.mimeType!==void 0&&o.mimeType.startsWith("audio/")&&r.getVideoTracks().length>0)throw t();return d=m!==void 0,m===void 0?l.call(c):l.call(c,m)})(c.start),c},Fr=e=>e===null||e.MediaRecorder===void 0?null:e.MediaRecorder,De=()=>{try{return new DOMException("","NotSupportedError")}catch(e){return e.code=9,e.name="NotSupportedError",e}},jr=e=>(t,n,r,o=2)=>{const s=e(t,n);if(s===null)return s;const{length:i,value:a}=s;if(r==="master")return{content:null,length:i};if(n+i+a>t.byteLength)return null;if(r==="binary"){const c=(a/Float32Array.BYTES_PER_ELEMENT-1)/o,u=Array.from({length:o},()=>new Float32Array(c));for(let d=0;d(t,n)=>{const r=e(t,n);if(r===null)return r;const{length:o,value:s}=r;return 
s===35?{length:o,type:"binary"}:s===46||s===97||s===88713574||s===106212971||s===139690087||s===172351395||s===256095861?{length:o,type:"master"}:{length:o,type:"unknown"}},Gr=e=>(t,n)=>{const r=e(t,n);if(r===null)return r;const o=n+Math.floor((r-1)/8);if(o+r>t.byteLength)return null;let i=t.getUint8(o)&(1<<8-r%8)-1;for(let a=1;a(t,n)=>(e.set(t,n),n),Wt=Number.MAX_SAFE_INTEGER===void 0?9007199254740991:Number.MAX_SAFE_INTEGER,fn=536870912,Bt=fn*2,zr=(e,t)=>n=>{const r=t.get(n);let o=r===void 0?n.size:rWt)throw new Error("Congratulations, you created a collection of unique numbers which uses all available integers!");for(;n.has(o);)o=Math.floor(Math.random()*Wt);return e(n,o)},hn=new WeakMap,Hr=qr(hn),Xr=zr(Hr,hn),Dt=Symbol.observable||"@@observable";function pn(e){return Symbol.observable||(typeof e=="function"&&e.prototype&&e.prototype[Symbol.observable]?(e.prototype[Dt]=e.prototype[Symbol.observable],delete e.prototype[Symbol.observable]):(e[Dt]=e[Symbol.observable],delete e[Symbol.observable])),e}const Ne=()=>{},Vt=e=>{throw e};function mn(e){return e?e.next&&e.error&&e.complete?e:{complete:(e.complete??Ne).bind(e),error:(e.error??Vt).bind(e),next:(e.next??Ne).bind(e)}:{complete:Ne,error:Vt,next:Ne}}const Yr=e=>(t,n,r)=>e(o=>{const s=i=>o.next(i);return t.addEventListener(n,s,r),()=>t.removeEventListener(n,s,r)}),Zr=(e,t)=>{const n=()=>{},r=o=>typeof o[0]=="function";return o=>{const s=(...i)=>{const a=o(r(i)?t({next:i[0]}):t(...i));return a!==void 0?a:n};return s[Symbol.observable]=()=>({subscribe:(...i)=>({unsubscribe:s(...i)})}),e(s)}},Kr=Zr(pn,mn),Qr=Yr(Kr);/*! - * dashify - * - * Copyright (c) 2015-2017, Jon Schlinkert. - * Released under the MIT License. - */var Jr=(e,t)=>{if(typeof e!="string")throw new TypeError("expected a string");return e.trim().replace(/([a-z])([A-Z])/g,"$1-$2").replace(/\W/g,n=>/[À-ž]/.test(n)?n:"-").replace(/^-+|-+$/g,"").replace(/-{2,}/g,n=>t&&t.condense?"-":n).toLowerCase()};const eo=sn(Jr);var gn={exports:{}};(function(e){var t=function(n){var r,o,s=/\w+/.exec(n);if(s)o=s[0];else return"an";var i=o.toLowerCase(),a=["honest","hour","hono"];for(r in a)if(i.indexOf(a[r])==0)return"an";if(i.length==1)return"aedhilmnorsx".indexOf(i)>=0?"an":"a";if(o.match(/(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]/))return"an";var c=[/^e[uw]/,/^onc?e\b/,/^uni([^nmd]|mo)/,/^u[bcfhjkqrst][aeiou]/];for(r=0;r=0?"an":"a":"aeiou".indexOf(i[0])>=0||i.match(/^y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)/)?"an":"a"};e.exports=t})(gn);var to=gn.exports;const no=sn(to),Ft=(e,t)=>t===void 0?e:t.reduce((n,r)=>{if(r==="capitalize"){const o=n.charAt(0).toUpperCase(),s=n.slice(1);return`${o}${s}`}return r==="dashify"?eo(n):r==="prependIndefiniteArticle"?`${no(n)} ${n}`:n},e),ro=e=>{const t=e.name+e.modifiers.map(n=>`\\.${n}\\(\\)`).join("");return new RegExp(`\\$\\{${t}}`,"g")},jt=(e,t)=>{const n=/\${([^.}]+)((\.[^(]+\(\))*)}/g,r=[];let o=n.exec(e);for(;o!==null;){const i={modifiers:[],name:o[1]};if(o[3]!==void 0){const a=/\.[^(]+\(\)/g;let c=a.exec(o[2]);for(;c!==null;)i.modifiers.push(c[0].slice(1,-2)),c=a.exec(o[2])}r.push(i),o=n.exec(e)}const s=r.reduce((i,a)=>i.map(c=>typeof c=="string"?c.split(ro(a)).reduce((u,d,l)=>l===0?[d]:a.name in t?[...u,Ft(t[a.name],a.modifiers),d]:[...u,m=>Ft(m[a.name],a.modifiers),d],[]):[c]).reduce((c,u)=>[...c,...u],[]),[e]);return i=>s.reduce((a,c)=>typeof c=="string"?[...a,c]:[...a,c(i)],[]).join("")},Ve=(e,t={})=>{const n=e.code===void 0?void 0:jt(e.code,t),r=e.message===void 0?void 
0:jt(e.message,t);function o(s={},i){const a=i===void 0&&(s instanceof Error||s.code!==void 0&&s.code.slice(-9)==="Exception"),{cause:c,missingParameters:u}=a?{cause:s,missingParameters:{}}:{cause:i,missingParameters:s},d=r===void 0?new Error:new Error(r(u));return c!==null&&(d.cause=c),n!==void 0&&(d.code=n(u)),e.status!==void 0&&(d.status=e.status),d}return o},Fe={INTERNAL_ERROR:-32603,INVALID_PARAMS:-32602,METHOD_NOT_FOUND:-32601};Ve({message:'The requested method called "${method}" is not supported.',status:Fe.METHOD_NOT_FOUND});Ve({message:'The handler of the method called "${method}" returned no required result.',status:Fe.INTERNAL_ERROR});Ve({message:'The handler of the method called "${method}" returned an unexpected result.',status:Fe.INTERNAL_ERROR});Ve({message:'The specified parameter called "portId" with the given value "${portId}" does not identify a port connected to this worker.',status:Fe.INVALID_PARAMS});const oo=(e,t,n)=>async r=>{const o=new e([n],{type:"application/javascript; charset=utf-8"}),s=t.createObjectURL(o);try{await r(s)}finally{t.revokeObjectURL(s)}},so=e=>({data:t})=>{const{id:n}=t;if(n!==null){const r=e.get(n);if(r!==void 0){const{reject:o,resolve:s}=r;e.delete(n),t.error===void 0?s(t.result):o(new Error(t.error.message))}}},io=e=>(t,n)=>(r,o=[])=>new Promise((s,i)=>{const a=e(t);t.set(a,{reject:i,resolve:s}),n.postMessage({id:a,...r},o)}),ao=(e,t,n,r)=>(o,s,i={})=>{const a=new o(s,"recorder-audio-worklet-processor",{...i,channelCountMode:"explicit",numberOfInputs:1,numberOfOutputs:0}),c=new Map,u=t(c,a.port),d=n(a.port,"message")(e(c));a.port.start();let l="inactive";return Object.defineProperties(a,{pause:{get(){return async()=>(r(["recording"],l),l="paused",u({method:"pause"}))}},port:{get(){throw new Error("The port of a RecorderAudioWorkletNode can't be accessed.")}},record:{get(){return async m=>(r(["inactive"],l),l="recording",u({method:"record",params:{encoderPort:m}},[m]))}},resume:{get(){return async()=>(r(["paused"],l),l="recording",u({method:"resume"}))}},stop:{get(){return async()=>{r(["paused","recording"],l),l="stopped";try{await u({method:"stop"})}finally{d()}}}}}),a},co=(e,t)=>{if(!e.includes(t))throw new Error(`Expected the state to be ${e.map(n=>`"${n}"`).join(" or ")} but it was "${t}".`)},uo='(()=>{"use strict";class e extends AudioWorkletProcessor{constructor(){super(),this._encoderPort=null,this._numberOfChannels=0,this._state="inactive",this.port.onmessage=e=>{let{data:t}=e;"pause"===t.method?"active"===this._state||"recording"===this._state?(this._state="paused",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"record"===t.method?"inactive"===this._state?(this._encoderPort=t.params.encoderPort,this._state="active",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"resume"===t.method?"paused"===this._state?(this._state="active",this._sendAcknowledgement(t.id)):this._sendUnexpectedStateError(t.id):"stop"===t.method?"active"!==this._state&&"paused"!==this._state&&"recording"!==this._state||null===this._encoderPort?this._sendUnexpectedStateError(t.id):(this._stop(this._encoderPort),this._sendAcknowledgement(t.id)):"number"==typeof t.id&&this.port.postMessage({error:{code:-32601,message:"The requested method is not supported."},id:t.id})}}process(e){let[t]=e;if("inactive"===this._state||"paused"===this._state)return!0;if("active"===this._state){if(void 0===t)throw new Error("No channelData was received for the first 
input.");if(0===t.length)return!0;this._state="recording"}if("recording"===this._state&&null!==this._encoderPort){if(void 0===t)throw new Error("No channelData was received for the first input.");return 0===t.length?this._encoderPort.postMessage(Array.from({length:this._numberOfChannels},(()=>128))):(this._encoderPort.postMessage(t,t.map((e=>{let{buffer:t}=e;return t}))),this._numberOfChannels=t.length),!0}return!1}_sendAcknowledgement(e){this.port.postMessage({id:e,result:null})}_sendUnexpectedStateError(e){this.port.postMessage({error:{code:-32603,message:"The internal state does not allow to process the given message."},id:e})}_stop(e){e.postMessage([]),e.close(),this._encoderPort=null,this._state="stopped"}}e.parameterDescriptors=[],registerProcessor("recorder-audio-worklet-processor",e)})();',lo=oo(Blob,URL,uo),fo=ao(so,io(Xr),Qr,co),$t=(e,t,n)=>({endTime:t,insertTime:n,type:"exponentialRampToValue",value:e}),Gt=(e,t,n)=>({endTime:t,insertTime:n,type:"linearRampToValue",value:e}),nt=(e,t)=>({startTime:t,type:"setValue",value:e}),wn=(e,t,n)=>({duration:n,startTime:t,type:"setValueCurve",values:e}),vn=(e,t,{startTime:n,target:r,timeConstant:o})=>r+(t-r)*Math.exp((n-e)/o),ge=e=>e.type==="exponentialRampToValue",ke=e=>e.type==="linearRampToValue",se=e=>ge(e)||ke(e),gt=e=>e.type==="setValue",te=e=>e.type==="setValueCurve",Le=(e,t,n,r)=>{const o=e[t];return o===void 0?r:se(o)||gt(o)?o.value:te(o)?o.values[o.values.length-1]:vn(n,Le(e,t-1,o.startTime,r),o)},qt=(e,t,n,r,o)=>n===void 0?[r.insertTime,o]:se(n)?[n.endTime,n.value]:gt(n)?[n.startTime,n.value]:te(n)?[n.startTime+n.duration,n.values[n.values.length-1]]:[n.startTime,Le(e,t-1,n.startTime,o)],rt=e=>e.type==="cancelAndHold",ot=e=>e.type==="cancelScheduledValues",oe=e=>rt(e)||ot(e)?e.cancelTime:ge(e)||ke(e)?e.endTime:e.startTime,zt=(e,t,n,{endTime:r,value:o})=>n===o?o:0n+(e-t)/(r-t)*(o-n),ho=(e,t)=>{const n=Math.floor(t),r=Math.ceil(t);return n===r?e[n]:(1-(t-n))*e[n]+(1-(r-t))*e[r]},po=(e,{duration:t,startTime:n,values:r})=>{const o=(e-n)/t*(r.length-1);return ho(r,o)},Oe=e=>e.type==="setTarget";class mo{constructor(t){this._automationEvents=[],this._currenTime=0,this._defaultValue=t}[Symbol.iterator](){return this._automationEvents[Symbol.iterator]()}add(t){const n=oe(t);if(rt(t)||ot(t)){const r=this._automationEvents.findIndex(s=>ot(t)&&te(s)?s.startTime+s.duration>=n:oe(s)>=n),o=this._automationEvents[r];if(r!==-1&&(this._automationEvents=this._automationEvents.slice(0,r)),rt(t)){const s=this._automationEvents[this._automationEvents.length-1];if(o!==void 0&&se(o)){if(s!==void 0&&Oe(s))throw new Error("The internal list is malformed.");const i=s===void 0?o.insertTime:te(s)?s.startTime+s.duration:oe(s),a=s===void 0?this._defaultValue:te(s)?s.values[s.values.length-1]:s.value,c=ge(o)?zt(n,i,a,o):Ht(n,i,a,o),u=ge(o)?$t(c,n,this._currenTime):Gt(c,n,this._currenTime);this._automationEvents.push(u)}if(s!==void 0&&Oe(s)&&this._automationEvents.push(nt(this.getValue(n),n)),s!==void 0&&te(s)&&s.startTime+s.duration>n){const i=n-s.startTime,a=(s.values.length-1)/s.duration,c=Math.max(2,1+Math.ceil(i*a)),u=i/(c-1)*a,d=s.values.slice(0,c);if(u<1)for(let l=1;loe(i)>n),o=r===-1?this._automationEvents[this._automationEvents.length-1]:this._automationEvents[r-1];if(o!==void 0&&te(o)&&oe(o)+o.duration>n)return!1;const 
s=ge(t)?$t(t.value,t.endTime,this._currenTime):ke(t)?Gt(t.value,n,this._currenTime):t;if(r===-1)this._automationEvents.push(s);else{if(te(t)&&n+t.duration>oe(this._automationEvents[r]))return!1;this._automationEvents.splice(r,0,s)}}return!0}flush(t){const n=this._automationEvents.findIndex(r=>oe(r)>t);if(n>1){const r=this._automationEvents.slice(n-1),o=r[0];Oe(o)&&r.unshift(nt(Le(this._automationEvents,n-2,o.startTime,this._defaultValue),o.startTime)),this._automationEvents=r}}getValue(t){if(this._automationEvents.length===0)return this._defaultValue;const n=this._automationEvents.findIndex(i=>oe(i)>t),r=this._automationEvents[n],o=(n===-1?this._automationEvents.length:n)-1,s=this._automationEvents[o];if(s!==void 0&&Oe(s)&&(r===void 0||!se(r)||r.insertTime>t))return vn(t,Le(this._automationEvents,o-1,s.startTime,this._defaultValue),s);if(s!==void 0&>(s)&&(r===void 0||!se(r)))return s.value;if(s!==void 0&&te(s)&&(r===void 0||!se(r)||s.startTime+s.duration>t))return t({cancelTime:e,type:"cancelAndHold"}),wo=e=>({cancelTime:e,type:"cancelScheduledValues"}),vo=(e,t)=>({endTime:t,type:"exponentialRampToValue",value:e}),_o=(e,t)=>({endTime:t,type:"linearRampToValue",value:e}),Eo=(e,t,n)=>({startTime:t,target:e,timeConstant:n,type:"setTarget"}),yo=()=>new DOMException("","AbortError"),Ao=e=>(t,n,[r,o,s],i)=>{e(t[o],[n,r,s],a=>a[0]===n&&a[1]===r,i)},bo=e=>(t,n,r)=>{const o=[];for(let s=0;s(t,n)=>{e.set(t,{activeInputs:new Set,passiveInputs:new WeakMap,renderer:n})},we=new WeakSet,_n=new WeakMap,En=new WeakMap,yn=new WeakMap,An=new WeakMap,bn=new WeakMap,Cn=new WeakMap,st=new WeakMap,it=new WeakMap,at=new WeakMap,Tn={construct(){return Tn}},To=e=>{try{const t=new Proxy(e,Tn);new t}catch{return!1}return!0},Xt=/^import(?:(?:[\s]+[\w]+|(?:[\s]+[\w]+[\s]*,)?[\s]*\{[\s]*[\w]+(?:[\s]+as[\s]+[\w]+)?(?:[\s]*,[\s]*[\w]+(?:[\s]+as[\s]+[\w]+)?)*[\s]*}|(?:[\s]+[\w]+[\s]*,)?[\s]*\*[\s]+as[\s]+[\w]+)[\s]+from)?(?:[\s]*)("([^"\\]|\\.)+"|'([^'\\]|\\.)+')(?:[\s]*);?/,Yt=(e,t)=>{const n=[];let r=e.replace(/^[\s]+/,""),o=r.match(Xt);for(;o!==null;){const s=o[1].slice(1,-1),i=o[0].replace(/([\s]+)?;?$/,"").replace(s,new URL(s,t).toString());n.push(i),r=r.slice(o[0].length).replace(/^[\s]+/,""),o=r.match(Xt)}return[n.join(";"),r]},Zt=e=>{if(e!==void 0&&!Array.isArray(e))throw new TypeError("The parameterDescriptors property of given value for processorCtor is not an array.")},Kt=e=>{if(!To(e))throw new TypeError("The given value for processorCtor should be a constructor.");if(e.prototype===null||typeof e.prototype!="object")throw new TypeError("The given value for processorCtor should have a prototype.")},Mo=(e,t,n,r,o,s,i,a,c,u,d,l,m)=>{let g=0;return(w,f,p={credentials:"omit"})=>{const h=d.get(w);if(h!==void 0&&h.has(f))return Promise.resolve();const _=u.get(w);if(_!==void 0){const A=_.get(f);if(A!==void 0)return A}const E=s(w),T=E.audioWorklet===void 0?o(f).then(([A,b])=>{const[y,v]=Yt(A,b),M=`${y};((a,b)=>{(a[b]=a[b]||[]).push((AudioWorkletProcessor,global,registerProcessor,sampleRate,self,window)=>{${v} -})})(window,'_AWGS')`;return n(M)}).then(()=>{const A=m._AWGS.pop();if(A===void 0)throw new SyntaxError;r(E.currentTime,E.sampleRate,()=>A(class{},void 0,(b,y)=>{if(b.trim()==="")throw t();const v=it.get(E);if(v!==void 0){if(v.has(b))throw t();Kt(y),Zt(y.parameterDescriptors),v.set(b,y)}else Kt(y),Zt(y.parameterDescriptors),it.set(E,new Map([[b,y]]))},E.sampleRate,void 0,void 0))}):Promise.all([o(f),Promise.resolve(e(l,l))]).then(([[A,b],y])=>{const 
v=g+1;g=v;const[M,k]=Yt(A,b),W=`${M};((AudioWorkletProcessor,registerProcessor)=>{${k} -})(${y?"AudioWorkletProcessor":"class extends AudioWorkletProcessor {__b=new WeakSet();constructor(){super();(p=>p.postMessage=(q=>(m,t)=>q.call(p,m,t?t.filter(u=>!this.__b.has(u)):t))(p.postMessage))(this.port)}}"},(n,p)=>registerProcessor(n,class extends p{${y?"":"__c = (a) => a.forEach(e=>this.__b.add(e.buffer));"}process(i,o,p){${y?"":"i.forEach(this.__c);o.forEach(this.__c);this.__c(Object.values(p));"}return super.process(i.map(j=>j.some(k=>k.length===0)?[]:j),o,p)}}));registerProcessor('__sac${v}',class extends AudioWorkletProcessor{process(){return !1}})`,L=new Blob([W],{type:"application/javascript; charset=utf-8"}),I=URL.createObjectURL(L);return E.audioWorklet.addModule(I,p).then(()=>{if(a(E))return E;const S=i(E);return S.audioWorklet.addModule(I,p).then(()=>S)}).then(S=>{if(c===null)throw new SyntaxError;try{new c(S,`__sac${v}`)}catch{throw new SyntaxError}}).finally(()=>URL.revokeObjectURL(I))});return _===void 0?u.set(w,new Map([[f,T]])):_.set(f,T),T.then(()=>{const A=d.get(w);A===void 0?d.set(w,new Set([f])):A.add(f)}).finally(()=>{const A=u.get(w);A!==void 0&&A.delete(f)}),T}},K=(e,t)=>{const n=e.get(t);if(n===void 0)throw new Error("A value with the given key could not be found.");return n},je=(e,t)=>{const n=Array.from(e).filter(t);if(n.length>1)throw Error("More than one element was found.");if(n.length===0)throw Error("No element was found.");const[r]=n;return e.delete(r),r},Mn=(e,t,n,r)=>{const o=K(e,t),s=je(o,i=>i[0]===n&&i[1]===r);return o.size===0&&e.delete(t),s},Ae=e=>K(Cn,e),Pe=e=>{if(we.has(e))throw new Error("The AudioNode is already stored.");we.add(e),Ae(e).forEach(t=>t(!0))},Nn=e=>"port"in e,wt=e=>{if(!we.has(e))throw new Error("The AudioNode is not stored.");we.delete(e),Ae(e).forEach(t=>t(!1))},ct=(e,t)=>{!Nn(e)&&t.every(n=>n.size===0)&&wt(e)},No=(e,t,n,r,o,s,i,a,c,u,d,l,m)=>{const g=new WeakMap;return(w,f,p,h,_)=>{const{activeInputs:E,passiveInputs:T}=s(f),{outputs:A}=s(w),b=a(w),y=v=>{const M=c(f),k=c(w);if(v){const N=Mn(T,w,p,h);e(E,w,N,!1),!_&&!l(w)&&n(k,M,p,h),m(f)&&Pe(f)}else{const N=r(E,w,p,h);t(T,h,N,!1),!_&&!l(w)&&o(k,M,p,h);const U=i(f);if(U===0)d(f)&&ct(f,E);else{const x=g.get(f);x!==void 0&&clearTimeout(x),g.set(f,setTimeout(()=>{d(f)&&ct(f,E)},U*1e3))}}};return u(A,[f,p,h],v=>v[0]===f&&v[1]===p&&v[2]===h,!0)?(b.add(y),d(w)?e(E,w,[p,h,y],!0):t(T,h,[w,p,y],!0),!0):!1}},Oo=e=>(t,n,[r,o,s],i)=>{const a=t.get(r);a===void 0?t.set(r,new Set([[o,n,s]])):e(a,[o,n,s],c=>c[0]===o&&c[1]===n,i)},Ro=e=>(t,n)=>{const r=e(t,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});n.connect(r).connect(t.destination);const o=()=>{n.removeEventListener("ended",o),n.disconnect(r),r.disconnect()};n.addEventListener("ended",o)},Io=e=>(t,n)=>{e(t).add(n)},On=(e,t)=>e.context===t,ut=e=>{try{e.copyToChannel(new Float32Array(1),0,-1)}catch{return!1}return!0},ae=()=>new DOMException("","IndexSizeError"),Rn=e=>{e.getChannelData=(t=>n=>{try{return t.call(e,n)}catch(r){throw r.code===12?ae():r}})(e.getChannelData)},So={numberOfChannels:1},ko=(e,t,n,r,o,s,i,a)=>{let c=null;return class In{constructor(d){if(o===null)throw new Error("Missing the native OfflineAudioContext constructor.");const{length:l,numberOfChannels:m,sampleRate:g}={...So,...d};c===null&&(c=new o(1,1,44100));const w=r!==null&&t(s,s)?new r({length:l,numberOfChannels:m,sampleRate:g}):c.createBuffer(m,l,g);if(w.numberOfChannels===0)throw n();return typeof 
w.copyFromChannel!="function"?(i(w),Rn(w)):t(ut,()=>ut(w))||a(w),e.add(w),w}static[Symbol.hasInstance](d){return d!==null&&typeof d=="object"&&Object.getPrototypeOf(d)===In.prototype||e.has(d)}}},$e=-34028234663852886e22,vt=-$e,ue=e=>we.has(e),Lo={buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1},Po=(e,t,n,r,o,s,i,a)=>class extends e{constructor(u,d){const l=s(u),m={...Lo,...d},g=o(l,m),w=i(l),f=w?t():null;super(u,!1,g,f),this._audioBufferSourceNodeRenderer=f,this._isBufferNullified=!1,this._isBufferSet=m.buffer!==null,this._nativeAudioBufferSourceNode=g,this._onended=null,this._playbackRate=n(this,w,g.playbackRate,vt,$e)}get buffer(){return this._isBufferNullified?null:this._nativeAudioBufferSourceNode.buffer}set buffer(u){if(this._nativeAudioBufferSourceNode.buffer=u,u!==null){if(this._isBufferSet)throw r();this._isBufferSet=!0}}get loop(){return this._nativeAudioBufferSourceNode.loop}set loop(u){this._nativeAudioBufferSourceNode.loop=u}get loopEnd(){return this._nativeAudioBufferSourceNode.loopEnd}set loopEnd(u){this._nativeAudioBufferSourceNode.loopEnd=u}get loopStart(){return this._nativeAudioBufferSourceNode.loopStart}set loopStart(u){this._nativeAudioBufferSourceNode.loopStart=u}get onended(){return this._onended}set onended(u){const d=typeof u=="function"?a(this,u):null;this._nativeAudioBufferSourceNode.onended=d;const l=this._nativeAudioBufferSourceNode.onended;this._onended=l!==null&&l===d?u:l}get playbackRate(){return this._playbackRate}start(u=0,d=0,l){if(this._nativeAudioBufferSourceNode.start(u,d,l),this._audioBufferSourceNodeRenderer!==null&&(this._audioBufferSourceNodeRenderer.start=l===void 0?[u,d]:[u,d,l]),this.context.state!=="closed"){Pe(this);const m=()=>{this._nativeAudioBufferSourceNode.removeEventListener("ended",m),ue(this)&&wt(this)};this._nativeAudioBufferSourceNode.addEventListener("ended",m)}}stop(u=0){this._nativeAudioBufferSourceNode.stop(u),this._audioBufferSourceNodeRenderer!==null&&(this._audioBufferSourceNodeRenderer.stop=u)}},xo=(e,t,n,r,o)=>()=>{const s=new WeakMap;let i=null,a=null;const c=async(u,d)=>{let l=n(u);const m=On(l,d);if(!m){const g={buffer:l.buffer,channelCount:l.channelCount,channelCountMode:l.channelCountMode,channelInterpretation:l.channelInterpretation,loop:l.loop,loopEnd:l.loopEnd,loopStart:l.loopStart,playbackRate:l.playbackRate.value};l=t(d,g),i!==null&&l.start(...i),a!==null&&l.stop(a)}return s.set(d,l),m?await e(d,u.playbackRate,l.playbackRate):await r(d,u.playbackRate,l.playbackRate),await o(u,d,l),l};return{set start(u){i=u},set stop(u){a=u},render(u,d){const l=s.get(d);return l!==void 0?Promise.resolve(l):c(u,d)}}},Uo=e=>"playbackRate"in e,Wo=e=>"frequency"in e&&"gain"in e,Bo=e=>"offset"in e,Do=e=>!("frequency"in e)&&"gain"in e,Vo=e=>"detune"in e&&"frequency"in e,Fo=e=>"pan"in e,z=e=>K(_n,e),be=e=>K(yn,e),lt=(e,t)=>{const{activeInputs:n}=z(e);n.forEach(o=>o.forEach(([s])=>{t.includes(e)||lt(s,[...t,e])}));const r=Uo(e)?[e.playbackRate]:Nn(e)?Array.from(e.parameters.values()):Wo(e)?[e.Q,e.detune,e.frequency,e.gain]:Bo(e)?[e.offset]:Do(e)?[e.gain]:Vo(e)?[e.detune,e.frequency]:Fo(e)?[e.pan]:[];for(const o of r){const s=be(o);s!==void 0&&s.activeInputs.forEach(([i])=>lt(i,t))}ue(e)&&wt(e)},jo=e=>{lt(e.destination,[])},$o=e=>e===void 0||typeof e=="number"||typeof e=="string"&&(e==="balanced"||e==="interactive"||e==="playback"),Go=(e,t,n,r,o,s,i,a)=>class extends e{constructor(u,d){const 
l=s(u),m=i(l),g=o(l,d,m),w=m?t(a):null;super(u,!1,g,w),this._isNodeOfNativeOfflineAudioContext=m,this._nativeAudioDestinationNode=g}get channelCount(){return this._nativeAudioDestinationNode.channelCount}set channelCount(u){if(this._isNodeOfNativeOfflineAudioContext)throw r();if(u>this._nativeAudioDestinationNode.maxChannelCount)throw n();this._nativeAudioDestinationNode.channelCount=u}get channelCountMode(){return this._nativeAudioDestinationNode.channelCountMode}set channelCountMode(u){if(this._isNodeOfNativeOfflineAudioContext)throw r();this._nativeAudioDestinationNode.channelCountMode=u}get maxChannelCount(){return this._nativeAudioDestinationNode.maxChannelCount}},qo=e=>{const t=new WeakMap,n=async(r,o)=>{const s=o.destination;return t.set(o,s),await e(r,o,s),s};return{render(r,o){const s=t.get(o);return s!==void 0?Promise.resolve(s):n(r,o)}}},zo=(e,t,n,r,o,s,i,a)=>(c,u)=>{const d=u.listener,l=()=>{const A=new Float32Array(1),b=t(u,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:9}),y=i(u);let v=!1,M=[0,0,-1,0,1,0],k=[0,0,0];const N=()=>{if(v)return;v=!0;const L=r(u,256,9,0);L.onaudioprocess=({inputBuffer:I})=>{const S=[s(I,A,0),s(I,A,1),s(I,A,2),s(I,A,3),s(I,A,4),s(I,A,5)];S.some((O,P)=>O!==M[P])&&(d.setOrientation(...S),M=S);const V=[s(I,A,6),s(I,A,7),s(I,A,8)];V.some((O,P)=>O!==k[P])&&(d.setPosition(...V),k=V)},b.connect(L)},U=L=>I=>{I!==M[L]&&(M[L]=I,d.setOrientation(...M))},x=L=>I=>{I!==k[L]&&(k[L]=I,d.setPosition(...k))},W=(L,I,S)=>{const V=n(u,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",offset:I});V.connect(b,0,L),V.start(),Object.defineProperty(V.offset,"defaultValue",{get(){return I}});const O=e({context:c},y,V.offset,vt,$e);return a(O,"value",P=>()=>P.call(O),P=>B=>{try{P.call(O,B)}catch(F){if(F.code!==9)throw F}N(),y&&S(B)}),O.cancelAndHoldAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.cancelAndHoldAtTime),O.cancelScheduledValues=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.cancelScheduledValues),O.exponentialRampToValueAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.exponentialRampToValueAtTime),O.linearRampToValueAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.linearRampToValueAtTime),O.setTargetAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.setTargetAtTime),O.setValueAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.setValueAtTime),O.setValueCurveAtTime=(P=>y?()=>{throw o()}:(...B)=>{const F=P.apply(O,B);return N(),F})(O.setValueCurveAtTime),O};return{forwardX:W(0,0,U(0)),forwardY:W(1,0,U(1)),forwardZ:W(2,-1,U(2)),positionX:W(6,0,x(0)),positionY:W(7,0,x(1)),positionZ:W(8,0,x(2)),upX:W(3,0,U(3)),upY:W(4,1,U(4)),upZ:W(5,0,U(5))}},{forwardX:m,forwardY:g,forwardZ:w,positionX:f,positionY:p,positionZ:h,upX:_,upY:E,upZ:T}=d.forwardX===void 0?l():d;return{get forwardX(){return m},get forwardY(){return g},get forwardZ(){return w},get positionX(){return f},get positionY(){return p},get positionZ(){return h},get upX(){return _},get upY(){return E},get upZ(){return T}}},xe=e=>"context"in e,Ce=e=>xe(e[0]),de=(e,t,n,r)=>{for(const o of e)if(n(o)){if(r)return!1;throw Error("The set contains at least one similar element.")}return e.add(t),!0},Qt=(e,t,[n,r],o)=>{de(e,[t,n,r],s=>s[0]===t&&s[1]===n,o)},Jt=(e,[t,n,r],o)=>{const s=e.get(t);s===void 0?e.set(t,new Set([[n,r]])):de(s,[n,r],i=>i[0]===n,o)},Sn=e=>"inputs"in e,dt=(e,t,n,r)=>{if(Sn(t)){const 
o=t.inputs[r];return e.connect(o,n,0),[o,n,0]}return e.connect(t,n,r),[t,n,r]},kn=(e,t,n)=>{for(const r of e)if(r[0]===t&&r[1]===n)return e.delete(r),r;return null},Ho=(e,t,n)=>je(e,r=>r[0]===t&&r[1]===n),Ln=(e,t)=>{if(!Ae(e).delete(t))throw new Error("Missing the expected event listener.")},Pn=(e,t,n)=>{const r=K(e,t),o=je(r,s=>s[0]===n);return r.size===0&&e.delete(t),o},ft=(e,t,n,r)=>{Sn(t)?e.disconnect(t.inputs[r],n,0):e.disconnect(t,n,r)},Y=e=>K(En,e),Ee=e=>K(An,e),le=e=>st.has(e),Ie=e=>!we.has(e),en=(e,t)=>new Promise(n=>{if(t!==null)n(!0);else{const r=e.createScriptProcessor(256,1,1),o=e.createGain(),s=e.createBuffer(1,2,44100),i=s.getChannelData(0);i[0]=1,i[1]=1;const a=e.createBufferSource();a.buffer=s,a.loop=!0,a.connect(r).connect(e.destination),a.connect(o),a.disconnect(o),r.onaudioprocess=c=>{const u=c.inputBuffer.getChannelData(0);Array.prototype.some.call(u,d=>d===1)?n(!0):n(!1),a.stop(),r.onaudioprocess=null,a.disconnect(r),r.disconnect(e.destination)},a.start()}}),et=(e,t)=>{const n=new Map;for(const r of e)for(const o of r){const s=n.get(o);n.set(o,s===void 0?1:s+1)}n.forEach((r,o)=>t(o,r))},Ue=e=>"context"in e,Xo=e=>{const t=new Map;e.connect=(n=>(r,o=0,s=0)=>{const i=Ue(r)?n(r,o,s):n(r,o),a=t.get(r);return a===void 0?t.set(r,[{input:s,output:o}]):a.every(c=>c.input!==s||c.output!==o)&&a.push({input:s,output:o}),i})(e.connect.bind(e)),e.disconnect=(n=>(r,o,s)=>{if(n.apply(e),r===void 0)t.clear();else if(typeof r=="number")for(const[i,a]of t){const c=a.filter(u=>u.output!==r);c.length===0?t.delete(i):t.set(i,c)}else if(t.has(r))if(o===void 0)t.delete(r);else{const i=t.get(r);if(i!==void 0){const a=i.filter(c=>c.output!==o&&(c.input!==s||s===void 0));a.length===0?t.delete(r):t.set(r,a)}}for(const[i,a]of t)a.forEach(c=>{Ue(i)?e.connect(i,c.output,c.input):e.connect(i,c.output)})})(e.disconnect)},Yo=(e,t,n,r)=>{const{activeInputs:o,passiveInputs:s}=be(t),{outputs:i}=z(e),a=Ae(e),c=u=>{const d=Y(e),l=Ee(t);if(u){const m=Pn(s,e,n);Qt(o,e,m,!1),!r&&!le(e)&&d.connect(l,n)}else{const m=Ho(o,e,n);Jt(s,m,!1),!r&&!le(e)&&d.disconnect(l,n)}};return de(i,[t,n],u=>u[0]===t&&u[1]===n,!0)?(a.add(c),ue(e)?Qt(o,e,[n,c],!0):Jt(s,[e,n,c],!0),!0):!1},Zo=(e,t,n,r)=>{const{activeInputs:o,passiveInputs:s}=z(t),i=kn(o[r],e,n);return i===null?[Mn(s,e,n,r)[2],!1]:[i[2],!0]},Ko=(e,t,n)=>{const{activeInputs:r,passiveInputs:o}=be(t),s=kn(r,e,n);return s===null?[Pn(o,e,n)[1],!1]:[s[2],!0]},_t=(e,t,n,r,o)=>{const[s,i]=Zo(e,n,r,o);if(s!==null&&(Ln(e,s),i&&!t&&!le(e)&&ft(Y(e),Y(n),r,o)),ue(n)){const{activeInputs:a}=z(n);ct(n,a)}},Et=(e,t,n,r)=>{const[o,s]=Ko(e,n,r);o!==null&&(Ln(e,o),s&&!t&&!le(e)&&Y(e).disconnect(Ee(n),r))},Qo=(e,t)=>{const n=z(e),r=[];for(const o of n.outputs)Ce(o)?_t(e,t,...o):Et(e,t,...o),r.push(o[0]);return n.outputs.clear(),r},Jo=(e,t,n)=>{const r=z(e),o=[];for(const s of r.outputs)s[1]===n&&(Ce(s)?_t(e,t,...s):Et(e,t,...s),o.push(s[0]),r.outputs.delete(s));return o},es=(e,t,n,r,o)=>{const s=z(e);return Array.from(s.outputs).filter(i=>i[0]===n&&(r===void 0||i[1]===r)&&(o===void 0||i[2]===o)).map(i=>(Ce(i)?_t(e,t,...i):Et(e,t,...i),s.outputs.delete(i),i[0]))},ts=(e,t,n,r,o,s,i,a,c,u,d,l,m,g,w,f)=>class extends u{constructor(h,_,E,T){super(E),this._context=h,this._nativeAudioNode=E;const A=d(h);l(A)&&n(en,()=>en(A,f))!==!0&&Xo(E),En.set(this,E),Cn.set(this,new Set),h.state!=="closed"&&_&&Pe(this),e(this,T,E)}get channelCount(){return this._nativeAudioNode.channelCount}set channelCount(h){this._nativeAudioNode.channelCount=h}get channelCountMode(){return 
this._nativeAudioNode.channelCountMode}set channelCountMode(h){this._nativeAudioNode.channelCountMode=h}get channelInterpretation(){return this._nativeAudioNode.channelInterpretation}set channelInterpretation(h){this._nativeAudioNode.channelInterpretation=h}get context(){return this._context}get numberOfInputs(){return this._nativeAudioNode.numberOfInputs}get numberOfOutputs(){return this._nativeAudioNode.numberOfOutputs}connect(h,_=0,E=0){if(_<0||_>=this._nativeAudioNode.numberOfOutputs)throw o();const T=d(this._context),A=w(T);if(m(h)||g(h))throw s();if(xe(h)){const v=Y(h);try{const k=dt(this._nativeAudioNode,v,_,E),N=Ie(this);(A||N)&&this._nativeAudioNode.disconnect(...k),this.context.state!=="closed"&&!N&&Ie(h)&&Pe(h)}catch(k){throw k.code===12?s():k}if(t(this,h,_,E,A)){const k=c([this],h);et(k,r(A))}return h}const b=Ee(h);if(b.name==="playbackRate"&&b.maxValue===1024)throw i();try{this._nativeAudioNode.connect(b,_),(A||Ie(this))&&this._nativeAudioNode.disconnect(b,_)}catch(v){throw v.code===12?s():v}if(Yo(this,h,_,A)){const v=c([this],h);et(v,r(A))}}disconnect(h,_,E){let T;const A=d(this._context),b=w(A);if(h===void 0)T=Qo(this,b);else if(typeof h=="number"){if(h<0||h>=this.numberOfOutputs)throw o();T=Jo(this,b,h)}else{if(_!==void 0&&(_<0||_>=this.numberOfOutputs)||xe(h)&&E!==void 0&&(E<0||E>=h.numberOfInputs))throw o();if(T=es(this,b,h,_,E),T.length===0)throw s()}for(const y of T){const v=c([this],y);et(v,a)}}},ns=(e,t,n,r,o,s,i,a,c,u,d,l,m)=>(g,w,f,p=null,h=null)=>{const _=f.value,E=new mo(_),T=w?r(E):null,A={get defaultValue(){return _},get maxValue(){return p===null?f.maxValue:p},get minValue(){return h===null?f.minValue:h},get value(){return f.value},set value(b){f.value=b,A.setValueAtTime(b,g.context.currentTime)},cancelAndHoldAtTime(b){if(typeof f.cancelAndHoldAtTime=="function")T===null&&E.flush(g.context.currentTime),E.add(o(b)),f.cancelAndHoldAtTime(b);else{const y=Array.from(E).pop();T===null&&E.flush(g.context.currentTime),E.add(o(b));const v=Array.from(E).pop();f.cancelScheduledValues(b),y!==v&&v!==void 0&&(v.type==="exponentialRampToValue"?f.exponentialRampToValueAtTime(v.value,v.endTime):v.type==="linearRampToValue"?f.linearRampToValueAtTime(v.value,v.endTime):v.type==="setValue"?f.setValueAtTime(v.value,v.startTime):v.type==="setValueCurve"&&f.setValueCurveAtTime(v.values,v.startTime,v.duration))}return A},cancelScheduledValues(b){return T===null&&E.flush(g.context.currentTime),E.add(s(b)),f.cancelScheduledValues(b),A},exponentialRampToValueAtTime(b,y){if(b===0)throw new RangeError;if(!Number.isFinite(y)||y<0)throw new RangeError;const v=g.context.currentTime;return T===null&&E.flush(v),Array.from(E).length===0&&(E.add(u(_,v)),f.setValueAtTime(_,v)),E.add(i(b,y)),f.exponentialRampToValueAtTime(b,y),A},linearRampToValueAtTime(b,y){const v=g.context.currentTime;return T===null&&E.flush(v),Array.from(E).length===0&&(E.add(u(_,v)),f.setValueAtTime(_,v)),E.add(a(b,y)),f.linearRampToValueAtTime(b,y),A},setTargetAtTime(b,y,v){return T===null&&E.flush(g.context.currentTime),E.add(c(b,y,v)),f.setTargetAtTime(b,y,v),A},setValueAtTime(b,y){return T===null&&E.flush(g.context.currentTime),E.add(u(b,y)),f.setValueAtTime(b,y),A},setValueCurveAtTime(b,y,v){const M=b instanceof Float32Array?b:new Float32Array(b);if(l!==null&&l.name==="webkitAudioContext"){const k=y+v,N=g.context.sampleRate,U=Math.ceil(y*N),x=Math.floor(k*N),W=x-U,L=new Float32Array(W);for(let S=0;S({replay(t){for(const n of 
e)if(n.type==="exponentialRampToValue"){const{endTime:r,value:o}=n;t.exponentialRampToValueAtTime(o,r)}else if(n.type==="linearRampToValue"){const{endTime:r,value:o}=n;t.linearRampToValueAtTime(o,r)}else if(n.type==="setTarget"){const{startTime:r,target:o,timeConstant:s}=n;t.setTargetAtTime(o,r,s)}else if(n.type==="setValue"){const{startTime:r,value:o}=n;t.setValueAtTime(o,r)}else if(n.type==="setValueCurve"){const{duration:r,startTime:o,values:s}=n;t.setValueCurveAtTime(s,o,r)}else throw new Error("Can't apply an unknown automation.")}});class xn{constructor(t){this._map=new Map(t)}get size(){return this._map.size}entries(){return this._map.entries()}forEach(t,n=null){return this._map.forEach((r,o)=>t.call(n,r,o,this))}get(t){return this._map.get(t)}has(t){return this._map.has(t)}keys(){return this._map.keys()}values(){return this._map.values()}}const os={channelCount:2,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:1,numberOfOutputs:1,parameterData:{},processorOptions:{}},ss=(e,t,n,r,o,s,i,a,c,u,d,l,m,g)=>class extends t{constructor(f,p,h){var _;const E=a(f),T=c(E),A=d({...os,...h});m(A);const b=it.get(E),y=b?.get(p),v=T||E.state!=="closed"?E:(_=i(E))!==null&&_!==void 0?_:E,M=o(v,T?null:f.baseLatency,u,p,y,A),k=T?r(p,A,y):null;super(f,!0,M,k);const N=[];M.parameters.forEach((x,W)=>{const L=n(this,T,x);N.push([W,L])}),this._nativeAudioWorkletNode=M,this._onprocessorerror=null,this._parameters=new xn(N),T&&e(E,this);const{activeInputs:U}=s(this);l(M,U)}get onprocessorerror(){return this._onprocessorerror}set onprocessorerror(f){const p=typeof f=="function"?g(this,f):null;this._nativeAudioWorkletNode.onprocessorerror=p;const h=this._nativeAudioWorkletNode.onprocessorerror;this._onprocessorerror=h!==null&&h===p?f:h}get parameters(){return this._parameters===null?this._nativeAudioWorkletNode.parameters:this._parameters}get port(){return this._nativeAudioWorkletNode.port}};function We(e,t,n,r,o){if(typeof e.copyFromChannel=="function")t[n].byteLength===0&&(t[n]=new Float32Array(128)),e.copyFromChannel(t[n],r,o);else{const s=e.getChannelData(r);if(t[n].byteLength===0)t[n]=s.slice(o,o+128);else{const i=new Float32Array(s.buffer,o*Float32Array.BYTES_PER_ELEMENT,128);t[n].set(i)}}}const Un=(e,t,n,r,o)=>{typeof e.copyToChannel=="function"?t[n].byteLength!==0&&e.copyToChannel(t[n],r,o):t[n].byteLength!==0&&e.getChannelData(r).set(t[n],o)},Be=(e,t)=>{const n=[];for(let r=0;r{const n=K(at,e),r=Y(t);return K(n,r)},as=async(e,t,n,r,o,s,i)=>{const a=t===null?Math.ceil(e.context.length/128)*128:t.length,c=r.channelCount*r.numberOfInputs,u=o.reduce((p,h)=>p+h,0),d=u===0?null:n.createBuffer(u,a,n.sampleRate);if(s===void 0)throw new Error("Missing the processor constructor.");const l=z(e),m=await is(n,e),g=Be(r.numberOfInputs,r.channelCount),w=Be(r.numberOfOutputs,o),f=Array.from(e.parameters.keys()).reduce((p,h)=>({...p,[h]:new Float32Array(128)}),{});for(let p=0;p0&&t!==null)for(let h=0;h{We(t,f,h,c+_,p)});for(let h=0;hl.activeInputs[T].size===0?[]:E),_=i(p/n.sampleRate,n.sampleRate,()=>m.process(h,w,f));if(d!==null)for(let E=0,T=0;E(p,h,_)=>{const E=new WeakMap;let T=null;const A=async(b,y)=>{let v=d(b),M=null;const k=On(v,y),N=Array.isArray(h.outputChannelCount)?h.outputChannelCount:Array.from(h.outputChannelCount);if(l===null){const U=N.reduce((I,S)=>I+S,0),x=o(y,{channelCount:Math.max(1,U),channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:Math.max(1,U)}),W=[];for(let I=0;I{const B=new 
m(O,Math.ceil(b.context.length/128)*128,y.sampleRate),F=[],Q=[];for(let $=0;${const H=s(B,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",offset:$.value});return await g(B,$,H.offset),H})),me=r(B,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:Math.max(1,S+V)});for(let $=0;$w(b,B,$))),f(B)})(),y,h,N,_,u)}const U=await T,x=n(y,{buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1}),[W,L,I]=M;U!==null&&(x.buffer=U,x.start(0)),x.connect(W);for(let S=0,V=0;S(n,r)=>{const o=t.get(n);if(o!==void 0)return o;const s=e.get(n);if(s!==void 0)return s;try{const i=r();return i instanceof Promise?(e.set(n,i),i.catch(()=>!1).then(a=>(e.delete(n),t.set(n,a),a))):(t.set(n,i),i)}catch{return t.set(n,!1),!1}},ls=e=>(t,n,r)=>e(n,t,r),ds=e=>(t,n,r=0,o=0)=>{const s=t[r];if(s===void 0)throw e();return Ue(n)?s.connect(n,0,o):s.connect(n,0)},fs=e=>t=>(e[0]=t,e[0]),hs=()=>new DOMException("","DataCloneError"),tn=e=>{const{port1:t,port2:n}=new MessageChannel;return new Promise(r=>{const o=()=>{n.onmessage=null,t.close(),n.close(),r()};n.onmessage=()=>o();try{t.postMessage(e,[e])}catch{}finally{o()}})},ps=(e,t,n,r,o,s,i,a,c,u,d)=>(l,m)=>{const g=i(l)?l:s(l);if(o.has(m)){const w=n();return Promise.reject(w)}try{o.add(m)}catch{}return t(c,()=>c(g))?g.decodeAudioData(m).then(w=>(tn(m).catch(()=>{}),t(a,()=>a(w))||d(w),e.add(w),w)):new Promise((w,f)=>{const p=async()=>{try{await tn(m)}catch{}},h=_=>{f(_),p()};try{g.decodeAudioData(m,_=>{typeof _.copyFromChannel!="function"&&(u(_),Rn(_)),e.add(_),p().then(()=>w(_))},_=>{h(_===null?r():_)})}catch(_){h(_)}})},ms=(e,t,n,r,o,s,i,a)=>(c,u)=>{const d=t.get(c);if(d===void 0)throw new Error("Missing the expected cycle count.");const l=s(c.context),m=a(l);if(d===u){if(t.delete(c),!m&&i(c)){const g=r(c),{outputs:w}=n(c);for(const f of w)if(Ce(f)){const p=r(f[0]);e(g,p,f[1],f[2])}else{const p=o(f[0]);g.connect(p,f[1])}}}else t.set(c,d-u)},gs=e=>(t,n,r,o)=>e(t[o],s=>s[0]===n&&s[1]===r),ws=e=>(t,n)=>{e(t).delete(n)},vs=e=>"delayTime"in e,_s=(e,t,n)=>function r(o,s){const i=xe(s)?s:n(e,s);if(vs(i))return[];if(o[0]===i)return[o];if(o.includes(i))return[];const{outputs:a}=t(i);return Array.from(a).map(c=>r([...o,i],c[0])).reduce((c,u)=>c.concat(u),[])},Re=(e,t,n)=>{const r=t[n];if(r===void 0)throw e();return r},Es=e=>(t,n=void 0,r=void 0,o=0)=>n===void 0?t.forEach(s=>s.disconnect()):typeof n=="number"?Re(e,t,n).disconnect():Ue(n)?r===void 0?t.forEach(s=>s.disconnect(n)):o===void 0?Re(e,t,r).disconnect(n,0):Re(e,t,r).disconnect(n,0,o):r===void 0?t.forEach(s=>s.disconnect(n)):Re(e,t,r).disconnect(n,0),ys=()=>new DOMException("","EncodingError"),As=e=>t=>new Promise((n,r)=>{if(e===null){r(new SyntaxError);return}const o=e.document.head;if(o===null)r(new SyntaxError);else{const s=e.document.createElement("script"),i=new Blob([t],{type:"application/javascript"}),a=URL.createObjectURL(i),c=e.onerror,u=()=>{e.onerror=c,URL.revokeObjectURL(a)};e.onerror=(d,l,m,g,w)=>{if(l===a||l===e.location.href&&m===1&&g===1)return u(),r(w),!1;if(c!==null)return c(d,l,m,g,w)},s.onerror=()=>{u(),r(new SyntaxError)},s.onload=()=>{u(),n()},s.src=a,s.type="module",o.appendChild(s)}}),bs=e=>class{constructor(n){this._nativeEventTarget=n,this._listeners=new WeakMap}addEventListener(n,r,o){if(r!==null){let s=this._listeners.get(r);s===void 0&&(s=e(this,r),typeof 
r=="function"&&this._listeners.set(r,s)),this._nativeEventTarget.addEventListener(n,s,o)}}dispatchEvent(n){return this._nativeEventTarget.dispatchEvent(n)}removeEventListener(n,r,o){const s=r===null?void 0:this._listeners.get(r);this._nativeEventTarget.removeEventListener(n,s===void 0?null:s,o)}},Cs=e=>(t,n,r)=>{Object.defineProperties(e,{currentFrame:{configurable:!0,get(){return Math.round(t*n)}},currentTime:{configurable:!0,get(){return t}}});try{return r()}finally{e!==null&&(delete e.currentFrame,delete e.currentTime)}},Ts=e=>async t=>{try{const n=await fetch(t);if(n.ok)return[await n.text(),n.url]}catch{}throw e()},Ms=(e,t)=>n=>t(e,n),Ns=e=>t=>{const n=e(t);if(n.renderer===null)throw new Error("Missing the renderer of the given AudioNode in the audio graph.");return n.renderer},Os=e=>t=>{var n;return(n=e.get(t))!==null&&n!==void 0?n:0},Rs=e=>t=>{const n=e(t);if(n.renderer===null)throw new Error("Missing the renderer of the given AudioParam in the audio graph.");return n.renderer},Is=e=>t=>e.get(t),Z=()=>new DOMException("","InvalidStateError"),Ss=e=>t=>{const n=e.get(t);if(n===void 0)throw Z();return n},ks=(e,t)=>n=>{let r=e.get(n);if(r!==void 0)return r;if(t===null)throw new Error("Missing the native OfflineAudioContext constructor.");return r=new t(1,1,44100),e.set(n,r),r},Ls=e=>t=>{const n=e.get(t);if(n===void 0)throw new Error("The context has no set of AudioWorkletNodes.");return n},Ps=()=>new DOMException("","InvalidAccessError"),xs=(e,t,n,r,o,s)=>i=>(a,c)=>{const u=e.get(a);if(u===void 0){if(!i&&s(a)){const d=r(a),{outputs:l}=n(a);for(const m of l)if(Ce(m)){const g=r(m[0]);t(d,g,m[1],m[2])}else{const g=o(m[0]);d.disconnect(g,m[1])}}e.set(a,c)}else e.set(a,u+c)},Us=e=>t=>e!==null&&t instanceof e,Ws=e=>t=>e!==null&&typeof e.AudioNode=="function"&&t instanceof e.AudioNode,Bs=e=>t=>e!==null&&typeof e.AudioParam=="function"&&t instanceof e.AudioParam,Ds=(e,t)=>n=>e(n)||t(n),Vs=e=>t=>e!==null&&t instanceof e,Fs=e=>e!==null&&e.isSecureContext,js=(e,t,n,r)=>class extends e{constructor(s,i){const a=n(s),c=t(a,i);if(r(a))throw new TypeError;super(s,!0,c,null),this._nativeMediaStreamAudioSourceNode=c}get mediaStream(){return this._nativeMediaStreamAudioSourceNode.mediaStream}},$s=(e,t,n,r,o)=>class extends r{constructor(i={}){if(o===null)throw new Error("Missing the native AudioContext constructor.");let a;try{a=new o(i)}catch(d){throw d.code===12&&d.message==="sampleRate is not in range"?t():d}if(a===null)throw n();if(!$o(i.latencyHint))throw new TypeError(`The provided value '${i.latencyHint}' is not a valid enum value of type AudioContextLatencyCategory.`);if(i.sampleRate!==void 0&&a.sampleRate!==i.sampleRate)throw t();super(a,2);const{latencyHint:c}=i,{sampleRate:u}=a;if(this._baseLatency=typeof a.baseLatency=="number"?a.baseLatency:c==="balanced"?512/u:c==="interactive"||c===void 0?256/u:c==="playback"?1024/u:Math.max(2,Math.min(128,Math.round(c*u/128)))*128/u,this._nativeAudioContext=a,o.name==="webkitAudioContext"?(this._nativeGainNode=a.createGain(),this._nativeOscillatorNode=a.createOscillator(),this._nativeGainNode.gain.value=1e-37,this._nativeOscillatorNode.connect(this._nativeGainNode).connect(a.destination),this._nativeOscillatorNode.start()):(this._nativeGainNode=null,this._nativeOscillatorNode=null),this._state=null,a.state==="running"){this._state="suspended";const d=()=>{this._state==="suspended"&&(this._state=null),a.removeEventListener("statechange",d)};a.addEventListener("statechange",d)}}get baseLatency(){return this._baseLatency}get state(){return 
this._state!==null?this._state:this._nativeAudioContext.state}close(){return this.state==="closed"?this._nativeAudioContext.close().then(()=>{throw e()}):(this._state==="suspended"&&(this._state=null),this._nativeAudioContext.close().then(()=>{this._nativeGainNode!==null&&this._nativeOscillatorNode!==null&&(this._nativeOscillatorNode.stop(),this._nativeGainNode.disconnect(),this._nativeOscillatorNode.disconnect()),jo(this)}))}resume(){return this._state==="suspended"?new Promise((i,a)=>{const c=()=>{this._nativeAudioContext.removeEventListener("statechange",c),this._nativeAudioContext.state==="running"?i():this.resume().then(i,a)};this._nativeAudioContext.addEventListener("statechange",c)}):this._nativeAudioContext.resume().catch(i=>{throw i===void 0||i.code===15?e():i})}suspend(){return this._nativeAudioContext.suspend().catch(i=>{throw i===void 0?e():i})}},Gs=(e,t,n,r,o,s)=>class extends n{constructor(a,c){super(a),this._nativeContext=a,bn.set(this,a),r(a)&&o.set(a,new Set),this._destination=new e(this,c),this._listener=t(this,a),this._onstatechange=null}get currentTime(){return this._nativeContext.currentTime}get destination(){return this._destination}get listener(){return this._listener}get onstatechange(){return this._onstatechange}set onstatechange(a){const c=typeof a=="function"?s(this,a):null;this._nativeContext.onstatechange=c;const u=this._nativeContext.onstatechange;this._onstatechange=u!==null&&u===c?a:u}get sampleRate(){return this._nativeContext.sampleRate}get state(){return this._nativeContext.state}},ht=e=>{const t=new Uint32Array([1179011410,40,1163280727,544501094,16,131073,44100,176400,1048580,1635017060,4,0]);try{const n=e.decodeAudioData(t.buffer,()=>{});return n===void 0?!1:(n.catch(()=>{}),!0)}catch{}return!1},qs=(e,t)=>(n,r,o)=>{const s=new Set;return n.connect=(i=>(a,c=0,u=0)=>{const d=s.size===0;if(t(a))return i.call(n,a,c,u),e(s,[a,c,u],l=>l[0]===a&&l[1]===c&&l[2]===u,!0),d&&r(),a;i.call(n,a,c),e(s,[a,c],l=>l[0]===a&&l[1]===c,!0),d&&r()})(n.connect),n.disconnect=(i=>(a,c,u)=>{const d=s.size>0;if(a===void 0)i.apply(n),s.clear();else if(typeof a=="number"){i.call(n,a);for(const m of s)m[1]===a&&s.delete(m)}else{t(a)?i.call(n,a,c,u):i.call(n,a,c);for(const m of s)m[0]===a&&(c===void 0||m[1]===c)&&(u===void 0||m[2]===u)&&s.delete(m)}const l=s.size===0;d&&l&&o()})(n.disconnect),n},ce=(e,t,n)=>{const r=t[n];r!==void 0&&r!==e[n]&&(e[n]=r)},Te=(e,t)=>{ce(e,t,"channelCount"),ce(e,t,"channelCountMode"),ce(e,t,"channelInterpretation")},zs=e=>e===null?null:e.hasOwnProperty("AudioBuffer")?e.AudioBuffer:null,yt=(e,t,n)=>{const r=t[n];r!==void 0&&r!==e[n].value&&(e[n].value=r)},Hs=e=>{e.start=(t=>{let n=!1;return(r=0,o=0,s)=>{if(n)throw Z();t.call(e,r,o,s),n=!0}})(e.start)},Wn=e=>{e.start=(t=>(n=0,r=0,o)=>{if(typeof o=="number"&&o<0||r<0||n<0)throw new RangeError("The parameters can't be negative.");t.call(e,n,r,o)})(e.start)},Bn=e=>{e.stop=(t=>(n=0)=>{if(n<0)throw new RangeError("The parameter can't be negative.");t.call(e,n)})(e.stop)},Xs=(e,t,n,r,o,s,i,a,c,u,d)=>(l,m)=>{const g=l.createBufferSource();return Te(g,m),yt(g,m,"playbackRate"),ce(g,m,"buffer"),ce(g,m,"loop"),ce(g,m,"loopEnd"),ce(g,m,"loopStart"),t(n,()=>n(l))||Hs(g),t(r,()=>r(l))||c(g),t(o,()=>o(l))||u(g,l),t(s,()=>s(l))||Wn(g),t(i,()=>i(l))||d(g,l),t(a,()=>a(l))||Bn(g),e(l,g),g},Ys=e=>e===null?null:e.hasOwnProperty("AudioContext")?e.AudioContext:e.hasOwnProperty("webkitAudioContext")?e.webkitAudioContext:null,Zs=(e,t)=>(n,r,o)=>{const 
s=n.destination;if(s.channelCount!==r)try{s.channelCount=r}catch{}o&&s.channelCountMode!=="explicit"&&(s.channelCountMode="explicit"),s.maxChannelCount===0&&Object.defineProperty(s,"maxChannelCount",{value:r});const i=e(n,{channelCount:r,channelCountMode:s.channelCountMode,channelInterpretation:s.channelInterpretation,gain:1});return t(i,"channelCount",a=>()=>a.call(i),a=>c=>{a.call(i,c);try{s.channelCount=c}catch(u){if(c>s.maxChannelCount)throw u}}),t(i,"channelCountMode",a=>()=>a.call(i),a=>c=>{a.call(i,c),s.channelCountMode=c}),t(i,"channelInterpretation",a=>()=>a.call(i),a=>c=>{a.call(i,c),s.channelInterpretation=c}),Object.defineProperty(i,"maxChannelCount",{get:()=>s.maxChannelCount}),i.connect(s),i},Ks=e=>e===null?null:e.hasOwnProperty("AudioWorkletNode")?e.AudioWorkletNode:null,Qs=e=>{const{port1:t}=new MessageChannel;try{t.postMessage(e)}finally{t.close()}},Js=(e,t,n,r,o)=>(s,i,a,c,u,d)=>{if(a!==null)try{const l=new a(s,c,d),m=new Map;let g=null;if(Object.defineProperties(l,{channelCount:{get:()=>d.channelCount,set:()=>{throw e()}},channelCountMode:{get:()=>"explicit",set:()=>{throw e()}},onprocessorerror:{get:()=>g,set:w=>{typeof g=="function"&&l.removeEventListener("processorerror",g),g=typeof w=="function"?w:null,typeof g=="function"&&l.addEventListener("processorerror",g)}}}),l.addEventListener=(w=>(...f)=>{if(f[0]==="processorerror"){const p=typeof f[1]=="function"?f[1]:typeof f[1]=="object"&&f[1]!==null&&typeof f[1].handleEvent=="function"?f[1].handleEvent:null;if(p!==null){const h=m.get(f[1]);h!==void 0?f[1]=h:(f[1]=_=>{_.type==="error"?(Object.defineProperties(_,{type:{value:"processorerror"}}),p(_)):p(new ErrorEvent(f[0],{..._}))},m.set(p,f[1]))}}return w.call(l,"error",f[1],f[2]),w.call(l,...f)})(l.addEventListener),l.removeEventListener=(w=>(...f)=>{if(f[0]==="processorerror"){const p=m.get(f[1]);p!==void 0&&(m.delete(f[1]),f[1]=p)}return w.call(l,"error",f[1],f[2]),w.call(l,f[0],f[1],f[2])})(l.removeEventListener),d.numberOfOutputs!==0){const w=n(s,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});return l.connect(w).connect(s.destination),o(l,()=>w.disconnect(),()=>w.connect(s.destination))}return l}catch(l){throw l.code===11?r():l}if(u===void 0)throw r();return Qs(d),t(s,i,u,d)},ei=(e,t)=>e===null?512:Math.max(512,Math.min(16384,Math.pow(2,Math.round(Math.log2(e*t))))),ti=e=>new Promise((t,n)=>{const{port1:r,port2:o}=new MessageChannel;r.onmessage=({data:s})=>{r.close(),o.close(),t(s)},r.onmessageerror=({data:s})=>{r.close(),o.close(),n(s)},o.postMessage(e)}),ni=async(e,t)=>{const n=await ti(t);return new e(n)},ri=(e,t,n,r)=>{let o=at.get(e);o===void 0&&(o=new WeakMap,at.set(e,o));const s=ni(n,r);return o.set(t,s),s},oi=(e,t,n,r,o,s,i,a,c,u,d,l,m)=>(g,w,f,p)=>{if(p.numberOfInputs===0&&p.numberOfOutputs===0)throw c();const h=Array.isArray(p.outputChannelCount)?p.outputChannelCount:Array.from(p.outputChannelCount);if(h.some(C=>C<1))throw c();if(h.length!==p.numberOfOutputs)throw t();if(p.channelCountMode!=="explicit")throw c();const _=p.channelCount*p.numberOfInputs,E=h.reduce((C,R)=>C+R,0),T=f.parameterDescriptors===void 0?0:f.parameterDescriptors.length;if(_+T>6||E>6)throw c();const A=new MessageChannel,b=[],y=[];for(let C=0;CC===void 0?0:C},maxValue:{get:()=>R===void 0?vt:R},minValue:{get:()=>q===void 0?$e:q}}),v.push(D)}const 
M=r(g,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"speakers",numberOfInputs:Math.max(1,_+T)}),k=ei(w,g.sampleRate),N=a(g,k,_+T,Math.max(1,E)),U=o(g,{channelCount:Math.max(1,E),channelCountMode:"explicit",channelInterpretation:"discrete",numberOfOutputs:Math.max(1,E)}),x=[];for(let C=0;C{const q=v[R];return q.connect(M,0,_+R),q.start(0),[C,q.offset]}));M.connect(N);let L=p.channelInterpretation,I=null;const S=p.numberOfOutputs===0?[N]:x,V={get bufferSize(){return k},get channelCount(){return p.channelCount},set channelCount(C){throw n()},get channelCountMode(){return p.channelCountMode},set channelCountMode(C){throw n()},get channelInterpretation(){return L},set channelInterpretation(C){for(const R of b)R.channelInterpretation=C;L=C},get context(){return N.context},get inputs(){return b},get numberOfInputs(){return p.numberOfInputs},get numberOfOutputs(){return p.numberOfOutputs},get onprocessorerror(){return I},set onprocessorerror(C){typeof I=="function"&&V.removeEventListener("processorerror",I),I=typeof C=="function"?C:null,typeof I=="function"&&V.addEventListener("processorerror",I)},get parameters(){return W},get port(){return A.port2},addEventListener(...C){return N.addEventListener(C[0],C[1],C[2])},connect:e.bind(null,S),disconnect:u.bind(null,S),dispatchEvent(...C){return N.dispatchEvent(C[0])},removeEventListener(...C){return N.removeEventListener(C[0],C[1],C[2])}},O=new Map;A.port1.addEventListener=(C=>(...R)=>{if(R[0]==="message"){const q=typeof R[1]=="function"?R[1]:typeof R[1]=="object"&&R[1]!==null&&typeof R[1].handleEvent=="function"?R[1].handleEvent:null;if(q!==null){const j=O.get(R[1]);j!==void 0?R[1]=j:(R[1]=D=>{d(g.currentTime,g.sampleRate,()=>q(D))},O.set(q,R[1]))}}return C.call(A.port1,R[0],R[1],R[2])})(A.port1.addEventListener),A.port1.removeEventListener=(C=>(...R)=>{if(R[0]==="message"){const q=O.get(R[1]);q!==void 0&&(O.delete(R[1]),R[1]=q)}return C.call(A.port1,R[0],R[1],R[2])})(A.port1.removeEventListener);let P=null;Object.defineProperty(A.port1,"onmessage",{get:()=>P,set:C=>{typeof P=="function"&&A.port1.removeEventListener("message",P),P=typeof C=="function"?C:null,typeof P=="function"&&(A.port1.addEventListener("message",P),A.port1.start())}}),f.prototype.port=A.port1;let B=null;ri(g,V,f,p).then(C=>B=C);const Q=Be(p.numberOfInputs,p.channelCount),pe=Be(p.numberOfOutputs,h),me=f.parameterDescriptors===void 0?[]:f.parameterDescriptors.reduce((C,{name:R})=>({...C,[R]:new Float32Array(128)}),{});let $=!0;const H=()=>{p.numberOfOutputs>0&&N.disconnect(U);for(let C=0,R=0;C{if(B!==null){const q=l(V);for(let j=0;j{We(C,me,D,_+G,j)});for(let D=0;D{if(q[re].size>0)return Me.set(re,k/128),X;const Qe=Me.get(re);return Qe===void 0?[]:(X.every(gr=>gr.every(wr=>wr===0))&&(Qe===1?Me.delete(re):Me.set(re,Qe-1)),X)});$=d(g.currentTime+j/g.sampleRate,g.sampleRate,()=>B.process(D,pe,me));for(let X=0,re=0;XN.connect(Ke).connect(g.destination),kt=()=>{N.disconnect(Ke),Ke.disconnect()},pr=()=>{if($){kt(),p.numberOfOutputs>0&&N.connect(U);for(let C=0,R=0;C{$&&(St(),H()),Ze=!1};return St(),m(V,pr,mr)},si=(e,t)=>(n,r)=>{const o=n.createChannelMerger(r.numberOfInputs);return e!==null&&e.name==="webkitAudioContext"&&t(n,o),Te(o,r),o},ii=e=>{const t=e.numberOfOutputs;Object.defineProperty(e,"channelCount",{get:()=>t,set:n=>{if(n!==t)throw Z()}}),Object.defineProperty(e,"channelCountMode",{get:()=>"explicit",set:n=>{if(n!=="explicit")throw Z()}}),Object.defineProperty(e,"channelInterpretation",{get:()=>"discrete",set:n=>{if(n!=="discrete")throw 
Z()}})},Dn=(e,t)=>{const n=e.createChannelSplitter(t.numberOfOutputs);return Te(n,t),ii(n),n},ai=(e,t,n,r,o)=>(s,i)=>{if(s.createConstantSource===void 0)return n(s,i);const a=s.createConstantSource();return Te(a,i),yt(a,i,"offset"),t(r,()=>r(s))||Wn(a),t(o,()=>o(s))||Bn(a),e(s,a),a},Vn=(e,t)=>(e.connect=t.connect.bind(t),e.disconnect=t.disconnect.bind(t),e),ci=(e,t,n,r)=>(o,{offset:s,...i})=>{const a=o.createBuffer(1,2,44100),c=t(o,{buffer:null,channelCount:2,channelCountMode:"max",channelInterpretation:"speakers",loop:!1,loopEnd:0,loopStart:0,playbackRate:1}),u=n(o,{...i,gain:s}),d=a.getChannelData(0);d[0]=1,d[1]=1,c.buffer=a,c.loop=!0;const l={get bufferSize(){},get channelCount(){return u.channelCount},set channelCount(w){u.channelCount=w},get channelCountMode(){return u.channelCountMode},set channelCountMode(w){u.channelCountMode=w},get channelInterpretation(){return u.channelInterpretation},set channelInterpretation(w){u.channelInterpretation=w},get context(){return u.context},get inputs(){return[]},get numberOfInputs(){return c.numberOfInputs},get numberOfOutputs(){return u.numberOfOutputs},get offset(){return u.gain},get onended(){return c.onended},set onended(w){c.onended=w},addEventListener(...w){return c.addEventListener(w[0],w[1],w[2])},dispatchEvent(...w){return c.dispatchEvent(w[0])},removeEventListener(...w){return c.removeEventListener(w[0],w[1],w[2])},start(w=0){c.start.call(c,w)},stop(w=0){c.stop.call(c,w)}},m=()=>c.connect(u),g=()=>c.disconnect(u);return e(o,c),r(Vn(l,u),m,g)},ie=(e,t)=>{const n=e.createGain();return Te(n,t),yt(n,t,"gain"),n},ui=(e,{mediaStream:t})=>{const n=t.getAudioTracks();n.sort((s,i)=>s.idi.id?1:0);const r=n.slice(0,1),o=e.createMediaStreamSource(new MediaStream(r));return Object.defineProperty(o,"mediaStream",{value:t}),o},li=e=>e===null?null:e.hasOwnProperty("OfflineAudioContext")?e.OfflineAudioContext:e.hasOwnProperty("webkitOfflineAudioContext")?e.webkitOfflineAudioContext:null,di=e=>(t,{disableNormalization:n,imag:r,real:o})=>{const s=r instanceof Float32Array?r:new Float32Array(r),i=o instanceof Float32Array?o:new Float32Array(o),a=t.createPeriodicWave(i,s,{disableNormalization:n});if(Array.from(r).length<2)throw e();return a},At=(e,t,n,r)=>e.createScriptProcessor(t,n,r),fe=()=>new DOMException("","NotSupportedError"),fi={disableNormalization:!1},hi=(e,t,n,r)=>class Fn{constructor(s,i){const a=t(s),c=r({...fi,...i}),u=e(a,c);return n.add(u),u}static[Symbol.hasInstance](s){return s!==null&&typeof s=="object"&&Object.getPrototypeOf(s)===Fn.prototype||n.has(s)}},pi=(e,t)=>(n,r,o)=>(e(r).replay(o),t(r,n,o)),mi=(e,t,n)=>async(r,o,s)=>{const i=e(r);await Promise.all(i.activeInputs.map((a,c)=>Array.from(a).map(async([u,d])=>{const m=await t(u).render(u,o),g=r.context.destination;!n(u)&&(r!==g||!n(r))&&m.connect(s,d,c)})).reduce((a,c)=>[...a,...c],[]))},gi=(e,t,n)=>async(r,o,s)=>{const i=t(r);await Promise.all(Array.from(i.activeInputs).map(async([a,c])=>{const d=await e(a).render(a,o);n(a)||d.connect(s,c)}))},wi=(e,t,n,r)=>o=>e(ht,()=>ht(o))?Promise.resolve(e(r,r)).then(s=>{if(!s){const i=n(o,512,0,1);o.oncomplete=()=>{i.onaudioprocess=null,i.disconnect()},i.onaudioprocess=()=>o.currentTime,i.connect(o.destination)}return o.startRendering()}):new Promise(s=>{const i=t(o,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});o.oncomplete=a=>{i.disconnect(),s(a.renderedBuffer)},i.connect(o.destination),o.startRendering()}),vi=e=>(t,n)=>{e.set(t,n)},_i=e=>()=>{if(e===null)return!1;try{new 
e({length:1,sampleRate:44100})}catch{return!1}return!0},Ei=(e,t)=>async()=>{if(e===null)return!0;if(t===null)return!1;const n=new Blob(['class A extends AudioWorkletProcessor{process(i){this.port.postMessage(i,[i[0][0].buffer])}}registerProcessor("a",A)'],{type:"application/javascript; charset=utf-8"}),r=new t(1,128,44100),o=URL.createObjectURL(n);let s=!1,i=!1;try{await r.audioWorklet.addModule(o);const a=new e(r,"a",{numberOfOutputs:0}),c=r.createOscillator();a.port.onmessage=()=>s=!0,a.onprocessorerror=()=>i=!0,c.connect(a),c.start(0),await r.startRendering()}catch{}finally{URL.revokeObjectURL(o)}return s&&!i},yi=(e,t)=>()=>{if(t===null)return Promise.resolve(!1);const n=new t(1,1,44100),r=e(n,{channelCount:1,channelCountMode:"explicit",channelInterpretation:"discrete",gain:0});return new Promise(o=>{n.oncomplete=()=>{r.disconnect(),o(n.currentTime!==0)},n.startRendering()})},Ai=()=>new DOMException("","UnknownError"),bi=()=>typeof window>"u"?null:window,Ci=(e,t)=>n=>{n.copyFromChannel=(r,o,s=0)=>{const i=e(s),a=e(o);if(a>=n.numberOfChannels)throw t();const c=n.length,u=n.getChannelData(a),d=r.length;for(let l=i<0?-i:0;l+i{const i=e(s),a=e(o);if(a>=n.numberOfChannels)throw t();const c=n.length,u=n.getChannelData(a),d=r.length;for(let l=i<0?-i:0;l+it=>{t.copyFromChannel=(n=>(r,o,s=0)=>{const i=e(s),a=e(o);if(i(r,o,s=0)=>{const i=e(s),a=e(o);if(i(t,n)=>{const r=n.createBuffer(1,1,44100);t.buffer===null&&(t.buffer=r),e(t,"buffer",o=>()=>{const s=o.call(t);return s===r?null:s},o=>s=>o.call(t,s===null?r:s))},Ni=(e,t)=>(n,r)=>{r.channelCount=1,r.channelCountMode="explicit",Object.defineProperty(r,"channelCount",{get:()=>1,set:()=>{throw e()}}),Object.defineProperty(r,"channelCountMode",{get:()=>"explicit",set:()=>{throw e()}});const o=n.createBufferSource();t(r,()=>{const a=r.numberOfInputs;for(let c=0;co.disconnect(r))},Oi=(e,t,n)=>e.copyFromChannel===void 0?e.getChannelData(n)[0]:(e.copyFromChannel(t,n),t[0]),bt=(e,t,n,r)=>{let o=e;for(;!o.hasOwnProperty(t);)o=Object.getPrototypeOf(o);const{get:s,set:i}=Object.getOwnPropertyDescriptor(o,t);Object.defineProperty(e,t,{get:n(s),set:r(i)})},Ri=e=>({...e,outputChannelCount:e.outputChannelCount!==void 0?e.outputChannelCount:e.numberOfInputs===1&&e.numberOfOutputs===1?[e.channelCount]:Array.from({length:e.numberOfOutputs},()=>1)}),Ii=e=>{const{imag:t,real:n}=e;return t===void 0?n===void 0?{...e,imag:[0,0],real:[0,0]}:{...e,imag:Array.from(n,()=>0),real:n}:n===void 0?{...e,imag:t,real:Array.from(t,()=>0)}:{...e,imag:t,real:n}},jn=(e,t,n)=>{try{e.setValueAtTime(t,n)}catch(r){if(r.code!==9)throw r;jn(e,t,n+1e-7)}},Si=e=>{const t=e.createBufferSource();t.start();try{t.start()}catch{return!0}return!1},ki=e=>{const t=e.createBufferSource(),n=e.createBuffer(1,1,44100);t.buffer=n;try{t.start(0,1)}catch{return!1}return!0},Li=e=>{const t=e.createBufferSource();t.start();try{t.stop()}catch{return!1}return!0},$n=e=>{const t=e.createOscillator();try{t.start(-1)}catch(n){return n instanceof RangeError}return!1},Pi=e=>{const t=e.createBuffer(1,1,44100),n=e.createBufferSource();n.buffer=t,n.start(),n.stop();try{return n.stop(),!0}catch{return!1}},Gn=e=>{const t=e.createOscillator();try{t.stop(-1)}catch(n){return n instanceof RangeError}return!1},xi=e=>{const{port1:t,port2:n}=new MessageChannel;try{t.postMessage(e)}finally{t.close(),n.close()}},Ui=e=>{e.start=(t=>(n=0,r=0,o)=>{const s=e.buffer,i=s===null?r:Math.min(s.duration,r);s!==null&&i>s.duration-.5/e.context.sampleRate?t.call(e,n,0,0):t.call(e,n,i,o)})(e.start)},Wi=(e,t)=>{const 
n=t.createGain();e.connect(n);const r=(o=>()=>{o.call(e,n),e.removeEventListener("ended",r)})(e.disconnect);e.addEventListener("ended",r),Vn(e,n),e.stop=(o=>{let s=!1;return(i=0)=>{if(s)try{o.call(e,i)}catch{n.gain.setValueAtTime(0,i)}else o.call(e,i),s=!0}})(e.stop)},Ge=(e,t)=>n=>{const r={value:e};return Object.defineProperties(n,{currentTarget:r,target:r}),typeof t=="function"?t.call(e,n):t.handleEvent.call(e,n)},Bi=Ao(de),Di=Oo(de),Vi=gs(je),Fi=new WeakMap,ji=Os(Fi),he=us(new Map,new WeakMap),J=bi(),qn=Ns(z),Ct=mi(z,qn,le),ne=Ss(bn),ve=li(J),ee=Vs(ve),zn=new WeakMap,Hn=bs(Ge),qe=Ys(J),Xn=Us(qe),Yn=Ws(J),$i=Bs(J),ye=Ks(J),ze=ts(bo(_n),No(Bi,Di,dt,Vi,ft,z,ji,Ae,Y,de,ue,le,Ie),he,xs(st,ft,z,Y,Ee,ue),ae,Ps,fe,ms(dt,st,z,Y,Ee,ne,ue,ee),_s(zn,z,K),Hn,ne,Xn,Yn,$i,ee,ye),Zn=new WeakSet,nn=zs(J),Kn=fs(new Uint32Array(1)),Qn=Ci(Kn,ae),Jn=Ti(Kn),Gi=ko(Zn,he,fe,nn,ve,_i(nn),Qn,Jn),Tt=Ro(ie),er=gi(qn,be,le),tr=ls(er),He=Xs(Tt,he,Si,ki,Li,$n,Pi,Gn,Ui,Mi(bt),Wi),nr=pi(Rs(be),er),qi=xo(tr,He,Y,nr,Ct),Mt=ns(Co(yn),zn,An,rs,go,wo,vo,_o,Eo,nt,wn,qe,jn),zi=Po(ze,qi,Mt,Z,He,ne,ee,Ge),Hi=Go(ze,qo,ae,Z,Zs(ie,bt),ne,ee,Ct),Xe=qs(de,Yn),Xi=Ni(Z,Xe),Nt=si(qe,Xi),Yi=ci(Tt,He,ie,Xe),Ot=ai(Tt,he,Yi,$n,Gn),Zi=wi(he,ie,At,yi(ie,ve)),Ki=zo(Mt,Nt,Ot,At,fe,Oi,ee,bt),rr=new WeakMap,Qi=Gs(Hi,Ki,Hn,ee,rr,Ge),Ji=di(ae);hi(Ji,ne,new WeakSet,Ii);const or=Fs(J),Rt=Cs(J),sr=new WeakMap,ea=ks(sr,ve),rn=or?Mo(he,fe,As(J),Rt,Ts(yo),ne,ea,ee,ye,new WeakMap,new WeakMap,Ei(ye,ve),J):void 0,ta=Ds(Xn,ee);ps(Zn,he,hs,ys,new WeakSet,ne,ta,ut,ht,Qn,Jn);const na=js(ze,ui,ne,ee),ir=Ls(rr),ra=Io(ir),ar=ds(ae),oa=ws(ir),cr=Es(ae),ur=new WeakMap,sa=Ms(ur,K),ia=oi(ar,ae,Z,Nt,Dn,Ot,ie,At,fe,cr,Rt,sa,Xe),aa=Js(Z,ia,ie,fe,Xe),ca=cs(tr,ar,He,Nt,Dn,Ot,ie,oa,cr,Rt,Y,ye,ve,nr,Ct,Zi),ua=Is(sr),la=vi(ur),on=or?ss(ra,ze,Mt,ca,aa,z,ua,ne,ee,ye,Ri,la,xi,Ge):void 0,da=$s(Z,fe,Ai,Qi,qe),lr="Missing AudioWorklet support. 
Maybe this is not running in a secure context.",fa=async(e,t,n,r,o)=>{const{encoderId:s,port:i}=await dn(o,t.sampleRate);if(on===void 0)throw new Error(lr);const a=new zi(t,{buffer:e}),c=new na(t,{mediaStream:r}),u=fo(on,t,{channelCount:n});return{audioBufferSourceNode:a,encoderId:s,mediaStreamAudioSourceNode:c,port:i,recorderAudioWorkletNode:u}},ha=(e,t,n,r)=>(o,s,i)=>{var a;const c=(a=s.getAudioTracks()[0])===null||a===void 0?void 0:a.getSettings().sampleRate,u=new da({latencyHint:"playback",sampleRate:c}),d=Math.max(1024,Math.ceil(u.baseLatency*u.sampleRate)),l=new Gi({length:d,sampleRate:u.sampleRate}),m=[],g=lo(v=>{if(rn===void 0)throw new Error(lr);return rn(u,v)});let w=null,f=null,p=null,h=null,_=!0;const E=v=>{o.dispatchEvent(e("dataavailable",{data:new Blob(v,{type:i})}))},T=async(v,M)=>{const k=await Se(v,M);p===null?m.push(...k):(E(k),h=T(v,M))},A=()=>(_=!0,u.resume()),b=()=>{p!==null&&(w!==null&&(s.removeEventListener("addtrack",w),s.removeEventListener("removetrack",w)),f!==null&&clearTimeout(f),p.then(async({encoderId:v,mediaStreamAudioSourceNode:M,recorderAudioWorkletNode:k})=>{h!==null&&(h.catch(()=>{}),h=null),await k.stop(),M.disconnect(k);const N=await Se(v,null);p===null&&await y(),E([...m,...N]),m.length=0,o.dispatchEvent(new Event("stop"))}),p=null)},y=()=>(_=!1,u.suspend());return y(),{get mimeType(){return i},get state(){return p===null?"inactive":_?"recording":"paused"},pause(){if(p===null)throw n();_&&(y(),o.dispatchEvent(new Event("pause")))},resume(){if(p===null)throw n();_||(A(),o.dispatchEvent(new Event("resume")))},start(v){var M;if(p!==null)throw n();if(s.getVideoTracks().length>0)throw r();o.dispatchEvent(new Event("start"));const k=s.getAudioTracks(),N=k.length===0?2:(M=k[0].getSettings().channelCount)!==null&&M!==void 0?M:2;p=Promise.all([A(),g.then(()=>fa(l,u,N,s,i))]).then(async([,{audioBufferSourceNode:x,encoderId:W,mediaStreamAudioSourceNode:L,port:I,recorderAudioWorkletNode:S}])=>(L.connect(S),await new Promise(V=>{x.onended=V,x.connect(S),x.start(u.currentTime+d/u.sampleRate)}),x.disconnect(S),await S.record(I),v!==void 0&&(h=T(W,v)),{encoderId:W,mediaStreamAudioSourceNode:L,recorderAudioWorkletNode:S}));const U=s.getTracks();w=()=>{b(),o.dispatchEvent(new ErrorEvent("error",{error:t()}))},s.addEventListener("addtrack",w),s.addEventListener("removetrack",w),f=setInterval(()=>{const x=s.getTracks();(x.length!==U.length||x.some((W,L)=>W!==U[L]))&&w!==null&&w()},1e3)},stop:b}};class tt{constructor(t,n=0,r){if(n<0||r!==void 0&&r<0)throw new RangeError;const o=t.reduce((d,l)=>d+l.byteLength,0);if(n>o||r!==void 0&&n+r>o)throw new RangeError;const s=[],i=r===void 0?o-n:r,a=[];let c=0,u=n;for(const d of t)if(a.length===0)if(d.byteLength>u){c=d.byteLength-u;const l=c>i?i:c;s.push(new DataView(d,u,l)),a.push(d)}else u-=d.byteLength;else if(ci?d.byteLength-c+i:d.byteLength;s.push(new DataView(d,0,l)),a.push(d)}this._buffers=a,this._byteLength=i,this._byteOffset=u,this._dataViews=s,this._internalBuffer=new DataView(new ArrayBuffer(8))}get buffers(){return this._buffers}get byteLength(){return this._byteLength}get byteOffset(){return this._byteOffset}getFloat32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getFloat32(0,n)}getFloat64(t,n){return 
this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.setUint8(4,this.getUint8(t+4)),this._internalBuffer.setUint8(5,this.getUint8(t+5)),this._internalBuffer.setUint8(6,this.getUint8(t+6)),this._internalBuffer.setUint8(7,this.getUint8(t+7)),this._internalBuffer.getFloat64(0,n)}getInt16(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.getInt16(0,n)}getInt32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getInt32(0,n)}getInt8(t){const[n,r]=this._findDataViewWithOffset(t);return n.getInt8(t-r)}getUint16(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.getUint16(0,n)}getUint32(t,n){return this._internalBuffer.setUint8(0,this.getUint8(t+0)),this._internalBuffer.setUint8(1,this.getUint8(t+1)),this._internalBuffer.setUint8(2,this.getUint8(t+2)),this._internalBuffer.setUint8(3,this.getUint8(t+3)),this._internalBuffer.getUint32(0,n)}getUint8(t){const[n,r]=this._findDataViewWithOffset(t);return n.getUint8(t-r)}setFloat32(t,n,r){this._internalBuffer.setFloat32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setFloat64(t,n,r){this._internalBuffer.setFloat64(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3)),this.setUint8(t+4,this._internalBuffer.getUint8(4)),this.setUint8(t+5,this._internalBuffer.getUint8(5)),this.setUint8(t+6,this._internalBuffer.getUint8(6)),this.setUint8(t+7,this._internalBuffer.getUint8(7))}setInt16(t,n,r){this._internalBuffer.setInt16(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1))}setInt32(t,n,r){this._internalBuffer.setInt32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setInt8(t,n){const[r,o]=this._findDataViewWithOffset(t);r.setInt8(t-o,n)}setUint16(t,n,r){this._internalBuffer.setUint16(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1))}setUint32(t,n,r){this._internalBuffer.setUint32(0,n,r),this.setUint8(t,this._internalBuffer.getUint8(0)),this.setUint8(t+1,this._internalBuffer.getUint8(1)),this.setUint8(t+2,this._internalBuffer.getUint8(2)),this.setUint8(t+3,this._internalBuffer.getUint8(3))}setUint8(t,n){const[r,o]=this._findDataViewWithOffset(t);r.setUint8(t-o,n)}_findDataViewWithOffset(t){let n=0;for(const r of this._dataViews){const o=n+r.byteLength;if(t>=n&&t(t,n,r)=>e(o=>{const s=i=>o.next(i);return t.addEventListener(n,s,r),()=>t.removeEventListener(n,s,r)}),ma=(e,t)=>{const n=()=>{},r=o=>typeof o[0]=="function";return o=>{const s=(...i)=>{const a=o(r(i)?t({next:i[0]}):t(...i));return a!==void 0?a:n};return 
s[Symbol.observable]=()=>({subscribe:(...i)=>({unsubscribe:s(...i)})}),e(s)}},ga=ma(pn,mn),wa=pa(ga),va=(e,t,n,r,o)=>(s,i,a,c)=>{const u=a.getAudioTracks(),d=[],l=new i(a,{mimeType:"audio/webm;codecs=pcm"});let m=null,g=()=>{};const w=h=>{s.dispatchEvent(e("dataavailable",{data:new Blob(h,{type:c})}))},f=async(h,_)=>{const E=await Se(h,_);l.state==="inactive"?d.push(...E):(w(E),m=f(h,_))},p=()=>{l.state!=="inactive"&&(m!==null&&(m.catch(()=>{}),m=null),g(),g=()=>{},l.stop())};return l.addEventListener("error",h=>{p(),s.dispatchEvent(new ErrorEvent("error",{error:h.error===void 0?t():h.error}))}),l.addEventListener("pause",()=>s.dispatchEvent(new Event("pause"))),l.addEventListener("resume",()=>s.dispatchEvent(new Event("resume"))),l.addEventListener("start",()=>s.dispatchEvent(new Event("start"))),{get mimeType(){return c},get state(){return l.state},pause(){return l.pause()},resume(){return l.resume()},start(h){var _,E;if(a.getVideoTracks().length>0)throw n();if(l.state==="inactive"){const T=((_=u[0])===null||_===void 0?void 0:_.getSettings()).channelCount,A=(E=u[0])===null||E===void 0?void 0:E.getSettings().sampleRate;if(T===void 0)throw new Error("The channelCount is not defined.");if(A===void 0)throw new Error("The sampleRate is not defined.");let b=!1,y=!1,v=0,M=dn(c,A);g=()=>{y=!0};const k=wa(l,"dataavailable")(({data:N})=>{v+=1,M=M.then(async({dataView:U=null,elementType:x=null,encoderId:W,port:L})=>{const I=await N.arrayBuffer();v-=1;const S=U===null?new tt([I]):new tt([...U.buffers,I],U.byteOffset);if(!b&&l.state==="recording"&&!y){const F=o(S,0);if(F===null)return{dataView:S,elementType:x,encoderId:W,port:L};const{value:Q}=F;if(Q!==172351395)return{dataView:U,elementType:x,encoderId:W,port:L};b=!0}const{currentElementType:V,offset:O,contents:P}=r(S,x,T),B=OL.postMessage(F,F.map(({buffer:Q})=>Q))),v===0&&(l.state==="inactive"||y)&&(Se(W,null).then(F=>{w([...d,...F]),d.length=0,s.dispatchEvent(new Event("stop"))}),L.postMessage([]),L.close(),k()),{dataView:B,elementType:V,encoderId:W,port:L}})});h!==void 0&&M.then(({encoderId:N})=>m=f(N,h))}l.start(100)},stop:p}},_a=()=>typeof window>"u"?null:window,dr=(e,t)=>{if(t>=e.byteLength)return null;const n=e.getUint8(t);if(n>127)return 1;if(n>63)return 2;if(n>31)return 3;if(n>15)return 4;if(n>7)return 5;if(n>3)return 6;if(n>1)return 7;if(n>0)return 8;const r=dr(e,t+1);return r===null?null:r+8},Ea=(e,t)=>n=>{const r={value:e};return Object.defineProperties(n,{currentTarget:r,target:r}),typeof t=="function"?t.call(e,n):t.handleEvent.call(e,n)},fr=[],Ye=_a(),ya=Dr(Ye),hr=kr(ya),Aa=ha(hr,mt,Ur,De),It=Gr(dr),ba=jr(It),Ca=$r(It),Ta=Lr(ba,Ca),Ma=va(hr,mt,De,Ta,It),Na=xr(Ye),Oa=Fr(Ye),Ra=Vr(mt,De),ja=Br(Ra,De,Aa,Ma,fr,Pr(Na,Ea),Oa),$a=()=>Wr(Ye),Ga=async e=>{fr.push(await Sr(e))};export{ja as MediaRecorder,$a as isSupported,Ga as register}; -//# sourceMappingURL=module-3b9777eb.js.map diff --git a/spaces/deepseek-ai/deepseek-coder-33b-instruct/README.md b/spaces/deepseek-ai/deepseek-coder-33b-instruct/README.md deleted file mode 100644 index 03253587d24f19beac22e58bf8aa8d087711c19d..0000000000000000000000000000000000000000 --- a/spaces/deepseek-ai/deepseek-coder-33b-instruct/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chat with DeepSeek Coder 33B -emoji: 🐬 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false -suggested_hardware: a10g-small -startup_duration_timeout: 1h ---- \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Download Film Kartun Kisah 25 Nabi Islam.md 
b/spaces/diacanFperku/AutoGPT/Download Film Kartun Kisah 25 Nabi Islam.md deleted file mode 100644 index ec0e752cad0740340ca8cc727c257e7b3baf60f6..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Download Film Kartun Kisah 25 Nabi Islam.md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      thesis i to the latest film. download and watch free videos. - 8. -sri lanka live radio. specially designed for ktv and ktv5. download the latest films from your favourite content partners and watch them on the web.

      -

      download 59700 islamic art, islamic paintings, islamic fakes and islamic forgeries. download the latest muslim inspirational, islamic, art, artwork, islamic art and islamic fakes and forgeries. stories, ramadan download, quran, eid, ramadhan, quran reciters, quran quran suroor, quran story, quran mirror. download all story, quran downloader, quranic story, islam, quran, quran buku, quran reciter, islam pdf. this one is an old and classic story told by the messenger in the nights of ramadan. the story of zulaika and kausar from qur'an.

      -

      download film kartun kisah 25 nabi islam


      DOWNLOAD ———>>> https://gohhs.com/2uFVuE



      -

download mysaur.com/download-film-kartun-kisah-25-nabi-islam-jav.php mysaur movies. download mysaur movies. download over 720,000+ flash games, arcade games and action games to play anytime, anywhere, on any device. datuk seri ahmad zahid hamidi, together with the community and organisations, as elected in the 2018 general election.

      -

kartun nabi ibrahim, an authentic adaptation - the story of prophet ibrahim, read clearly or not - scripture 1. quran online. lirik islam-karena-di-kepercayaan-di-islam-album-download. download film kartun kisah 25 nabi islam.

      -

the story of isra mi'raj - isra mi'raj, from a guided story, as it airs, and its meaning. in its telling, isra mi'raj is the same task with different challenges. let us study the story. from films to dramas and everything in between, we've got it all. coub.com/stories/2949299-best-download-film-kartun-kisah-25-nabi-islam.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Evangelion 222 English Dub 1080p Torrent.md b/spaces/diacanFperku/AutoGPT/Evangelion 222 English Dub 1080p Torrent.md deleted file mode 100644 index 6cf4de7e935e35bd062933fb7f4437b1993a9c00..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Evangelion 222 English Dub 1080p Torrent.md +++ /dev/null @@ -1,7 +0,0 @@ -
      -

      the second film in the four-part silver-screen remake of sci-fi anime classic neon genesis evangelion, evangelion: 2.0 you can (not) advance continues the familiar story established in the first film, while also reimagining the series with new characters and arcs. picking up where you are (not) alone left off, the second feature introduces two more eva pilots - asuka who moves in with shinji and misato and throws their world into further confusion, and new character mari, a mysterious pilot from europe. as the war against the angels rages on, more light is shed on the true motives and powers behind nerv and gendo ikari and seele's secret project.

      -

      rebuild of evangelion is a retelling of the original neon genesis evangelion anime series, and is produced by studio khara. hideaki anno served as the writer and general manager of the project, with kazuya tsurumaki and masayuki co-directing the films with anno. prime is streaming updated versions of the rebuild movies with updated animation.

      -

      evangelion 222 english dub 1080p torrent


      DOWNLOADhttps://gohhs.com/2uFVhq



      -

      the second film in the four-part silver-screen remake of sci-fi anime classic neon genesis evangelion, evangelion: 2.0 you can (not) advance continues the familiar story established in the first film, while also reimagining the series with new characters and arcs. picking up where you are (not) alone left off, the second feature introduces two more eva pilots - asuka who moves in with shinji and misato and throws their world into further confusion, and new character mari, a mysterious pilot from europe.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Julius Caesar Five Paragraph Essay ((HOT)).md b/spaces/diacanFperku/AutoGPT/Julius Caesar Five Paragraph Essay ((HOT)).md deleted file mode 100644 index 4b7abae75ddbf62f37ab84b85a5f6266a381ca39..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Julius Caesar Five Paragraph Essay ((HOT)).md +++ /dev/null @@ -1,55 +0,0 @@ - -

      Julius Caesar Five Paragraph Essay: How to Write a Great One

      -

      Julius Caesar is one of the most famous and influential figures in history. He was a Roman general and statesman who conquered Gaul, won the civil war, and became the dictator of Rome. He was also a brilliant orator, a reformer, and a patron of arts and literature. His life and death have inspired countless writers, artists, and historians. If you are assigned to write a five paragraph essay on Julius Caesar, you might wonder how to approach this topic and what to include in your paper. In this article, we will give you some tips on how to write a great Julius Caesar five paragraph essay.

      -

      Julius Caesar Five Paragraph Essay


      DOWNLOAD ••• https://gohhs.com/2uFUUL



      - -

      What is a Five Paragraph Essay?

      -

      A five paragraph essay is a common type of academic writing that consists of an introduction, three body paragraphs, and a conclusion. The introduction should introduce the topic, provide some background information, and state the main idea or thesis of the essay. The body paragraphs should support the thesis with evidence, examples, and analysis. The conclusion should summarize the main points, restate the thesis, and provide some final thoughts or implications. A five paragraph essay should be well-organized, coherent, and concise.

      - -

      How to Choose a Topic for Your Julius Caesar Five Paragraph Essay?

      -

      Julius Caesar is a rich and complex topic that offers many possibilities for your essay. You can choose to focus on a specific aspect of his life, such as his military achievements, his political reforms, his relationship with Cleopatra, or his assassination. You can also choose to analyze a theme or a character from the play Julius Caesar by William Shakespeare, which dramatizes the events leading up to and following Caesar's death. Some examples of themes are power and corruption, loyalty and betrayal, ambition and honor, or fate and free will. Some examples of characters are Brutus, Antony, Cassius, or Portia. Whatever topic you choose, make sure it is relevant, interesting, and narrow enough to be covered in five paragraphs.

      - -

      How to Write an Introduction for Your Julius Caesar Five Paragraph Essay?

      -

      The introduction is the first paragraph of your essay and it should capture the attention of your reader and introduce your topic. You can start with a hook, such as a quote, a question, a statistic, or an anecdote related to Julius Caesar. For example: - -"Et tu, Brute?" These are the famous last words of Julius Caesar as he falls victim to the daggers of his friends and enemies in the Senate House on March 15th, 44 BCE. - -What made Julius Caesar so powerful that he threatened the Roman Republic? What made him so vulnerable that he was killed by his own allies? How did his death change the course of history? - -After the hook, you should provide some background information on Julius Caesar and his historical context. You can briefly mention his birth, his rise to power, his conquests, his reforms, and his enemies. You should also explain why Julius Caesar is an important and relevant topic for your essay. - -Finally, you should state your thesis statement, which is the main idea or argument of your essay. Your thesis statement should be clear, specific, and debatable. It should also indicate what points you will discuss in your body paragraphs. For example: - -In this essay, I will argue that Julius Caesar was a great leader who improved the lives of many Romans but also a tyrant who endangered the Roman Republic with his ambition and arrogance.

      - -

      How to Write Body Paragraphs for Your Julius Caesar Five Paragraph Essay?

      -

The body paragraphs are the second, third, and fourth paragraphs of your essay, and they should develop and support your thesis statement. Each body paragraph should begin with a clear topic sentence that states one point in support of your thesis, followed by evidence from history or from Shakespeare's play and your own analysis of that evidence. For example, one paragraph could discuss Caesar's achievements as a general and reformer, another could examine the power he accumulated and the fears it provoked in the Senate, and a third could analyze the conspiracy and assassination that ended his life. End each paragraph with a sentence that ties the point back to your thesis and transitions smoothly to the next paragraph.

      -

      How to Write a Conclusion for Your Julius Caesar Five Paragraph Essay?

      -

      The conclusion is the last paragraph of your essay and it should wrap up your argument and leave a lasting impression on your reader. You can start with a restatement of your thesis statement, using different words but keeping the same meaning. For example: - -Julius Caesar was both a hero and a villain who changed Rome and the world with his actions. - -Then, you should summarize the main points of your body paragraphs, reminding your reader of the evidence and analysis you provided. For example: - -He was a hero who conquered new lands, brought prosperity and culture to Rome, and initiated reforms that benefited the people. He was also a villain who threatened the republican system, amassed too much power, and provoked a civil war that led to his death. - -Finally, you should provide some final thoughts or implications of your argument, such as what lessons can be learned from Julius Caesar's life and death, how his legacy still affects us today, or what questions remain unanswered. For example: - -Julius Caesar's story teaches us that power can be a blessing or a curse, depending on how it is used and who controls it. His legacy still influences our language, calendar, politics, and literature. However, his story also raises questions about the role of fate and free will, the nature of leadership and morality, and the consequences of violence and betrayal.

      - -

      How to Revise and Edit Your Julius Caesar Five Paragraph Essay?

      -

      After you have written your first draft of your essay, you should revise and edit it to improve its quality and clarity. Here are some steps you can follow to revise and edit your essay:

      -
        -
      • Read your essay aloud or ask someone else to read it for you. This will help you catch any errors in grammar, spelling, punctuation, or sentence structure.
      • -
      • Check if your essay follows the five paragraph format: introduction, three body paragraphs, and conclusion. Make sure each paragraph has a clear topic sentence and transitions smoothly to the next one.
      • -
      • Check if your essay has a clear thesis statement that expresses your main idea or argument. Make sure your thesis statement is supported by evidence and analysis in your body paragraphs.
      • -
      • Check if your essay has a hook in the introduction that grabs the attention of your reader. Make sure your introduction also provides some background information on Julius Caesar and explains why he is an important and relevant topic for your essay.
      • -
      • Check if your essay has a conclusion that restates your thesis statement, summarizes your main points, and provides some final thoughts or implications. Make sure your conclusion does not introduce any new information or arguments.
      • -
      • Check if your essay uses appropriate language and tone for an academic paper. Avoid using slang, contractions, or informal expressions. Use formal words and phrases that convey your meaning clearly and precisely.
      • -
      • Check if your essay cites any sources you used for information or evidence. Use a consistent citation style, such as MLA or APA, to give credit to the authors of your sources.
      • -
      -

      By following these steps, you can revise and edit your Julius Caesar five paragraph essay and make it ready for submission.

      -

      Conclusion

      -

      Julius Caesar is a fascinating and complex topic that can inspire a great five paragraph essay. A five paragraph essay is a common type of academic writing that consists of an introduction, three body paragraphs, and a conclusion. To write a great Julius Caesar five paragraph essay, you need to choose a relevant and interesting topic, write a clear and debatable thesis statement, support your thesis with evidence and analysis in your body paragraphs, and wrap up your argument with a restatement of your thesis, a summary of your main points, and some final thoughts or implications in your conclusion. You also need to revise and edit your essay to improve its quality and clarity. By following these steps, you can write a great Julius Caesar five paragraph essay that will impress your teacher and your peers.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Lks Pkn Ratih Smp Kelas 8 Semester 1.17 Anita Divisore Adult.md b/spaces/diacanFperku/AutoGPT/Lks Pkn Ratih Smp Kelas 8 Semester 1.17 Anita Divisore Adult.md deleted file mode 100644 index a9d1d7fa7e6ac0cc8e3ca9b500f72f06db421d74..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Lks Pkn Ratih Smp Kelas 8 Semester 1.17 Anita Divisore Adult.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Lks Pkn Ratih Smp Kelas 8 Semester 1.17 anita divisore adult


      Download ★★★ https://gohhs.com/2uFUTG



      -
      - d5da3c52bf
      -
      -
      -

      diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/train_ms.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - 
mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = 
GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = 
commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, 
tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/dineshreddy/WALT/cwalt/clustering_utils.py b/spaces/dineshreddy/WALT/cwalt/clustering_utils.py deleted file mode 100644 index 7463bfce84ae1c1089d9cf2a0e97de8e7397ce7a..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/cwalt/clustering_utils.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri May 20 15:18:20 2022 - -@author: dinesh -""" - -# 0 - Import related libraries - -import urllib -import zipfile -import os -import scipy.io -import math -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns - -from scipy.spatial.distance import directed_hausdorff -from sklearn.cluster import DBSCAN -from sklearn.metrics.pairwise import pairwise_distances -import scipy.spatial.distance - -from .kmedoid import kMedoids # kMedoids code is adapted from https://github.com/letiantian/kmedoids - -# Some visualization stuff, not so important -# sns.set() -plt.rcParams['figure.figsize'] = (12, 12) - -# Utility Functions - -color_lst = plt.rcParams['axes.prop_cycle'].by_key()['color'] -color_lst.extend(['firebrick', 'olive', 'indigo', 'khaki', 'teal', 'saddlebrown', - 'skyblue', 'coral', 'darkorange', 'lime', 'darkorchid', 'dimgray']) - - -def plot_cluster(image, traj_lst, cluster_lst): - ''' - Plots given trajectories with a color that is specific for every trajectory's own cluster index. 
- Outlier trajectories which are specified with -1 in `cluster_lst` are plotted dashed with black color - ''' - cluster_count = np.max(cluster_lst) + 1 - - for traj, cluster in zip(traj_lst, cluster_lst): - - # if cluster == -1: - # # Means it it a noisy trajectory, paint it black - # plt.plot(traj[:, 0], traj[:, 1], c='k', linestyle='dashed') - # - # else: - plt.plot(traj[:, 0], traj[:, 1], c=color_lst[cluster % len(color_lst)]) - - plt.imshow(image) - # plt.show() - plt.axis('off') - plt.savefig('trajectory.png', bbox_inches='tight') - plt.show() - - -# 3 - Distance matrix - -def hausdorff( u, v): - d = max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0]) - return d - - -def build_distance_matrix(traj_lst): - # 2 - Trajectory segmentation - - print('Running trajectory segmentation...') - degree_threshold = 5 - - for traj_index, traj in enumerate(traj_lst): - - hold_index_lst = [] - previous_azimuth = 1000 - - for point_index, point in enumerate(traj[:-1]): - next_point = traj[point_index + 1] - diff_vector = next_point - point - azimuth = (math.degrees(math.atan2(*diff_vector)) + 360) % 360 - - if abs(azimuth - previous_azimuth) > degree_threshold: - hold_index_lst.append(point_index) - previous_azimuth = azimuth - hold_index_lst.append(traj.shape[0] - 1) # Last point of trajectory is always added - - traj_lst[traj_index] = traj[hold_index_lst, :] - - print('Building distance matrix...') - traj_count = len(traj_lst) - D = np.zeros((traj_count, traj_count)) - - # This may take a while - for i in range(traj_count): - if i % 20 == 0: - print(i) - for j in range(i + 1, traj_count): - distance = hausdorff(traj_lst[i], traj_lst[j]) - D[i, j] = distance - D[j, i] = distance - - return D - - -def run_kmedoids(image, traj_lst, D): - # 4 - Different clustering methods - - # 4.1 - kmedoids - - traj_count = len(traj_lst) - - k = 3 # The number of clusters - medoid_center_lst, cluster2index_lst = kMedoids(D, k) - - cluster_lst = np.empty((traj_count,), dtype=int) - - for cluster in cluster2index_lst: - cluster_lst[cluster2index_lst[cluster]] = cluster - - plot_cluster(image, traj_lst, cluster_lst) - - -def run_dbscan(image, traj_lst, D): - mdl = DBSCAN(eps=400, min_samples=10) - cluster_lst = mdl.fit_predict(D) - - plot_cluster(image, traj_lst, cluster_lst) - - - diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/det_models/psenet_r50_fpnf.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/det_models/psenet_r50_fpnf.py deleted file mode 100644 index a3aff0d1325d3b9e25b5ed095cea28d313f611a0..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/det_models/psenet_r50_fpnf.py +++ /dev/null @@ -1,51 +0,0 @@ -model_poly = dict( - type='PSENet', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPNF', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - fusion_type='concat'), - bbox_head=dict( - type='PSEHead', - in_channels=[256], - out_channels=7, - loss=dict(type='PSELoss'), - postprocessor=dict(type='PSEPostprocessor', text_repr_type='poly')), - train_cfg=None, - test_cfg=None) - -model_quad = dict( - type='PSENet', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - 
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPNF', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - fusion_type='concat'), - bbox_head=dict( - type='PSEHead', - in_channels=[256], - out_channels=7, - loss=dict(type='PSELoss'), - postprocessor=dict(type='PSEPostprocessor', text_repr_type='quad')), - train_cfg=None, - test_cfg=None) diff --git a/spaces/dmvaldman/ICLR2023/get_submissions.py b/spaces/dmvaldman/ICLR2023/get_submissions.py deleted file mode 100644 index c670cfa8623d0c8d251ab78e203e9610f7460bb8..0000000000000000000000000000000000000000 --- a/spaces/dmvaldman/ICLR2023/get_submissions.py +++ /dev/null @@ -1,41 +0,0 @@ -import json -import requests -import csv - -offset = 0 -limit = 1000 -max_count = 4944 - -base_url = 'https://api.openreview.net' - -all_papers = [] -while offset < max_count: - limit = min(limit, max_count - offset) - - print(offset, limit) - url = base_url + f"/notes?details=invitation%2Coriginal&offset={offset}&limit={limit}&invitation=ICLR.cc%2F2023%2FConference%2F-%2FBlind_Submission" - - response = requests.get(url) - papers = json.loads(response.text)['notes'] - all_papers += papers - - offset += limit - - - -with open('iclr_submissions.csv', 'w', encoding='UTF8', newline='') as f: - header = ['title', 'url', 'pdf', 'tldr', 'abstract', 'keywords'] - writer = csv.writer(f) - writer.writerow(header) - - for paper in all_papers: - content = paper['content'] - - title = content['title'] - url = f'https://openreview.net/forum?id={paper["forum"]}' - pdf = f'https://openreview.net/pdf?id={paper["forum"]}' - tldr = content.get('TL;DR', '') - abstract = content['abstract'] - keywords = ', '.join(content['keywords']) - - writer.writerow([title, url, pdf, tldr, abstract, keywords]) \ No newline at end of file diff --git a/spaces/dolceschokolade/chatbot-mini/pages/api/google.ts b/spaces/dolceschokolade/chatbot-mini/pages/api/google.ts deleted file mode 100644 index 12024cbd714db593e33e504e4a96b24180311f3e..0000000000000000000000000000000000000000 --- a/spaces/dolceschokolade/chatbot-mini/pages/api/google.ts +++ /dev/null @@ -1,149 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next'; - -import { OPENAI_API_HOST } from '@/utils/app/const'; -import { cleanSourceText } from '@/utils/server/google'; - -import { Message } from '@/types/chat'; -import { GoogleBody, GoogleSource } from '@/types/google'; - -import { Readability } from '@mozilla/readability'; -import endent from 'endent'; -import jsdom, { JSDOM } from 'jsdom'; - -const handler = async (req: NextApiRequest, res: NextApiResponse) => { - try { - const { messages, key, model, googleAPIKey, googleCSEId } = - req.body as GoogleBody; - - const userMessage = messages[messages.length - 1]; - const query = encodeURIComponent(userMessage.content.trim()); - - const googleRes = await fetch( - `https://customsearch.googleapis.com/customsearch/v1?key=${ - googleAPIKey ? googleAPIKey : process.env.GOOGLE_API_KEY - }&cx=${ - googleCSEId ? 
googleCSEId : process.env.GOOGLE_CSE_ID - }&q=${query}&num=5`, - ); - - const googleData = await googleRes.json(); - - const sources: GoogleSource[] = googleData.items.map((item: any) => ({ - title: item.title, - link: item.link, - displayLink: item.displayLink, - snippet: item.snippet, - image: item.pagemap?.cse_image?.[0]?.src, - text: '', - })); - - const sourcesWithText: any = await Promise.all( - sources.map(async (source) => { - try { - const timeoutPromise = new Promise((_, reject) => - setTimeout(() => reject(new Error('Request timed out')), 5000), - ); - - const res = (await Promise.race([ - fetch(source.link), - timeoutPromise, - ])) as any; - - // if (res) { - const html = await res.text(); - - const virtualConsole = new jsdom.VirtualConsole(); - virtualConsole.on('error', (error) => { - if (!error.message.includes('Could not parse CSS stylesheet')) { - console.error(error); - } - }); - - const dom = new JSDOM(html, { virtualConsole }); - const doc = dom.window.document; - const parsed = new Readability(doc).parse(); - - if (parsed) { - let sourceText = cleanSourceText(parsed.textContent); - - return { - ...source, - // TODO: switch to tokens - text: sourceText.slice(0, 2000), - } as GoogleSource; - } - // } - - return null; - } catch (error) { - console.error(error); - return null; - } - }), - ); - - const filteredSources: GoogleSource[] = sourcesWithText.filter(Boolean); - - const answerPrompt = endent` - Provide me with the information I requested. Use the sources to provide an accurate response. Respond in markdown format. Cite the sources you used as a markdown link as you use them at the end of each sentence by number of the source (ex: [[1]](link.com)). Provide an accurate response and then stop. Today's date is ${new Date().toLocaleDateString()}. - - Example Input: - What's the weather in San Francisco today? - - Example Sources: - [Weather in San Francisco](https://www.google.com/search?q=weather+san+francisco) - - Example Response: - It's 70 degrees and sunny in San Francisco today. [[1]](https://www.google.com/search?q=weather+san+francisco) - - Input: - ${userMessage.content.trim()} - - Sources: - ${filteredSources.map((source) => { - return endent` - ${source.title} (${source.link}): - ${source.text} - `; - })} - - Response: - `; - - const answerMessage: Message = { role: 'user', content: answerPrompt }; - - const answerRes = await fetch(`${OPENAI_API_HOST}/v1/chat/completions`, { - headers: { - 'Content-Type': 'application/json', - Authorization: `Bearer ${key ? key : process.env.OPENAI_API_KEY}`, - ...(process.env.OPENAI_ORGANIZATION && { - 'OpenAI-Organization': process.env.OPENAI_ORGANIZATION, - }), - }, - method: 'POST', - body: JSON.stringify({ - model: model.id, - messages: [ - { - role: 'system', - content: `Use the sources to provide an accurate response. Respond in markdown format. Cite the sources you used as [1](link), etc, as you use them. 
Maximum 4 sentences.`, - }, - answerMessage, - ], - max_tokens: 1000, - temperature: 1, - stream: false, - }), - }); - - const { choices: choices2 } = await answerRes.json(); - const answer = choices2[0].message.content; - - res.status(200).json({ answer }); - } catch (error) { - console.error(error); - res.status(500).json({ error: 'Error'}) - } -}; - -export default handler; diff --git a/spaces/dolceschokolade/chatbot-mini/types/google.ts b/spaces/dolceschokolade/chatbot-mini/types/google.ts deleted file mode 100644 index 1f10b422055f7ff8daa8ea08922e1d09370a1d2a..0000000000000000000000000000000000000000 --- a/spaces/dolceschokolade/chatbot-mini/types/google.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { ChatBody, Message } from './chat'; - -export interface GoogleBody extends ChatBody { - googleAPIKey: string; - googleCSEId: string; -} - -export interface GoogleResponse { - message: Message; -} - -export interface GoogleSource { - title: string; - link: string; - displayLink: string; - snippet: string; - image: string; - text: string; -} diff --git a/spaces/dragonSwing/isr/utils.py b/spaces/dragonSwing/isr/utils.py deleted file mode 100644 index 36bbf83910668d2d41ce9b0a02a9468d1badcac0..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/isr/utils.py +++ /dev/null @@ -1,165 +0,0 @@ -import os -import torch -from basicsr.utils.download_util import load_file_from_url -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.archs.srvgg_arch import SRVGGNetCompact -from gfpgan.utils import GFPGANer -from realesrgan.utils import RealESRGANer - -from config import * -from srcnn import SRCNN - - -def get_upsampler(model_name, device=None): - if model_name == "RealESRGAN_x4plus": # x4 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=4, - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth" - ] - elif model_name == "RealESRNet_x4plus": # x4 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=4, - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth" - ] - elif model_name == "RealESRGAN_x4plus_anime_6B": # x4 RRDBNet model with 6 blocks - model = RRDBNet( - num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4 - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth" - ] - elif model_name == "RealESRGAN_x2plus": # x2 RRDBNet model - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=2, - ) - netscale = 2 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth" - ] - elif model_name == "realesr-animevideov3": # x4 VGG-style model (XS size) - model = SRVGGNetCompact( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_conv=16, - upscale=4, - act_type="prelu", - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth" - ] - elif model_name == "realesr-general-x4v3": # x4 VGG-style model (S size) - model = SRVGGNetCompact( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_conv=32, - upscale=4, - act_type="prelu", - ) - netscale = 4 - file_url = [ - "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth", 
- "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth", - ] - elif model_name == "srcnn": - model = SRCNN(device=device) - model_path = os.path.join(ROOT_DIR, WEIGHT_DIR, model_name + ".pth") - model.load_state_dict(torch.load(model_path, map_location=torch.device("cpu"))) - if device: - model.to(device) - return model - else: - raise ValueError(f"Wrong model version {model_name}.") - - model_path = os.path.join(ROOT_DIR, WEIGHT_DIR, model_name + ".pth") - if not os.path.exists(model_path): - print(f"Downloading weights for model {model_name}") - - for url in file_url: - # model_path will be updated - model_path = load_file_from_url( - url=url, - model_dir=os.path.join(ROOT_DIR, WEIGHT_DIR), - progress=True, - file_name=None, - ) - - if model_name != "realesr-general-x4v3": - dni_weight = None - else: - dni_weight = [0.5, 0.5] - wdn_model_path = model_path.replace( - "realesr-general-x4v3", "realesr-general-wdn-x4v3" - ) - model_path = [model_path, wdn_model_path] - - half = "cuda" in str(device) - - return RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - half=half, - device=device, - ) - - -def get_face_enhancer(model_name, upscale=2, bg_upsampler=None, device=None): - if model_name == "GFPGANv1.3": - arch = "clean" - channel_multiplier = 2 - file_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth" - elif model_name == "GFPGANv1.4": - arch = "clean" - channel_multiplier = 2 - file_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" - elif model_name == "RestoreFormer": - arch = "RestoreFormer" - channel_multiplier = 2 - file_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth" - else: - raise ValueError(f"Wrong model version {model_name}.") - - model_path = os.path.join(ROOT_DIR, WEIGHT_DIR, model_name + ".pth") - if not os.path.exists(model_path): - print(f"Downloading weights for model {model_name}") - model_path = load_file_from_url( - url=file_url, - model_dir=os.path.join(ROOT_DIR, WEIGHT_DIR), - progress=True, - file_name=None, - ) - - return GFPGANer( - model_path=model_path, - upscale=upscale, - arch=arch, - channel_multiplier=channel_multiplier, - bg_upsampler=bg_upsampler, - device=device, - ) diff --git a/spaces/dtrejopizzo/texto-a-imagenes-intel/README.md b/spaces/dtrejopizzo/texto-a-imagenes-intel/README.md deleted file mode 100644 index 3a00e6d761b1efc653d2b09666ba74b37efbe015..0000000000000000000000000000000000000000 --- a/spaces/dtrejopizzo/texto-a-imagenes-intel/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Texto A Imagenes Intel -emoji: 🐠 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/multibox_loss.py b/spaces/duycse1603/math2tex/ScanSSD/layers/modules/multibox_loss.py deleted file mode 100644 index e42816bed4ef662beb4d6e906bf58af70b1fbe47..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/multibox_loss.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -import torch -import torch.nn as nn -import torch.nn.functional as F -# from torch.autograd import Variable -from ..box_utils import match, log_sum_exp -from .focal_loss import FocalLoss - -class MultiBoxLoss(nn.Module): - """SSD Weighted Loss Function - 
Compute Targets: - 1) Produce Confidence Target Indices by matching ground truth boxes - with (default) 'priorboxes' that have jaccard index > threshold parameter - (default threshold: 0.5). - 2) Produce localization target by 'encoding' variance into offsets of ground - truth boxes and their matched 'priorboxes'. - 3) Hard negative mining to filter the excessive number of negative examples - that comes with using a large number of default bounding boxes. - (default negative:positive ratio 3:1) - Objective Loss: - L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss - weighted by α which is set to 1 by cross val. - Args: - c: class confidences, - l: predicted boxes, - g: ground truth boxes - N: number of matched default boxes - See: https://arxiv.org/pdf/1512.02325.pdf for more details. - """ - - def __init__(self, args, cfg, overlap_thresh, bkg_label, neg_pos): - - super(MultiBoxLoss, self).__init__() - self.args = args - self.num_classes = cfg['num_classes'] - self.threshold = overlap_thresh - self.background_label = bkg_label - self.negpos_ratio = neg_pos - self.variance = cfg['variance'] - self.focal_loss = FocalLoss() - # self.neg_overlap = neg_overlap - # self.encode_target = encode_target - # self.use_prior_for_matching = prior_for_matching - # self.do_neg_mining = args.neg_mining - - def forward(self, predictions, targets): - """Multibox Loss - Args: - predictions (tuple): A tuple containing loc preds, conf preds, - and prior boxes from SSD net. - conf shape: torch.size(batch_size,num_priors,num_classes) - loc shape: torch.size(batch_size,num_priors,4) - priors shape: torch.size(num_priors,4) - - targets (tensor): Ground truth boxes and labels for a batch, - shape: [batch_size,num_objs,5] (last idx is the label). 
- """ - loc_data, conf_data, priors = predictions - num = loc_data.size(0) - priors = priors[:loc_data.size(1), :] - num_priors = (priors.size(0)) - num_classes = self.num_classes - - # match priors (default boxes) and ground truth boxes - loc_t = torch.Tensor(num, num_priors, 4) - conf_t = torch.LongTensor(num, num_priors) - - for idx in range(num): - truths = targets[idx][:, :-1].data - labels = targets[idx][:, -1].data - defaults = priors.data - match(self.threshold, truths, defaults, self.variance, labels, - loc_t, conf_t, idx) - - if self.args.cuda: - loc_t = loc_t.cuda() - conf_t = conf_t.cuda() - - # wrap targets - loc_t = Variable(loc_t, requires_grad=False) - conf_t = Variable(conf_t, requires_grad=False) - - pos = conf_t > 0 - num_pos = pos.sum(dim=1, keepdim=True) - - # Localization Loss (Smooth L1) - # Shape: [batch,num_priors,4] - pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) - loc_p = loc_data[pos_idx].view(-1, 4) - loc_t = loc_t[pos_idx].view(-1, 4) - loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') - - # Compute max conf across batch for hard negative mining - batch_conf = conf_data.view(-1, self.num_classes) - - #print('conf_t view ', conf_t.view(-1, 1)) - #print('conf_t ' + conf_t.view(-1, 1)) - - loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) - - # Hard Negative Mining - if self.args.neg_mining: - loss_c = loss_c.view(pos.size()[0], pos.size()[1]) - loss_c = loss_c.view(num, -1) - loss_c[pos] = 0 # filter out pos boxes for now - _, loss_idx = loss_c.sort(1, descending=True) - _, idx_rank = loss_idx.sort(1) - num_pos = pos.long().sum(1, keepdim=True) - - num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1) - neg = idx_rank < num_neg.expand_as(idx_rank) - else: - #num_neg = torch.tensor(0).expand_as(idx_rank) - #num_neg[idx_rank] = 1 - neg = conf_t == 0 - - # Confidence Loss Including Positive and Negative Example - pos_idx = pos.unsqueeze(2).expand_as(conf_data) - neg_idx = neg.unsqueeze(2).expand_as(conf_data) - conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes) - targets_weighted = conf_t[(pos+neg).gt(0)] - - if self.args.loss_fun == 'ce': - loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') - else: - loss_c = self.focal_loss.compute(conf_p, targets_weighted) - - # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - - N = num_pos.data.sum() - #loss_l = loss_l.double() - #loss_c = loss_c.double() - loss_l /= N - loss_c /= N - return loss_l, loss_c diff --git a/spaces/dwancin/inpaint/README.md b/spaces/dwancin/inpaint/README.md deleted file mode 100644 index dae2d83c705095d369e83ee2894960a1a5279299..0000000000000000000000000000000000000000 --- a/spaces/dwancin/inpaint/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Inpainting -emoji: 🎨 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.50.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/dylanebert/gaussian-viewer/src/vite.config.js b/spaces/dylanebert/gaussian-viewer/src/vite.config.js deleted file mode 100644 index bbf8c7da43f0080dc6b9fb275f9583b7c17f1506..0000000000000000000000000000000000000000 --- a/spaces/dylanebert/gaussian-viewer/src/vite.config.js +++ /dev/null @@ -1,6 +0,0 @@ -import { sveltekit } from '@sveltejs/kit/vite'; -import { defineConfig } from 'vite'; - -export default defineConfig({ - plugins: [sveltekit()] -}); diff --git 
a/spaces/dylanmcc/beaverdam/app.py b/spaces/dylanmcc/beaverdam/app.py deleted file mode 100644 index b3f91a9632ca1fcf23653faf58bb564d91f39c46..0000000000000000000000000000000000000000 --- a/spaces/dylanmcc/beaverdam/app.py +++ /dev/null @@ -1,13 +0,0 @@ -import gradio as gr -from fastai.vision.all import load_learner - -learner = load_learner('saved_model/beaverdam.pkl') -categories = ('Beaver Dam', 'Not a Beaver Dam') - -def is_it_a_beaver_dam(input_img): - pred, idx, probs = learner.predict(input_img) - return f'{pred} {dict(zip(categories, map(float, probs)))}' - -demo = gr.Interface(fn=is_it_a_beaver_dam, inputs=gr.Image(shape=(200, 200)), outputs=gr.Label()) - -demo.launch() \ No newline at end of file diff --git a/spaces/eatcosmos/hackaprompt/tests/test_token_count.py b/spaces/eatcosmos/hackaprompt/tests/test_token_count.py deleted file mode 100644 index 5081208467e87c4bea73a3313e6567a62db28e06..0000000000000000000000000000000000000000 --- a/spaces/eatcosmos/hackaprompt/tests/test_token_count.py +++ /dev/null @@ -1,29 +0,0 @@ -from hackaprompt.completers import get_completer -import os - -openai_api_key = os.getenv("OPENAI_API_KEY") - -def test_count_tokens_gpt_35_turbo(): - - completer = get_completer(model="gpt-3.5-turbo", openai_api_key=openai_api_key) - - prompt = "hello" - expected_token_count = 1 # taken on https://platform.openai.com/tokenizer - assert completer.get_token_count(prompt) == expected_token_count - - prompt = "This is a test to see how many tokens we have" - expected_token_count = 11 # taken on https://platform.openai.com/tokenizer - assert completer.get_token_count(prompt) == expected_token_count - - -def test_count_tokens_flanT5(): - - completer = get_completer(model="FlanT5-XXL") - - prompt = "hello" - expected_token_count = 2 - assert completer.get_token_count(prompt) == expected_token_count - - prompt = "This is a test to see how many tokens we have" - expected_token_count = 14 - assert completer.get_token_count(prompt) == expected_token_count diff --git a/spaces/enzostvs/hub-api-playground/components/method/index.tsx b/spaces/enzostvs/hub-api-playground/components/method/index.tsx deleted file mode 100644 index e380e9bc535d15337ea56e0a866d7edeaa536ff8..0000000000000000000000000000000000000000 --- a/spaces/enzostvs/hub-api-playground/components/method/index.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import classNames from "classnames"; - -export const Method = ({ - method, - className, -}: { - method: string; - className?: string; -}) => ( -
      - {method} -
      -); diff --git a/spaces/eson/tokenizer-arena/vocab/bert_base_cased/README.md b/spaces/eson/tokenizer-arena/vocab/bert_base_cased/README.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/facebook/MusicGen/CONTRIBUTING.md b/spaces/facebook/MusicGen/CONTRIBUTING.md deleted file mode 100644 index a3e9507643d4439f509a8fc8b87dc73417ef9822..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# Contributing to AudioCraft - -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests - -AudioCraft is the implementation of a research paper. -Therefore, we do not plan on accepting many pull requests for new features. -We certainly welcome them for bug fixes. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## License -By contributing to encodec, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. diff --git a/spaces/facebook/MusicGen/tests/models/test_audiogen.py b/spaces/facebook/MusicGen/tests/models/test_audiogen.py deleted file mode 100644 index 3850af066cedd5ea38bd9aead9634d6aaf938218..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/tests/models/test_audiogen.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import AudioGen - - -class TestAudioGenModel: - def get_audiogen(self): - ag = AudioGen.get_pretrained(name='debug', device='cpu') - ag.set_generation_params(duration=2.0, extend_stride=2.) 
- return ag - - def test_base(self): - ag = self.get_audiogen() - assert ag.frame_rate == 25 - assert ag.sample_rate == 16000 - assert ag.audio_channels == 1 - - def test_generate_continuation(self): - ag = self.get_audiogen() - prompt = torch.randn(3, 1, 16000) - wav = ag.generate_continuation(prompt, 16000) - assert list(wav.shape) == [3, 1, 32000] - - prompt = torch.randn(2, 1, 16000) - wav = ag.generate_continuation( - prompt, 16000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000] - - prompt = torch.randn(2, 1, 16000) - with pytest.raises(AssertionError): - wav = ag.generate_continuation( - prompt, 16000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - ag = self.get_audiogen() - wav = ag.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000] - - def test_generate_long(self): - ag = self.get_audiogen() - ag.max_duration = 3. - ag.set_generation_params(duration=4., extend_stride=2.) - wav = ag.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 16000 * 4] diff --git a/spaces/falterWliame/Face_Mask_Detection/Auto Combo Para Bkl LINK.md b/spaces/falterWliame/Face_Mask_Detection/Auto Combo Para Bkl LINK.md deleted file mode 100644 index fdb64145cef62cf6dbfb2887ef010004097ad1d1..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Auto Combo Para Bkl LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Auto Combo Para Bkl


      Download Zip > https://urlca.com/2uDcjV



      -
      -Seriously though, it's a pain when you get the cross-up, which is not particularly easy in this game, and gets punished for it. Auto-combos should not autocorrect ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/falterWliame/Face_Mask_Detection/Gx Developer 8.7 Full Version.md b/spaces/falterWliame/Face_Mask_Detection/Gx Developer 8.7 Full Version.md deleted file mode 100644 index 21138a032fa59a48c62fb878bf5ecb1d7408b92c..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Gx Developer 8.7 Full Version.md +++ /dev/null @@ -1,11 +0,0 @@ -

      gx developer 8.7 full version


      Download Filehttps://urlca.com/2uDd5P



      -
-Download link: ... To download, log in and click on the download link at the end of the article. -To save the file to your computer, click on the "Download file" link. Next, in the window that appears, click on the "Save" button and choose a folder for the download. -Select the folder where you want to save the file. -If you don't know where the file should go, click on the "Browse" button and pick a folder. -There are several file types to choose from, so pick the one you need. -Click on the Save button. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/fatiXbelha/sd/Com.zhiliaoapp.musically The original name of TikTok and what it means.md b/spaces/fatiXbelha/sd/Com.zhiliaoapp.musically The original name of TikTok and what it means.md deleted file mode 100644 index 1bbf0de9f863d3df1406b6ada96b3f51bcd4164b..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Com.zhiliaoapp.musically The original name of TikTok and what it means.md +++ /dev/null @@ -1,150 +0,0 @@ - -

      What is com.zhiliaoapp.musically and why you should download it

      -

      If you are looking for a fun and creative way to express yourself, connect with others, and discover new trends, then you should definitely check out com.zhiliaoapp.musically. This is the package name of TikTok, one of the most popular and downloaded apps in the world. In this article, we will explain what com.zhiliaoapp.musically is, what are its features, how to download and install it on your device, how to use it to create and watch amazing videos, and how to optimize your experience. By the end of this article, you will be ready to join the millions of TikTok users who are having a blast on this app.

      -

      Introduction

      -

      What is com.zhiliaoapp.musically?

      -

Com.zhiliaoapp.musically is the package name of TikTok, an app that lets you create and watch short-form videos on all kinds of topics. On Google Play the app is published by TikTok Pte. Ltd., a Singapore-based entity of ByteDance, the company that also runs related apps such as Douyin, Helo, Vigo Video, and Resso. TikTok launched internationally in 2017 and merged with Musical.ly, a Shanghai-based app focused on lip-syncing videos, in 2018, which is why it still carries Musical.ly's original package name. Since then, TikTok has expanded its scope to include comedy, gaming, DIY, food, sports, memes, pets, and more. TikTok has over 1 billion downloads on the Google Play Store and has been named one of its Editors' Choice apps.
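To make the idea of a package name concrete, note that the Google Play web listing is addressed by it. The small Python snippet below simply builds that listing URL from the package name; it is only an illustration, and the one assumption is that the listing follows the standard play.google.com/store/apps/details?id=&lt;package&gt; pattern.

```python
# Build the Play Store listing URL for TikTok from its package name.
PACKAGE_NAME = "com.zhiliaoapp.musically"

play_store_url = f"https://play.google.com/store/apps/details?id={PACKAGE_NAME}"
print(play_store_url)
# -> https://play.google.com/store/apps/details?id=com.zhiliaoapp.musically
```

Opening that URL in a browser takes you to the same TikTok page you would reach from the Play Store app.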

      -

      com.zhiliaoapp.musically


      Download Zip ->>->>->> https://urllie.com/2uNFvR



      -

      What are the features of com.zhiliaoapp.musically?

      -

      Com.zhiliaoapp.musically has many features that make it an exciting and engaging app for users of all ages and interests. Some of these features are:

      -
        -
      • A personalized video feed based on what you watch, like, and share. TikTok offers you real, interesting, and fun videos that will make your day.
      • -
      • An explore page where you can discover videos from endless categories and hashtags. You can also search for specific keywords or users.
      • -
      • A pause and resume function that allows you to record multiple clips in one video. You can also use the timer and speed options to adjust your recording.
      • -
      • A huge library of music clips and sounds that you can add to your videos for free. You can also use your own voice or sound effects.
      • -
      • A variety of filters, effects, and AR objects that you can use to enhance your videos. You can also unlock more by completing challenges or joining events.
      • -
      • An integrated editing tool that allows you to trim, cut, merge, and duplicate video clips without leaving the app. You can also add stickers, text, emojis, and more.
      • -
      • A social platform where you can follow, like, comment, share, and message other users. You can also join or create groups, duets, reactions, live streams, and more.
      • -
      -

      How to download and install com.zhiliaoapp.musically on your device

      -

      Downloading from Google Play Store

      -

      The easiest way to download and install com.zhiliaoapp.musically on your device is to use the Google Play Store app. Here are the steps:

      -
        -
      1. Open the Google Play Store app on your device.
      2. -
3. Search for "TikTok" in the search bar.
      4. Tap on the app icon that says "TikTok - Trends Start Here" and has a black and white logo.
      5. -
      6. Tap on the green "Install" button and wait for the app to download and install on your device.
      7. -
      8. Tap on the "Open" button to launch the app and start using it.
      9. -
      -

      Downloading from APK file

      -

      If you cannot access the Google Play Store or want to download an older version of com.zhiliaoapp.musically, you can use an APK file. An APK file is a package file that contains the app's code, resources, and manifest. Here are the steps:

      -
        -
      1. Find a reliable source for downloading the APK file of com.zhiliaoapp.musically. You can use websites such as APKPure, APKMirror, or Uptodown. Make sure to check the file size, version, and permissions before downloading.
      2. -
      3. Download the APK file to your device or transfer it from your computer using a USB cable or a cloud service.
      4. -
      5. Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
      6. -
      7. Locate the APK file on your device using a file manager app or your device's Downloads folder.
      8. -
      9. Tap on the APK file and follow the instructions to install it on your device.
      10. -
      11. Tap on the app icon to launch the app and start using it.
      12. -
      -
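If you have a computer and a USB cable handy, the same APK can also be sideloaded with adb instead of tapping through a file manager. The sketch below is a minimal Python wrapper around adb, assuming adb is installed, USB debugging is enabled on the phone, and the file was saved as tiktok.apk (a hypothetical filename); it is an optional alternative to the steps above, not part of the official installation flow.

```python
import subprocess

APK_PATH = "tiktok.apk"  # hypothetical filename; point this at wherever you saved the APK
PACKAGE_NAME = "com.zhiliaoapp.musically"

def sideload(apk_path: str) -> None:
    # Confirm the phone is visible over USB before trying to install
    subprocess.run(["adb", "devices"], check=True)
    # Install the APK; -r reinstalls/updates if the app is already present
    subprocess.run(["adb", "install", "-r", apk_path], check=True)
    # Verify the package is now installed by listing it by name
    subprocess.run(["adb", "shell", "pm", "list", "packages", PACKAGE_NAME], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```

If the last command prints package:com.zhiliaoapp.musically, the install succeeded and the app icon will appear in your launcher just as if you had tapped the APK on the device.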

      How to use com.zhiliaoapp.musically to create and watch amazing videos

      -

      Creating your own videos

      -

      One of the main attractions of com.zhiliaoapp.musically is that you can create your own videos and share them with the world. Here are the steps:

      -
        -
      1. Open the app and tap on the plus sign (+) at the bottom center of the screen.
      2. -
      3. Choose whether you want to record a video, upload a video from your gallery, or use a template.
      4. -
      5. If you choose to record a video, you can select the music or sound clip that you want to use by tapping on the "Sounds" button at the top of the screen. You can also search for a specific song or sound by using the magnifying glass icon.
      6. -
      7. You can also adjust the recording settings by tapping on the icons at the right side of the screen. You can change the speed, beauty, filters, effects, timer, and flash options.
      8. -
      9. To start recording, press and hold the red circle button at the bottom of the screen. You can also tap it once to start and stop recording. You can record multiple clips in one video by pausing and resuming.
      10. -
      11. Once you are done recording, you can edit your video by tapping on the check mark button at the bottom right of the screen. You can trim, cut, merge, and duplicate video clips by using the scissors icon. You can also add stickers, text, emojis, and more by using the icons at the bottom of the screen.
      12. -
      13. When you are satisfied with your video, tap on the "Next" button at the top right of the screen. You can then add a caption, hashtags, mentions, and other details to your video. You can also choose who can view your video by tapping on "Who can view this video" and selecting from public, friends, or private options.
      14. -
      15. To post your video, tap on the "Post" button at the bottom right of the screen. You can also save your video to your device by tapping on "Save" or share it to other apps by tapping on "More".
      16. -
      -

      Watching and engaging with other videos

      -

      Besides creating your own videos, you can also watch and engage with other videos on com.zhiliaoapp.musically. Here are some tips:

      -
        -
      • To watch videos, you can swipe up or down on your home screen to see videos from users that you follow or from users that are recommended for you based on your preferences. You can also tap on the "Discover" tab at the bottom of the screen to see videos from different categories and hashtags. You can also search for specific keywords or users by using the magnifying glass icon at the bottom of the screen.
      • -
      • To engage with videos, you can tap on the icons at the right side of the screen. You can like a video by tapping on the heart icon, comment on a video by tapping on the speech bubble icon, share a video by tapping on the arrow icon, or follow a user by tapping on their profile picture. You can also join or create duets, reactions, live streams, and more by tapping on the icons at the bottom of the screen.
      • -
      • To see more details about a video, you can tap on the caption or the sound name at the bottom of the screen. You can also see more videos from the same user, sound, or hashtag by tapping on their names.
      • -
      -

      How to optimize your com.zhiliaoapp.musically experience

      -

      Choosing the right music and sound effects

      -

      One of the key elements of com.zhiliaoapp.musically is the music and sound effects that you use for your videos. They can make your videos more catchy, funny, or emotional. Here are some tips:

      -
        -
      • Choose a music or sound clip that matches your video theme and mood. You can browse through different genres, moods, and trends by tapping on the "Sounds" button at the top of the recording screen. You can also search for a specific song or sound by using the magnifying glass icon.
      • -
      • Choose a music or sound clip that has a clear and catchy rhythm, melody, or lyrics. This will help you sync your movements and expressions with the sound and create a more engaging video.
      • -
      • Choose a music or sound clip that is not too long or too short. You can adjust the length of your video by using the slider at the bottom of the recording screen. You can also trim or cut your video clips after recording by using the scissors icon.
      • -
      • Choose a music or sound clip that is not too loud or too quiet. You can adjust the volume of your video by using the slider at the top right of the editing screen. You can also mute or unmute your video by tapping on the speaker icon.
      • -
      -

      Using filters, effects, and AR objects

      -

      Another way to spice up your com.zhiliaoapp.musically videos is to use filters, effects, and AR objects. These are visual enhancements that you can add to your videos to make them more colorful, fun, or realistic. Here are some tips:

      -
        -
      • Choose a filter that suits your video theme and mood. You can browse through different filters by tapping on the "Filters" button at the right side of the recording screen. You can also swipe left or right on the screen to change filters.
      • -
      • Choose an effect that adds some flair to your video. You can browse through different effects by tapping on the "Effects" button at the right side of the recording screen. You can also swipe up or down on the screen to change effects.
      • -
      • Choose an AR object that adds some realism to your video. You can browse through different AR objects by tapping on the "AR Objects" button at the right side of the recording screen. You can also drag and drop the AR objects on the screen to place them.
      • -
      • Experiment with different combinations of filters, effects, and AR objects to create unique and interesting videos. You can also unlock more by completing challenges or joining events.
      • -
      -

      Editing your videos with integrated tools

      -

      The final step to optimize your com.zhiliaoapp.musically videos is to edit them with the integrated tools. These are tools that allow you to fine-tune your videos without leaving the app. Here are some tips:

      -

      -
        -
      • Trim, cut, merge, and duplicate your video clips by using the scissors icon at the bottom of the editing screen. You can also rearrange your video clips by dragging and dropping them.
      • -
      • Add stickers, text, emojis, and more by using the icons at the bottom of the editing screen. You can also resize, rotate, and move them by using your fingers.
      • -
      • Adjust the brightness, contrast, saturation, and other parameters of your video by using the "Adjust Clips" option at the top right of the editing screen. You can also apply a filter to your entire video by using the "Filter" option.
      • -
      • Add transitions, zooms, and other effects to your video by using the "Effects" option at the top right of the editing screen. You can also change the speed, direction, and duration of your effects by using the sliders.
      • -
      • Add music or sound effects to your video by using the "Sounds" option at the top right of the editing screen. You can also trim, loop, or mix your sounds by using the icons.
      • -
      -

      Conclusion

      -

      Summary of the main points

      -

      In conclusion, com.zhiliaoapp.musically is an app that allows you to create and watch short-form videos on various topics. It has many features that make it an exciting and engaging app for users of all ages and interests. You can download and install it on your device from Google Play Store or from an APK file. You can use it to create your own videos with music, sound effects, filters, effects, and AR objects. You can also watch and engage with other videos from different categories and hashtags. You can optimize your experience by choosing the right music and sound effects, using filters, effects, and AR objects, and editing your videos with integrated tools.

      -

      Call to action

      -

      If you are ready to join the millions of TikTok users who are having a blast on this app, then download com.zhiliaoapp.musically today and start creating and watching amazing videos. You will not regret it!

      -

      Frequently Asked Questions

      -

      What is com.zhiliaoapp.musically?

      -

      Com.zhiliaoapp.musically is the package name of TikTok, which is an app that allows you to create and watch short-form videos on various topics.

      -

      How do I download com.zhiliaoapp.musically?

      -

      You can download com.zhiliaoapp.musically from Google Play Store or from an APK file.

      -

      How do I create a video on com.zhiliaoapp.musically?

      -

      You can create a video on com.zhiliaoapp.musically by tapping on the plus sign (+) at the bottom center of the screen and choosing whether you want to record a video, upload a video from your gallery, or use a template.

      -

      How do I watch a video on com.zhiliaoapp.musically?

      -

      You can watch a video on com.zhiliaoapp.musically by swiping up or down on your home screen or tapping on the "Discover" tab at the bottom of the screen.

      -

      How do I edit a video on com.zhiliaoapp.musically?

      -

      You can edit a video on com.zhiliaoapp.musically by tapping on the check mark button at the bottom right of the recording screen and using the icons at the bottom of the editing screen.

      -

      I hope you enjoyed this article and learned something new about com.zhiliaoapp.musically. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy TikToking!

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download and Play Submerged a New Custom Map for Among Us with Over 25 New Tasks.md b/spaces/fatiXbelha/sd/Download and Play Submerged a New Custom Map for Among Us with Over 25 New Tasks.md deleted file mode 100644 index 92c1622c158523af9951efeb7c285c0a35b0a8c5..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download and Play Submerged a New Custom Map for Among Us with Over 25 New Tasks.md +++ /dev/null @@ -1,87 +0,0 @@ -
      -

      Submerged Download Among Us: How to Play the Underwater Map Mod

      -

      Among Us is a popular multiplayer game that involves deception, deduction, and teamwork. The game has four official maps that offer different settings and challenges for the players. However, some creative modders have created custom maps that add more variety and fun to the game. One of these custom maps is Submerged, an underwater-themed map that introduces new mechanics, tasks, and sabotages. In this article, we will show you how to download and install Submerged, and how to play it with your friends or online.

      -

      -

      What is Submerged?

      -

      Submerged is a mod for Among Us that adds a new map into the game. It was developed by a team of five modders who go by the name of SubmergedAmongUs. The mod was first released in September 2021, and has since been updated to support the latest versions of Among Us.

      -

      Features of Submerged

      -

      Submerged is a unique map that offers many features that are not found in the official maps. Some of these features are:

      -
        -
      • Multiple floors and elevators: The map has two decks, upper and lower, that are connected by elevators. Players can use the elevators to move between the decks, but they have to wait for them to arrive and open.
      • -
      • Over 25 new custom tasks: The map has many new tasks that are related to the underwater theme, such as spotting whale sharks, shooting depth charges, oxygenating sea plants, and more.
      • -
      • 4 new sabotages: The map has four new sabotages that can be triggered by the impostors, such as stabilizing water levels, retrieving oxygen masks, fixing lights, and clearing urchins.
      • -
      • New mechanics: The map has some new mechanics that affect the gameplay, such as fog of war, water pressure, oxygen depletion, and more.
      • -
      • Works on official servers: The map can be played on the official servers of Among Us, as well as on custom servers such as Impostor.
      • -
      -

      Compatibility of Submerged

      -

      Submerged is compatible with desktop releases of Among Us. This includes Steam, Epic Games, and Itch.io. It does not work on mobile or console versions of Among Us. It also requires Windows operating systems to run.

      -

      -

      Submerged is also compatible with some other mods that add new roles or features to Among Us, such as BetterTownOfUs, ExtremeRoles, TheOtherRoles, and more. However, using Submerged with any other mods might cause bugs or issues, unless the developers of those mods have ensured that their mod is compatible with Submerged. If you encounter problems while using Submerged with other mods, please contact the developers of those mods.

      -

      How to Download and Install Submerged

      -

      If you want to play Submerged, you need to download and install it on your computer. Here are the steps to do so:

      -

      Downloading Submerged

      -

      You can download Submerged from its GitHub page, where you can find the latest releases for each Among Us version. You need to download the correct ZIP file based on your Among Us version. For example, if you have Among Us v2022.10.25, you need to download Submerged v2022.10.26.

      -

      Installing Submerged

      -

      After downloading Submerged, you need to install it on your Among Us directory. Here are the steps to do so:

      -
        -
      1. Navigate to your Among Us installation directory. This is usually located in C:\Program Files (x86)\Steam\steamapps\common\Among Us for Steam users.
      2. -
3. Extract the downloaded ZIP file there, so that the mod's files end up alongside the Among Us executable (see the sketch below for one way to script this step). Once the files are in place, launch the game and enjoy playing Submerged with your friends or online. If you have any questions or feedback about Submerged, you can contact the developers of the mod on their Discord server or on their Twitter account.
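If you prefer to script the extraction instead of doing it by hand, a small Python sketch like the one below copies the archive's contents straight into the game folder. The install path and the ZIP file name here are assumptions based on the typical Steam layout and a typical download location, so adjust both to match your own setup.

```python
import zipfile
from pathlib import Path

# Assumed locations -- adjust both paths to match your own setup.
AMONG_US_DIR = Path(r"C:\Program Files (x86)\Steam\steamapps\common\Among Us")
MOD_ZIP = Path.home() / "Downloads" / "Submerged.zip"  # hypothetical file name

def install_mod(zip_path: Path, game_dir: Path) -> None:
    """Extract every file in the mod archive directly into the game folder."""
    if not game_dir.exists():
        raise FileNotFoundError(f"Among Us folder not found: {game_dir}")
    with zipfile.ZipFile(zip_path) as archive:
        archive.extractall(game_dir)
    print(f"Extracted {zip_path.name} into {game_dir}")

if __name__ == "__main__":
    install_mod(MOD_ZIP, AMONG_US_DIR)
```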

        -

        FAQs

        -

        Here are some frequently asked questions about Submerged:

        -
          -
        1. Q: Is Submerged safe to download and install?
        2. -A: Yes, Submerged is safe to download and install. It does not contain any viruses, malware, or spyware. It also does not modify any core files of Among Us, so it does not affect the game's performance or stability.
        3. Q: Can I play Submerged with other mods?
        4. -A: Yes, you can play Submerged with other mods that add new roles or features to Among Us, such as BetterTownOfUs, ExtremeRoles, TheOtherRoles, and more. However, you need to make sure that those mods are compatible with Submerged, and that they do not cause any bugs or issues. If you encounter any problems while playing Submerged with other mods, please contact the developers of those mods.
        5. Q: Can I play Submerged on mobile or console?
        6. -A: No, you cannot play Submerged on mobile or console. Submerged is only compatible with desktop releases of Among Us, such as Steam, Epic Games, and Itch.io. It also requires Windows operating systems to run.
        7. Q: How can I support the developers of Submerged?
        8. -A: You can support the developers of Submerged by following them on their social media accounts, joining their Discord server, giving them feedback, reporting bugs, sharing their mod with others, and donating to them via PayPal or Patreon.
        9. Q: Where can I find more information about Submerged?
        10. -A: You can find more information about Submerged on its GitHub page, where you can find the latest releases, changelogs, screenshots, videos, and more. You can also visit its website, where you can find a detailed guide on how to play Submerged. -

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Experience the thrill of Call of Duty Mobile - Garena with the latest APK file.md b/spaces/fatiXbelha/sd/Experience the thrill of Call of Duty Mobile - Garena with the latest APK file.md deleted file mode 100644 index 825ae33d94a6076ffc81af582d84c46ecefd3fdb..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Experience the thrill of Call of Duty Mobile - Garena with the latest APK file.md +++ /dev/null @@ -1,137 +0,0 @@ -
        -

        Call of Duty: Mobile - Garena APK: How to Download and Play the Legendary FPS Game on Your Android Device

        -

        If you are a fan of first-person shooter (FPS) games, you must have heard of Call of Duty, one of the most popular and successful franchises in the gaming industry. But did you know that you can also enjoy this thrilling and immersive game on your Android device? Yes, you read that right. Call of Duty: Mobile - Garena is a mobile version of the legendary FPS game that lets you experience the action-packed gameplay on your smartphone or tablet. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, and how to play it. So, without further ado, let's get started!

        -

        What is Call of Duty: Mobile - Garena?

        -

        A brief introduction to the game and its features

        -

        Call of Duty: Mobile - Garena is a free-to-play online multiplayer FPS game developed by TiMi Studios and published by Garena for Android devices. It is based on the Call of Duty franchise, which is known for its realistic graphics, fast-paced gameplay, and diverse modes and maps. The game offers a variety of modes, including Multiplayer, Battle Royale, Zombies, Ranked, and more. You can also customize your weapons, loadouts, skins, perks, and operators according to your preferences and play styles. The game also features some of the most iconic maps from the Call of Duty series, such as Nuketown, Crash, Killhouse, Scrapyard, and more. You can also team up with your friends or other players from around the world and compete in various events and tournaments.

        -

        -

        The difference between Call of Duty: Mobile - Garena and Call of Duty: Mobile - Global

        -

        You might be wondering what is the difference between Call of Duty: Mobile - Garena and Call of Duty: Mobile - Global, which are both mobile versions of the same game. Well, the main difference is that Call of Duty: Mobile - Garena is exclusive to Southeast Asia, while Call of Duty: Mobile - Global is available worldwide. This means that if you are living in countries like Indonesia, Malaysia, Singapore, Thailand, Vietnam, or Taiwan, you can only play Call of Duty: Mobile - Garena. However, if you are living in other regions, such as North America, Europe, or India, you can only play Call of Duty: Mobile - Global. Another difference is that Call of Duty: Mobile - Garena has some unique features and content that are not available in Call of Duty: Mobile - Global. For example, Call of Duty: Mobile - Garena has a new weapon called FFAR 1, which is a high-fire rate assault rifle. It also has a new multiplayer map called Armada Strike, which is a naval battle map with epic destroyers. It also has a new gameplay mode called Search & Rescue, which is a bomb mode with a twist. You can revive your allies by collecting their dog tags. It also has a new battle royale mode called Knight's Covenant, which allows you to collect resources and craft supplies for survival.

        -

        How to Download and Install Call of Duty: Mobile - Garena APK

        -

        The requirements and , which is a security setting that allows you to install apps that are not from the Google Play Store. You can enable this setting by going to the Settings app on your device, then tapping on Security or Privacy, then toggling on the Unknown Sources or Install Unknown Apps option.

      4. -
      5. The sixth and final requirement is that you need to have some patience and time, as the download and installation process may take a while depending on your device and internet speed.
      6. - -

        Once you have met all the requirements, you can follow these steps to download and install Call of Duty: Mobile - Garena APK from the official website:

        -
          -
        1. Launch the VPN app on your device and connect to a server in one of the Southeast Asian countries where Call of Duty: Mobile - Garena is available.
        2. -
        3. Open a web browser on your device and go to the official website of Call of Duty: Mobile - Garena. You should see a banner that says "Download Now" or "Get It On Google Play". Tap on it and you will be redirected to the Google Play Store page of the game.
        4. -
        5. Tap on the "Install" button and wait for the game to download and install on your device. You may need to grant some permissions and accept some terms and conditions before the installation begins.
        6. -
        7. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You may need to sign in with your Garena account or create one if you don't have one. You may also need to download some additional data and updates before you can start playing.
        8. -
        -

        The alternative ways to download and install the game from third-party sources

        -

        If you don't want to download and install Call of Duty: Mobile - Garena APK from the official website, you can also try some alternative ways to get the game from third-party sources. However, you should be careful and cautious when using these methods, as they may pose some risks and drawbacks. Here are some of the alternative ways to download and install the game from third-party sources:

        -
          -
        • One of the alternative ways is to use an APK downloader website, which is a website that allows you to download APK files of various apps and games. Some of the popular APK downloader websites are APKPure, APKMirror, and Uptodown. To use this method, you need to go to one of these websites on your web browser, search for Call of Duty: Mobile - Garena, and download the latest version of the APK file. Then, you need to locate the downloaded file on your device, tap on it, and follow the instructions to install it. You may need to enable the installation of apps from unknown sources if you haven't done so already.
        • -
        • Another alternative way is to use an app store app, which is an app that allows you to access and download various apps and games that are not available on the Google Play Store. Some of the popular app store apps are Aptoide, ACMarket, and HappyMod. To use this method, you need to download one of these apps from their respective websites or other sources, install it on your device, launch it, search for Call of Duty: Mobile - Garena, and download and install it. You may need to enable the installation of apps from unknown sources if you haven't done so already.
        • -
        • A third alternative way is to use a modded APK file, which is an APK file that has been modified or hacked by someone to provide some extra features or advantages in the game. Some of the modded APK files for Call of Duty: Mobile - Garena may offer unlimited money, unlocked weapons, aimbot, wallhack, or other cheats. To use this method, you need to find a reliable source that provides modded APK files for Call of Duty: Mobile - Garena, such as a website, a forum, or a Telegram channel. Then, you need to download the modded APK file from that source, install it on your device, and launch it. You may need to enable the installation of apps from unknown sources if you haven't done so already.
        • -
        -
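If you download the APK file to a computer first rather than directly to your phone, one common way to sideload it is with adb (the Android Debug Bridge from Google's platform tools) over a USB connection with USB debugging enabled. The sketch below simply wraps the standard adb install command in Python; the APK file name is a placeholder for whatever you actually downloaded.

```python
import subprocess
from pathlib import Path

# Hypothetical path to the APK you downloaded from one of the sources above.
APK_PATH = Path.home() / "Downloads" / "codm-garena.apk"

def sideload(apk: Path) -> None:
    """Install an APK onto a USB-connected Android device using adb."""
    # List connected devices first so a missing device fails loudly.
    subprocess.run(["adb", "devices"], check=True)
    # -r replaces an existing install while keeping its data.
    subprocess.run(["adb", "install", "-r", str(apk)], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```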

        The possible risks and benefits of using third-party sources

        -

        While using third-party sources may seem tempting and convenient, they also come with some risks and benefits that you should be aware of before deciding whether to use them or not. Here are some of the possible risks and benefits of using third-party sources:

| Risks | Benefits |
| --- | --- |
| You may download a fake or malicious APK file that may harm your device or steal your personal information. | You may download a genuine and updated APK file that may provide you with the latest features and content of the game. |
| You may violate the terms and conditions of the game or the platform and get banned or suspended from playing or accessing it. | You may bypass the geo-restrictions and play the game in any region you want. |
| You may encounter some bugs, errors, or compatibility issues that may affect your gameplay or device performance. | You may enjoy some extra features or advantages that may enhance your gameplay or device performance. |
        -

        Therefore, you should weigh the pros and cons of using third-party sources and make an informed decision based on your own judgment and risk tolerance. You should also always scan the APK files with a reputable antivirus app before installing them and backup your data regularly in case of any problems.

        -

        How to Play Call of Duty: Mobile - Garena on Your Android Device

        -

        The basic controls and settings of the game

        -

        Once you have downloaded and installed Call of Duty: Mobile - Garena on your Android device, you can start playing it by launching it and signing in with your Garena account. You will then see the main menu of the game, where you can access various options and features. Here are some of the basic controls and settings of the game that you should know:

        -
          -
        • To move your character, you can use the virtual joystick on the left side of the screen. To aim, you can swipe on the right side of the screen. To shoot, you can tap on the fire button on the right side of the screen. You can also use other buttons to reload, crouch, jump, switch weapons, throw grenades, use skills, and more.
        • -
        • To change your view, you can toggle between first-person and third-person perspectives by tapping on the eye icon on the top right corner of the screen. You can also adjust your camera sensitivity and field of view in the settings menu.
        • -
        • To customize your controls, you can go to the settings menu and tap on the controls tab. You can choose between simple mode, which automatically fires when you aim at an enemy, and advanced mode, which gives you more control over your firing. You can also drag and drop the buttons to rearrange them according to your preference. You can also enable or disable features such as aim assist, gyroscope, auto-sprint, quick run, quick melee, and more.
        • -
        • To customize your graphics, you can go to the settings menu and tap on the graphics tab. You can choose between low, medium, high, or very high graphics quality depending on your device capability. You can also enable or disable features such as anti-aliasing, depth of field, bloom, ragdoll physics, real-time shadows, water reflection, and more.
        • -
        • To customize your audio, you can go to the settings menu and tap on the audio tab. You can adjust the volume levels of music, sound effects, voice chat, and microphone. You can also enable or disable features such as 3D sound effect , and voice chat mode. You can also choose between different languages for the game audio and subtitles.
        • -
        -

        The different game modes and maps available in the game

        -

        Call of Duty: Mobile - Garena offers a variety of game modes and maps that you can choose from depending on your mood and preference. Here are some of the different game modes and maps available in the game:

        -
          -
        • Multiplayer: This is the classic mode where you can compete with other players in various modes such as Team Deathmatch, Domination, Search & Destroy, Hardpoint, Kill Confirmed, Capture the Flag, and more. You can also play in ranked matches to earn rewards and climb the leaderboards. Some of the multiplayer maps are Nuketown, Crash, Killhouse, Scrapyard, Crossfire, Firing Range, Hijacked, Summit, Raid, and more.
        • -
        • Battle Royale: This is the mode where you can fight for survival with up to 100 players in a large map that shrinks over time. You can play solo, duo, or squad mode and choose between first-person or third-person perspective. You can also customize your class, loadout, vehicle, and airdrop before the match. Some of the battle royale maps are Isolated, Alcatraz, Blitz, and Knight's Covenant.
        • -
        • Zombies: This is the mode where you can team up with other players and fight against hordes of zombies in various scenarios. You can also unlock new weapons, perks, and skills as you progress. Some of the zombies maps are Nacht der Untoten, Shi No Numa, Call of the Dead, and Outbreak.
        • -
        • Other modes: There are also some other modes that you can play in Call of Duty: Mobile - Garena, such as Gun Game, Prop Hunt, Attack of the Undead, Rapid Fire, Cranked, One Shot One Kill, Sticks and Stones, and more. These modes are usually available for a limited time and offer some fun and unique gameplay experiences.
        • -
        -

        The tips and tricks to improve your skills and performance in the game

        -

        If you want to improve your skills and performance in Call of Duty: Mobile - Garena, you need to practice a lot and learn from your mistakes. You also need to follow some tips and tricks that can help you gain an edge over your opponents. Here are some of the tips and tricks that you should keep in mind:

        -

        -
          -
        • Choose the right sensitivity: Sensitivity is how fast or slow your camera moves when you swipe on the screen. You need to find the right sensitivity that suits your play style and device. You can adjust your sensitivity in the settings menu under the controls tab. You can also use different sensitivity settings for different modes and perspectives.
        • -
        • Choose the right loadout: Loadout is what you bring to the battle, such as your weapons, attachments, perks, operator skills, grenades, and tactical equipment. You need to choose the right loadout that suits your mode and map. You can customize your loadout in the loadout menu before each match. You can also create different loadouts for different situations.
        • -
        • Choose the right class: Class is what you specialize in when you play battle royale mode. Each class has a unique ability and passive skill that can give you an advantage in certain scenarios. You need to choose the right class that suits your strategy and team composition. You can customize your class in the class menu before each match. You can also change your class during the match by finding class upgrade stations.
        • -
        • Use cover and movement: Cover and movement are essential for survival and combat in Call of Duty: Mobile - Garena. You need to use cover to protect yourself from enemy fire and peek out to shoot back. You also need to move constantly to avoid being an easy target and flank your enemies. You can use different movements such as sprinting , sliding, crouching, jumping, and prone to move faster and more unpredictably. You can also use vehicles to travel across the map and run over your enemies.
        • -
        • Use the mini-map and radar: The mini-map and radar are useful tools that can help you locate your enemies and allies. The mini-map shows you a small portion of the map and the direction of your enemies and allies. The radar shows you a larger portion of the map and the distance of your enemies and allies. You can also use UAVs, spy planes, or other equipment to reveal more information on the mini-map and radar.
        • -
        • Use the voice chat and ping system: The voice chat and ping system are useful features that can help you communicate with your teammates. The voice chat allows you to talk to your teammates using your microphone. The ping system allows you to send signals to your teammates using your screen. You can use these features to coordinate your attacks, share information, request assistance, or warn about dangers.
        • -
        • Use the practice mode and training mode: The practice mode and training mode are useful modes that can help you improve your skills and performance in Call of Duty: Mobile - Garena. The practice mode allows you to play against bots or other players in various modes and maps. The training mode allows you to test your weapons, attachments, skills, and equipment in a shooting range. You can use these modes to practice your aim, movement, strategy, and tactics.
        • -
        -

        Conclusion

        -

        Call of Duty: Mobile - Garena is a mobile version of the legendary FPS game that lets you experience the action-packed gameplay on your Android device. You can download and install the game from the official website or from third-party sources, but you need to be careful and cautious when using the latter. You can also play the game in various modes and maps, such as multiplayer, battle royale, zombies, and more. You can also customize your controls, graphics, audio, loadout, class, and more according to your preferences and play styles. You can also follow some tips and tricks to improve your skills and performance in the game. If you are a fan of FPS games, you should definitely give Call of Duty: Mobile - Garena a try. You will not regret it!

        -

        FAQs

        -

        Is Call of Duty: Mobile - Garena free to play?

        -

        Yes, Call of Duty: Mobile - Garena is free to play. However, it also has some in-game purchases that can enhance your gameplay or appearance. You can buy items such as COD Points, Battle Passes, Crates, Bundles, Skins, Weapons, Operators, and more using real money or in-game currency.

        -

        Is Call of Duty: Mobile - Garena compatible with my device?

        -

        To play Call of Duty: Mobile - Garena on your Android device, you need to have Android 4.3 or higher and at least 2 GB of RAM and 4 GB of free storage space. You also need to have a stable internet connection and enough battery power. If your device meets these requirements, you should be able to play the game smoothly.

        -

        How can I update Call of Duty: Mobile - Garena?

        -

        To update Call of Duty: Mobile - Garena on your Android device , you need to follow the same method that you used to download and install the game. For example, if you downloaded and installed the game from the official website, you need to go to the official website again and download and install the latest version of the APK file. If you downloaded and installed the game from a third-party source, you need to go to that source again and download and install the latest version of the APK file. You may also receive notifications or prompts from the game or the platform to update the game when a new version is available.

        -

        How can I contact the customer service of Call of Duty: Mobile - Garena?

        -

        If you have any questions, issues, or feedback about Call of Duty: Mobile - Garena, you can contact the customer service of the game by using one of these methods:

        -
          -
        • You can go to the settings menu in the game and tap on the customer service tab. You can then choose between live chat, email, or phone call to communicate with a customer service representative.
        • -
        • You can go to the official website of Call of Duty: Mobile - Garena and tap on the support button. You can then fill out a form with your details and query and submit it. You will receive a response via email within 24 hours.
        • -
        • You can go to the official Facebook page of Call of Duty: Mobile - Garena and send a message with your query. You will receive a reply within 48 hours.
        • -
        -

        How can I play Call of Duty: Mobile - Garena on my PC?

        -

        If you want to play Call of Duty: Mobile - Garena on your PC, you need to use an Android emulator, which is a software that allows you to run Android apps and games on your PC. Some of the popular Android emulators are BlueStacks, NoxPlayer, and Gameloop. To use this method, you need to download and install one of these emulators on your PC, launch it, sign in with your Google account, search for Call of Duty: Mobile - Garena in the emulator's app store, download and install it, and start playing. You can also customize your keyboard and mouse controls in the emulator's settings.

        -
        -
        \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/models/roformer/configuration_roformer.py b/spaces/fclong/summary/fengshen/models/roformer/configuration_roformer.py deleted file mode 100644 index 4818b31bd215b11d4ca952437869319fc25ae5b5..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/roformer/configuration_roformer.py +++ /dev/null @@ -1,133 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" RoFormer model configuration """ - - -from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - -RoFormer_PRETRAINED_CONFIG_ARCHIVE_MAP = { - # See all RoFormer models at https://huggingface.co/models?filter=bert -} - - -class RoFormerConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a :class:`~transformers.RoFormerModel`. It is - used to instantiate a RoFormer model according to the specified arguments, defining the model architecture. - Instantiating a configuration with the defaults will yield a similar configuration to that of the RoFormer - `megatron-bert-uncased-345m `__ architecture. - - Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model - outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. - - - Args: - vocab_size (:obj:`int`, `optional`, defaults to 29056): - Vocabulary size of the RoFormer model. Defines the number of different tokens that can be represented - by the :obj:`inputs_ids` passed when calling :class:`~transformers.RoFormerModel`. - hidden_size (:obj:`int`, `optional`, defaults to 1024): - Dimensionality of the encoder layers and the pooler layer. - num_hidden_layers (:obj:`int`, `optional`, defaults to 24): - Number of hidden layers in the Transformer encoder. - num_attention_heads (:obj:`int`, `optional`, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (:obj:`int`, `optional`, defaults to 4096): - Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. - hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, - :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported. - hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (:obj:`int`, `optional`, defaults to 512): - The maximum sequence length that this model might ever be used with. 
Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (:obj:`int`, `optional`, defaults to 2): - The vocabulary size of the :obj:`token_type_ids` passed when calling - :class:`~transformers.RoFormerModel`. - initializer_range (:obj:`float`, `optional`, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): - The epsilon used by the layer normalization layers. - gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): - If True, use gradient checkpointing to save memory at the expense of slower backward pass. - position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`): - Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`, - :obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on - :obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.) - `__. For more information on :obj:`"relative_key_query"`, please refer to - `Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.) - `__. - use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): - Whether or not the model should return the last key/values attentions (not used by all models). Only - relevant if ``config.is_decoder=True``. - - Examples:: - - >>> from transformers import RoFormerModel, RoFormerConfig - - >>> # Initializing a RoFormer bert-base-uncased style configuration - >>> configuration = RoFormerConfig() - - >>> # Initializing a model from the bert-base-uncased style configuration - >>> model = RoFormerModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - """ - model_type = "roformer" - - def __init__( - self, - vocab_size=29056, - hidden_size=1024, - num_hidden_layers=24, - num_attention_heads=16, - intermediate_size=4096, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - pad_token_id=0, - gradient_checkpointing=False, - position_embedding_type="absolute", - use_cache=True, - **kwargs - ): - super().__init__(pad_token_id=pad_token_id, **kwargs) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.gradient_checkpointing = gradient_checkpointing - self.position_embedding_type = position_embedding_type - self.use_cache = use_cache diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Commercial Registration e-Service in Qatar How to Print CR Copy Online and Renew Your License.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Commercial Registration e-Service in Qatar How to Print CR Copy Online and Renew Your License.md deleted file mode 100644 index 7d9f2be6189d66ecd6580295f3b6069a0bd34321..0000000000000000000000000000000000000000 --- 
a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Commercial Registration e-Service in Qatar How to Print CR Copy Online and Renew Your License.md +++ /dev/null @@ -1,162 +0,0 @@ - -

        How to Download CR Copy Qatar

        -

        If you own a company or a vehicle in Qatar, you may need to download a copy of your commercial registration (CR) or vehicle registration (Istimara) for various purposes. In this article, we will explain what is CR Copy Qatar, how to print it online, and how to renew your vehicle registration in Qatar.

        -

        What is CR Copy Qatar?

        -

        Definition and purpose of CR Copy

        -

        CR Copy Qatar is a document that shows the details of your company or business entity registered with the Ministry of Commerce and Industry (MOCI) in Qatar. It includes information such as the name, address, activities, partners, capital, and expiry date of your company. It also shows the QR code and barcode of your company.

        -

        -

        CR Copy is required for various purposes, such as opening a bank account, applying for a loan, obtaining a visa, renting an office space, participating in tenders, or dealing with other government entities. It is also a proof of your legal status and identity as a business owner in Qatar.

        -

        How to obtain CR Copy for the first time

        -

        If you want to start a new business or company in Qatar, you need to apply for a commercial registration (CR) at the Commercial Registration and Licenses Department of MOCI. You can do this online through the MOCI website or visit one of the service centers. You will need to provide the following documents:

        -
          -
        • A completed application form
        • -
        • A copy of your ID or passport
        • -
        • A copy of your trade name reservation
        • -
        • A copy of your lease contract or office ownership certificate
        • -
        • A copy of your articles of association or partnership contract
        • -
        • A copy of your approval from the relevant authorities (if applicable)
        • -
        • A receipt of payment of the fees
        • -
        -

        Once your application is approved, you will receive your CR certificate and CR card. You can also print a copy of your CR online through the MOCI website or Metrash2 app.

        -

        How to print CR Copy online

        -

        Requirements for printing CR Copy online

        -

        If you already have a valid CR and you want to print a copy of it online, you need to meet the following requirements:

        -
          -
        • You must have cleared all traffic fines (if you own a vehicle)
        • -
        • You must have passed the yearly technical inspection (for vehicles older than three years)
        • -
• You must have renewed your vehicle insurance and be able to provide proof of renewal
        • -
        • You must have an active account on Metrash2 app or MOI website
        • -
        • You must have a debit or credit card for payment
        • -
        -

        Steps to print CR Copy online via Metrash2 or MOI website

        -

        You can print your CR Copy online through the Metrash2 app or the MOI website by following these steps:

        -
          -
        1. Log in to the Metrash2 app or the MOI website with your username and password
        2. -
        3. Click on "Traffic" and then "Vehicle Services"
        4. -
        5. Click on "Renew Vehicle Registration"
        6. -
        7. Enter your vehicle plate number and select the type of vehicle
        8. -
        9. Choose - Choose the delivery method of your CR Copy. You can either receive it by email, SMS, or QPost
        10. -
        11. Review the details of your CR and vehicle registration and confirm them
        12. -
        13. Pay the fees using your debit or credit card
        14. -
        15. Receive your CR Copy and vehicle registration certificate by your chosen delivery method
        16. -
        -

        Steps to print CR Copy online via QPost

        -

        You can also print your CR Copy online through the QPost website by following these steps:

        -

        -
          -
        1. Visit the QPost website and click on "E-Government"
        2. -
        3. Click on "Ministry of Interior" and then "Traffic Services"
        4. -
        5. Click on "Print Commercial Registration Copy"
        6. -
        7. Enter your CR number and click on "Search"
        8. -
        9. Review the details of your CR and click on "Add to Cart"
        10. -
        11. Proceed to checkout and pay the fees using your debit or credit card
        12. -
        13. Receive your CR Copy by mail within a few days
        14. -
        -

        How to renew your vehicle registration (Istimara) in Qatar

        -

        Requirements for renewing your vehicle registration

        -

        If you want to renew your vehicle registration (Istimara) in Qatar, you need to meet the following requirements:

        -
          -
        • Your vehicle must be registered under your name or your company name
        • -
        • Your vehicle must have a valid technical inspection certificate (for vehicles older than three years)
        • -
• Your vehicle must have a valid insurance policy, and you must provide proof of renewal
        • -
        • You must have cleared all traffic fines (if any)
        • -
        • You must have an active account on Metrash2 app or MOI website
        • -
        • You must have a debit or credit card for payment
        • -
        -

        Steps to renew your vehicle registration at a service center

        -

        You can renew your vehicle registration at any of the service centers of the Traffic Department or the General Directorate of Passports by following these steps:

        -
          -
        1. Visit the service center and submit the following documents:
        2. -
            -
          • Your original ID or passport
          • -
          • Your original vehicle registration certificate
          • -
          • Your original technical inspection certificate (for vehicles older than three years)
          • -
          • Your original insurance policy or proof of renewal
          • -
          -
        3. Pay the fees using your debit or credit card or cash
        4. -
        5. Receive your new vehicle registration certificate and sticker
        6. -
        -

        Steps to renew your vehicle registration online via Metrash2 or MOI website

        -

        You can also renew your vehicle registration online through the Metrash2 app or the MOI website by following these steps:

        -
          -
        1. Log in to the Metrash2 app or the MOI website with your username and password
        2. -
        3. Click on "Traffic" and then "Vehicle Services"
        4. -
        5. Click on "Renew Vehicle Registration"
        6. -
        7. Enter your vehicle plate number and select the type of vehicle
        8. -
        9. Choose the delivery method of your new vehicle registration certificate. You can either receive it by email, SMS, or QPost
        10. -
        11. Review the details of your vehicle registration and confirm them
        12. -
        13. Pay the fees using your debit or credit card
        14. -
        15. Receive your new vehicle registration certificate and sticker by your chosen delivery method
        16. -
        -

        Conclusion

        -

        In this article, we have explained what is CR Copy Qatar, how to print it online, and how to renew your vehicle registration in Qatar. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to contact us.

        -

        FAQs

        -

        What is the fee for printing CR Copy online?

        -

        The fee for printing CR Copy online is 10 QAR per copy, plus 20 QAR for delivery by QPost (if applicable).

        -

        How long does it take to receive CR Copy by QPost?

        -

        It usually takes 3 to 5 working days to receive CR Copy by QPost, depending on the availability of mail services.

        -

        What is the penalty for not renewing your vehicle registration on time?

        -

        If you fail to renew your vehicle registration on time, you will be fined 500 QAR for each month of delay, up to a maximum of 6000 QAR. You will also be liable for any traffic violations that occur during the period of expiry.
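Because the fine is a flat per-month charge with a cap, you can estimate it yourself before heading to a service center. The sketch below assumes the delay is counted in whole months, which is how the rule is described above.

```python
def late_renewal_fine(months_late: int, per_month: int = 500, cap: int = 6000) -> int:
    """Estimate the late-renewal fine: 500 QAR per month of delay, capped at 6,000 QAR."""
    if months_late <= 0:
        return 0
    return min(months_late * per_month, cap)

# Example: 4 months late -> 2,000 QAR; 15 months late -> capped at 6,000 QAR.
print(late_renewal_fine(4))   # 2000
print(late_renewal_fine(15))  # 6000
```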

        -

        How can I check my traffic violations in Qatar?

        -

You can check your traffic violations in Qatar online through the Metrash2 app or the MOI website by following these steps:

        -
          -
        1. Log in to the Metrash2 app or the MOI website with your username and password
        2. -
        3. Click on "Traffic" and then "Traffic Violations"
        4. -
        5. Enter your ID number or vehicle plate number and click on "Search"
        6. -
        7. View the details of your traffic violations, such as the date, time, location, type, and amount of fine
        8. -
        9. Pay the fines online using your debit or credit card (if applicable)
        10. -
        -

        Where can I find more information about traffic services in Qatar?

        -

You can find more information about traffic services in Qatar on the Ministry of Interior (MOI) website and through the Metrash2 app.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Guardian Tales APK Mod and Enjoy Unlimited Gems and Coins.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Guardian Tales APK Mod and Enjoy Unlimited Gems and Coins.md deleted file mode 100644 index e08b479140d4593cc324f1e0cfacfff434d7d881..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Guardian Tales APK Mod and Enjoy Unlimited Gems and Coins.md +++ /dev/null @@ -1,97 +0,0 @@ - -

        Guardian Tales: A Link to Classic Adventure


        Guardian Tales is a charming and nostalgic RPG that pays homage to the classic 8-bit games and JRPGs of old. The game features puzzle-solving gameplay, strategic action combat, challenging dungeons and bosses, intense PVP and rankings, hero and weapon collection, guild creation, floating castle customization, tribute parodies, and much more. Guardian Tales is available for free on Android, iOS, and Nintendo Switch devices.


        guardian tales unlimited gems apk


        Download: https://gohhs.com/2uPuck




        If you are a fan of Guardian Tales and want to experience more of its fun and quirky world, you might be tempted to try out Guardian Tales Unlimited Gems APK. This is a modified version of the game that claims to offer unlimited gems, platinmods, and other premium benefits. But before you download and install this mod apk, you should know what it is, how it works, what are the risks involved, and what are the alternatives.


        What is Guardian Tales Unlimited Gems APK?


        Guardian Tales Unlimited Gems APK is a mod apk that is created by third-party developers who are not affiliated with the official game developers. A mod apk is an altered version of an original app that changes some of its features or adds new ones. In this case, the mod apk claims to provide unlimited gems, platinmods, and other benefits for Guardian Tales players.


        Gems are the premium currency in Guardian Tales that can be used to summon heroes and weapons, buy items, refresh stamina, and more. Platinmods are special mods that can enhance the game's performance, graphics, speed, cheats, and hacks. By using Guardian Tales Unlimited Gems APK, you can supposedly get access to these features without spending any real money or time.


        How to Download and Install Guardian Tales Unlimited Gems APK?


        If you want to try out Guardian Tales Unlimited Gems APK, you will need to follow these steps:

        1. Find a reliable source that offers the latest version of Guardian Tales Unlimited Gems APK. You can search for it on Google or use one of the links provided by third-party websites.
        2. Download the mod apk file to your device. Make sure you have enough storage space and a stable internet connection.
        3. Before installing the mod apk, you will need to enable unknown sources on your device. This will allow you to install apps from sources other than the official app store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
        4. Locate the downloaded mod apk file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for it to finish.
        5. Once installed, you can launch Guardian Tales Unlimited Gems APK from your app drawer or home screen. You may need to log in with your existing account or create a new one.

        What are the Risks of Using Guardian Tales Unlimited Gems APK?


        While Guardian Tales Unlimited Gems APK may sound tempting, it also comes with many risks that you should be aware of before using it. Here are some of them:

        • You may violate the terms of service of Guardian Tales and get banned from playing the game. The official game developers do not support or endorse any mod apks or hacks that alter their game. If they detect any suspicious activity or abnormal data on your account, they may suspend or terminate your access to the game without warning or compensation.
        • You may expose your device and personal information to malware or viruses. Since mod apks are not verified or tested by the official app store or antivirus software, they may contain harmful code or files that can damage your device or steal your data. You may also encounter annoying ads, pop-ups, or redirects that may interfere with your gaming experience or lead you to unsafe websites.
        • You may miss out on the updates, bug fixes, and new features of the official game. Since mod apks are not compatible with the original game, you may not be able to update your game or enjoy the latest content and improvements. You may also encounter glitches, errors, or crashes that may ruin your gameplay.

        Therefore, using Guardian Tales Unlimited Gems APK is not recommended and may result in more harm than good. You should always play the game fair and square and respect the hard work of the game developers.


        What are the Alternatives to Guardian Tales Unlimited Gems APK?


        If you are looking for other ways to enjoy Guardian Tales without using Guardian Tales Unlimited Gems APK, here are some alternatives that you can try:

        • You can earn gems by completing quests, achievements, events, and daily missions. You can also get gems by participating in PVP battles, guild raids, tower of horizon, and other game modes. You can also watch ads or use coupon codes to get free gems occasionally.
        • You can use platinmods.com to find legit mods and hacks for Guardian Tales that are safe and tested by a trusted community. Platinmods.com offers mods such as god mode, damage multiplier, speed hack, and more. However, you will need to register and pay a subscription fee to access these mods.
        • You can use an emulator such as BlueStacks or NoxPlayer to play Guardian Tales on your PC or Mac. This will give you a bigger screen, better graphics, faster performance, and more control options. You can also use the emulator's features such as macro recorder, multi-instance, key mapping, and more to enhance your gameplay.

        Conclusion


        Guardian Tales is a fun and nostalgic RPG that offers a lot of content and features for players to enjoy. However, using Guardian Tales Unlimited Gems APK is not a good idea as it may cause you to lose your account, damage your device, or miss out on the official game updates. Instead, you should play the game legitimately and use the alternatives suggested above to improve your gaming experience. Guardian Tales is a game that deserves your support and appreciation.


        FAQs

        1. What is Guardian Tales?

          Guardian Tales is a charming and nostalgic RPG that pays homage to the classic 8-bit games and JRPGs of old. The game features puzzle-solving gameplay, strategic action combat, challenging dungeons and bosses, intense PVP and rankings, hero and weapon collection, guild creation, floating castle customization, tribute parodies, and much more.


          guardian tales mod apk unlimited money and gems
          -guardian tales hack apk free gems and coins
          -guardian tales cheats apk unlimited gold and gems
          -guardian tales premium apk unlimited resources and gems
          -guardian tales cracked apk unlimited stamina and gems
          -guardian tales latest version apk unlimited everything and gems
          -guardian tales vip apk unlimited tickets and gems
          -guardian tales pro apk unlimited keys and gems
          -guardian tales unlocked apk unlimited characters and gems
          -guardian tales patched apk unlimited weapons and gems
          -guardian tales full apk unlimited levels and gems
          -guardian tales mega mod apk unlimited skills and gems
          -guardian tales god mode apk unlimited health and gems
          -guardian tales offline apk unlimited offline rewards and gems
          -guardian tales online apk unlimited online rewards and gems
          -guardian tales no root apk unlimited access and gems
          -guardian tales no ads apk unlimited fun and gems
          -guardian tales 2.70.0 mod apk unlimited updates and gems
          -guardian tales platinmods apk unlimited mods and gems
          -guardian tales rexdl apk unlimited downloads and gems
          -guardian tales revdl apk unlimited installs and gems
          -guardian tales an1 apk unlimited features and gems
          -guardian tales happymod apk unlimited happiness and gems
          -guardian tales apkpure apk unlimited purity and gems
          -guardian tales apkmody apk unlimited quality and gems
          -guardian tales android 1 apk unlimited compatibility and gems
          -guardian tales android republic apk unlimited security and gems
          -guardian tales moddroid apk unlimited performance and gems
          -guardian tales ihackedit apk unlimited hacks and gems
          -guardian tales blackmod apk unlimited black magic and gems
          -guardian tales mod menu apk unlimited options and gems
          -guardian tales modded apk unlimited customization and gems
          -guardian tales generator apk unlimited generation and gems
          -guardian tales injector apk unlimited injection and gems
          -guardian tales editor apk unlimited editing and gems
          -guardian tales tool apk unlimited tools and gems
          -guardian tales trainer apk unlimited training and gems
          -guardian tales script apk unlimited scripting and gems
          -guardian tales bot apk unlimited automation and gems
          -guardian tales glitch apk unlimited glitches and gems
          -guardian tales exploit apk unlimited exploits and gems
          -guardian tales cheat engine apk unlimited cheating and gems
          -guardian tales game killer apk unlimited killing and gems
          -guardian tales lucky patcher apk unlimited patching and gems
          -guardian tales game hacker apk unlimited hacking and gems
          -guardian tales game guardian apk unlimited guarding and gems
          -guardian tales sb game hacker apk unlimited sb hacking and gems
          -guardian tales freedom apk unlimited freedom and gems

        2. What is Guardian Tales Unlimited Gems APK?

          Guardian Tales Unlimited Gems APK is a mod apk that claims to offer unlimited gems, platinmods, and other benefits for Guardian Tales players. However, it is not recommended to use it as it may violate the terms of service of the game and expose your device and personal information to malware or viruses.

        3. How to Download and Install Guardian Tales Unlimited Gems APK?

          To download and install Guardian Tales Unlimited Gems APK, you will need to find a reliable source that offers the latest version of the mod apk, download it to your device, enable unknown sources on your device settings, locate the mod apk file on your device and tap on it to start the installation process, and launch the mod apk from your app drawer or home screen.

        4. What are the Risks of Using Guardian Tales Unlimited Gems APK?

          The risks of using Guardian Tales Unlimited Gems APK include getting banned from playing the game, exposing your device and personal information to malware or viruses, missing out on the updates, bug fixes, and new features of the official game, and encountering glitches, errors, or crashes that may ruin your gameplay.

        5. What are the Alternatives to Guardian Tales Unlimited Gems APK?

          The alternatives to Guardian Tales Unlimited Gems APK include earning gems by completing quests, achievements, events, and daily missions, using platinmods.com to find legit mods and hacks for Guardian Tales that are safe and tested by a trusted community, and using an emulator such as BlueStacks or NoxPlayer to play Guardian Tales on your PC or Mac.


        \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/assert.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/assert.d.ts deleted file mode 100644 index e8595e637123b36d6796d5e159ebbb5320254cb2..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/assert.d.ts +++ /dev/null @@ -1,961 +0,0 @@ -/** - * The `assert` module provides a set of assertion functions for verifying - * invariants. - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/assert.js) - */ -declare module 'assert' { - /** - * An alias of {@link ok}. - * @since v0.5.9 - * @param value The input that is checked for being truthy. - */ - function assert(value: unknown, message?: string | Error): asserts value; - namespace assert { - /** - * Indicates the failure of an assertion. All errors thrown by the `assert` module - * will be instances of the `AssertionError` class. - */ - class AssertionError extends Error { - actual: unknown; - expected: unknown; - operator: string; - generatedMessage: boolean; - code: 'ERR_ASSERTION'; - constructor(options?: { - /** If provided, the error message is set to this value. */ - message?: string | undefined; - /** The `actual` property on the error instance. */ - actual?: unknown | undefined; - /** The `expected` property on the error instance. */ - expected?: unknown | undefined; - /** The `operator` property on the error instance. */ - operator?: string | undefined; - /** If provided, the generated stack trace omits frames before this function. */ - // tslint:disable-next-line:ban-types - stackStartFn?: Function | undefined; - }); - } - /** - * This feature is currently experimental and behavior might still change. - * @since v14.2.0, v12.19.0 - * @experimental - */ - class CallTracker { - /** - * The wrapper function is expected to be called exactly `exact` times. If the - * function has not been called exactly `exact` times when `tracker.verify()` is called, then `tracker.verify()` will throw an - * error. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. - * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func); - * ``` - * @since v14.2.0, v12.19.0 - * @param [fn='A no-op function'] - * @param [exact=1] - * @return that wraps `fn`. - */ - calls(exact?: number): () => void; - calls any>(fn?: Func, exact?: number): Func; - /** - * Example: - * - * ```js - * import assert from 'node:assert'; - * - * const tracker = new assert.CallTracker(); - * - * function func() {} - * const callsfunc = tracker.calls(func); - * callsfunc(1, 2, 3); - * - * assert.deepStrictEqual(tracker.getCalls(callsfunc), - * [{ thisArg: this, arguments: [1, 2, 3 ] }]); - * ``` - * - * @since v18.8.0, v16.18.0 - * @params fn - * @returns An Array with the calls to a tracked function. - */ - getCalls(fn: Function): CallTrackerCall[]; - /** - * The arrays contains information about the expected and actual number of calls of - * the functions that have not been called the expected number of times. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. 
- * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * function foo() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func, 2); - * - * // Returns an array containing information on callsfunc() - * tracker.report(); - * // [ - * // { - * // message: 'Expected the func function to be executed 2 time(s) but was - * // executed 0 time(s).', - * // actual: 0, - * // expected: 2, - * // operator: 'func', - * // stack: stack trace - * // } - * // ] - * ``` - * @since v14.2.0, v12.19.0 - * @return of objects containing information about the wrapper functions returned by `calls`. - */ - report(): CallTrackerReportInformation[]; - /** - * Reset calls of the call tracker. - * If a tracked function is passed as an argument, the calls will be reset for it. - * If no arguments are passed, all tracked functions will be reset. - * - * ```js - * import assert from 'node:assert'; - * - * const tracker = new assert.CallTracker(); - * - * function func() {} - * const callsfunc = tracker.calls(func); - * - * callsfunc(); - * // Tracker was called once - * tracker.getCalls(callsfunc).length === 1; - * - * tracker.reset(callsfunc); - * tracker.getCalls(callsfunc).length === 0; - * ``` - * - * @since v18.8.0, v16.18.0 - * @param fn a tracked function to reset. - */ - reset(fn?: Function): void; - /** - * Iterates through the list of functions passed to `tracker.calls()` and will throw an error for functions that - * have not been called the expected number of times. - * - * ```js - * import assert from 'assert'; - * - * // Creates call tracker. - * const tracker = new assert.CallTracker(); - * - * function func() {} - * - * // Returns a function that wraps func() that must be called exact times - * // before tracker.verify(). - * const callsfunc = tracker.calls(func, 2); - * - * callsfunc(); - * - * // Will throw an error since callsfunc() was only called once. - * tracker.verify(); - * ``` - * @since v14.2.0, v12.19.0 - */ - verify(): void; - } - interface CallTrackerCall { - thisArg: object; - arguments: unknown[]; - } - interface CallTrackerReportInformation { - message: string; - /** The actual number of times the function was called. */ - actual: number; - /** The number of times the function was expected to be called. */ - expected: number; - /** The name of the function that is wrapped. */ - operator: string; - /** A stack trace of the function. */ - stack: object; - } - type AssertPredicate = RegExp | (new () => object) | ((thrown: unknown) => boolean) | object | Error; - /** - * Throws an `AssertionError` with the provided error message or a default - * error message. If the `message` parameter is an instance of an `Error` then - * it will be thrown instead of the `AssertionError`. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.fail(); - * // AssertionError [ERR_ASSERTION]: Failed - * - * assert.fail('boom'); - * // AssertionError [ERR_ASSERTION]: boom - * - * assert.fail(new TypeError('need array')); - * // TypeError: need array - * ``` - * - * Using `assert.fail()` with more than two arguments is possible but deprecated. - * See below for further details. - * @since v0.1.21 - * @param [message='Failed'] - */ - function fail(message?: string | Error): never; - /** @deprecated since v10.0.0 - use fail([message]) or other assert functions instead. 
*/ - function fail( - actual: unknown, - expected: unknown, - message?: string | Error, - operator?: string, - // tslint:disable-next-line:ban-types - stackStartFn?: Function - ): never; - /** - * Tests if `value` is truthy. It is equivalent to`assert.equal(!!value, true, message)`. - * - * If `value` is not truthy, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is `undefined`, a default - * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * If no arguments are passed in at all `message` will be set to the string:`` 'No value argument passed to `assert.ok()`' ``. - * - * Be aware that in the `repl` the error message will be different to the one - * thrown in a file! See below for further details. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.ok(true); - * // OK - * assert.ok(1); - * // OK - * - * assert.ok(); - * // AssertionError: No value argument passed to `assert.ok()` - * - * assert.ok(false, 'it\'s false'); - * // AssertionError: it's false - * - * // In the repl: - * assert.ok(typeof 123 === 'string'); - * // AssertionError: false == true - * - * // In a file (e.g. test.js): - * assert.ok(typeof 123 === 'string'); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(typeof 123 === 'string') - * - * assert.ok(false); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(false) - * - * assert.ok(0); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert.ok(0) - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * // Using `assert()` works the same: - * assert(0); - * // AssertionError: The expression evaluated to a falsy value: - * // - * // assert(0) - * ``` - * @since v0.1.21 - */ - function ok(value: unknown, message?: string | Error): asserts value; - /** - * **Strict assertion mode** - * - * An alias of {@link strictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link strictEqual} instead. - * - * Tests shallow, coercive equality between the `actual` and `expected` parameters - * using the [`==` operator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Equality). `NaN` is specially handled - * and treated as being identical if both sides are `NaN`. - * - * ```js - * import assert from 'assert'; - * - * assert.equal(1, 1); - * // OK, 1 == 1 - * assert.equal(1, '1'); - * // OK, 1 == '1' - * assert.equal(NaN, NaN); - * // OK - * - * assert.equal(1, 2); - * // AssertionError: 1 == 2 - * assert.equal({ a: { b: 1 } }, { a: { b: 1 } }); - * // AssertionError: { a: { b: 1 } } == { a: { b: 1 } } - * ``` - * - * If the values are not equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default - * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * @since v0.1.21 - */ - function equal(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link notStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link notStrictEqual} instead. 
- * - * Tests shallow, coercive inequality with the [`!=` operator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Inequality). `NaN` is - * specially handled and treated as being identical if both sides are `NaN`. - * - * ```js - * import assert from 'assert'; - * - * assert.notEqual(1, 2); - * // OK - * - * assert.notEqual(1, 1); - * // AssertionError: 1 != 1 - * - * assert.notEqual(1, '1'); - * // AssertionError: 1 != '1' - * ``` - * - * If the values are equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default error - * message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`. - * @since v0.1.21 - */ - function notEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link deepStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link deepStrictEqual} instead. - * - * Tests for deep equality between the `actual` and `expected` parameters. Consider - * using {@link deepStrictEqual} instead. {@link deepEqual} can have - * surprising results. - * - * _Deep equality_ means that the enumerable "own" properties of child objects - * are also recursively evaluated by the following rules. - * @since v0.1.21 - */ - function deepEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * **Strict assertion mode** - * - * An alias of {@link notDeepStrictEqual}. - * - * **Legacy assertion mode** - * - * > Stability: 3 - Legacy: Use {@link notDeepStrictEqual} instead. - * - * Tests for any deep inequality. Opposite of {@link deepEqual}. - * - * ```js - * import assert from 'assert'; - * - * const obj1 = { - * a: { - * b: 1 - * } - * }; - * const obj2 = { - * a: { - * b: 2 - * } - * }; - * const obj3 = { - * a: { - * b: 1 - * } - * }; - * const obj4 = Object.create(obj1); - * - * assert.notDeepEqual(obj1, obj1); - * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } } - * - * assert.notDeepEqual(obj1, obj2); - * // OK - * - * assert.notDeepEqual(obj1, obj3); - * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } } - * - * assert.notDeepEqual(obj1, obj4); - * // OK - * ``` - * - * If the values are deeply equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a default - * error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function notDeepEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Tests strict equality between the `actual` and `expected` parameters as - * determined by [`Object.is()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is). - * - * ```js - * import assert from 'assert/strict'; - * - * assert.strictEqual(1, 2); - * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal: - * // - * // 1 !== 2 - * - * assert.strictEqual(1, 1); - * // OK - * - * assert.strictEqual('Hello foobar', 'Hello World!'); - * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal: - * // + actual - expected - * // - * // + 'Hello foobar' - * // - 'Hello World!' 
- * // ^ - * - * const apples = 1; - * const oranges = 2; - * assert.strictEqual(apples, oranges, `apples ${apples} !== oranges ${oranges}`); - * // AssertionError [ERR_ASSERTION]: apples 1 !== oranges 2 - * - * assert.strictEqual(1, '1', new TypeError('Inputs are not identical')); - * // TypeError: Inputs are not identical - * ``` - * - * If the values are not strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a - * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function strictEqual(actual: unknown, expected: T, message?: string | Error): asserts actual is T; - /** - * Tests strict inequality between the `actual` and `expected` parameters as - * determined by [`Object.is()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is). - * - * ```js - * import assert from 'assert/strict'; - * - * assert.notStrictEqual(1, 2); - * // OK - * - * assert.notStrictEqual(1, 1); - * // AssertionError [ERR_ASSERTION]: Expected "actual" to be strictly unequal to: - * // - * // 1 - * - * assert.notStrictEqual(1, '1'); - * // OK - * ``` - * - * If the values are strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a - * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v0.1.21 - */ - function notStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Tests for deep equality between the `actual` and `expected` parameters. - * "Deep" equality means that the enumerable "own" properties of child objects - * are recursively evaluated also by the following rules. - * @since v1.2.0 - */ - function deepStrictEqual(actual: unknown, expected: T, message?: string | Error): asserts actual is T; - /** - * Tests for deep strict inequality. Opposite of {@link deepStrictEqual}. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.notDeepStrictEqual({ a: 1 }, { a: '1' }); - * // OK - * ``` - * - * If the values are deeply and strictly equal, an `AssertionError` is thrown - * with a `message` property set equal to the value of the `message` parameter. If - * the `message` parameter is undefined, a default error message is assigned. If - * the `message` parameter is an instance of an `Error` then it will be thrown - * instead of the `AssertionError`. - * @since v1.2.0 - */ - function notDeepStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void; - /** - * Expects the function `fn` to throw an error. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function, - * a validation object where each property will be tested for strict deep equality, - * or an instance of error where each property will be tested for strict deep - * equality including the non-enumerable `message` and `name` properties. When - * using an object, it is also possible to use a regular expression, when - * validating against a string property. See below for examples. 
- * - * If specified, `message` will be appended to the message provided by the`AssertionError` if the `fn` call fails to throw or in case the error validation - * fails. - * - * Custom validation object/error instance: - * - * ```js - * import assert from 'assert/strict'; - * - * const err = new TypeError('Wrong value'); - * err.code = 404; - * err.foo = 'bar'; - * err.info = { - * nested: true, - * baz: 'text' - * }; - * err.reg = /abc/i; - * - * assert.throws( - * () => { - * throw err; - * }, - * { - * name: 'TypeError', - * message: 'Wrong value', - * info: { - * nested: true, - * baz: 'text' - * } - * // Only properties on the validation object will be tested for. - * // Using nested objects requires all properties to be present. Otherwise - * // the validation is going to fail. - * } - * ); - * - * // Using regular expressions to validate error properties: - * throws( - * () => { - * throw err; - * }, - * { - * // The `name` and `message` properties are strings and using regular - * // expressions on those will match against the string. If they fail, an - * // error is thrown. - * name: /^TypeError$/, - * message: /Wrong/, - * foo: 'bar', - * info: { - * nested: true, - * // It is not possible to use regular expressions for nested properties! - * baz: 'text' - * }, - * // The `reg` property contains a regular expression and only if the - * // validation object contains an identical regular expression, it is going - * // to pass. - * reg: /abc/i - * } - * ); - * - * // Fails due to the different `message` and `name` properties: - * throws( - * () => { - * const otherErr = new Error('Not found'); - * // Copy all enumerable properties from `err` to `otherErr`. - * for (const [key, value] of Object.entries(err)) { - * otherErr[key] = value; - * } - * throw otherErr; - * }, - * // The error's `message` and `name` properties will also be checked when using - * // an error as validation object. - * err - * ); - * ``` - * - * Validate instanceof using constructor: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * Error - * ); - * ``` - * - * Validate error message using [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions): - * - * Using a regular expression runs `.toString` on the error object, and will - * therefore also include the error name. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * /^Error: Wrong value$/ - * ); - * ``` - * - * Custom error validation: - * - * The function must return `true` to indicate all internal validations passed. - * It will otherwise fail with an `AssertionError`. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.throws( - * () => { - * throw new Error('Wrong value'); - * }, - * (err) => { - * assert(err instanceof Error); - * assert(/value/.test(err)); - * // Avoid returning anything from validation functions besides `true`. - * // Otherwise, it's not clear what part of the validation failed. Instead, - * // throw an error about the specific validation that failed (as done in this - * // example) and add as much helpful debugging information to that error as - * // possible. - * return true; - * }, - * 'unexpected error' - * ); - * ``` - * - * `error` cannot be a string. If a string is provided as the second - * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. 
This can lead to easy-to-miss mistakes. Using the same - * message as the thrown error message is going to result in an`ERR_AMBIGUOUS_ARGUMENT` error. Please read the example below carefully if using - * a string as the second argument gets considered: - * - * ```js - * import assert from 'assert/strict'; - * - * function throwingFirst() { - * throw new Error('First'); - * } - * - * function throwingSecond() { - * throw new Error('Second'); - * } - * - * function notThrowing() {} - * - * // The second argument is a string and the input function threw an Error. - * // The first case will not throw as it does not match for the error message - * // thrown by the input function! - * assert.throws(throwingFirst, 'Second'); - * // In the next example the message has no benefit over the message from the - * // error and since it is not clear if the user intended to actually match - * // against the error message, Node.js throws an `ERR_AMBIGUOUS_ARGUMENT` error. - * assert.throws(throwingSecond, 'Second'); - * // TypeError [ERR_AMBIGUOUS_ARGUMENT] - * - * // The string is only used (as message) in case the function does not throw: - * assert.throws(notThrowing, 'Second'); - * // AssertionError [ERR_ASSERTION]: Missing expected exception: Second - * - * // If it was intended to match for the error message do this instead: - * // It does not throw because the error messages match. - * assert.throws(throwingSecond, /Second$/); - * - * // If the error message does not match, an AssertionError is thrown. - * assert.throws(throwingFirst, /Second$/); - * // AssertionError [ERR_ASSERTION] - * ``` - * - * Due to the confusing error-prone notation, avoid a string as the second - * argument. - * @since v0.1.21 - */ - function throws(block: () => unknown, message?: string | Error): void; - function throws(block: () => unknown, error: AssertPredicate, message?: string | Error): void; - /** - * Asserts that the function `fn` does not throw an error. - * - * Using `assert.doesNotThrow()` is actually not useful because there - * is no benefit in catching an error and then rethrowing it. Instead, consider - * adding a comment next to the specific code path that should not throw and keep - * error messages as expressive as possible. - * - * When `assert.doesNotThrow()` is called, it will immediately call the `fn`function. - * - * If an error is thrown and it is the same type as that specified by the `error`parameter, then an `AssertionError` is thrown. If the error is of a - * different type, or if the `error` parameter is undefined, the error is - * propagated back to the caller. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation - * function. See {@link throws} for more details. 
- * - * The following, for instance, will throw the `TypeError` because there is no - * matching error type in the assertion: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * SyntaxError - * ); - * ``` - * - * However, the following will result in an `AssertionError` with the message - * 'Got unwanted exception...': - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * TypeError - * ); - * ``` - * - * If an `AssertionError` is thrown and a value is provided for the `message`parameter, the value of `message` will be appended to the `AssertionError` message: - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotThrow( - * () => { - * throw new TypeError('Wrong value'); - * }, - * /Wrong value/, - * 'Whoops' - * ); - * // Throws: AssertionError: Got unwanted exception: Whoops - * ``` - * @since v0.1.21 - */ - function doesNotThrow(block: () => unknown, message?: string | Error): void; - function doesNotThrow(block: () => unknown, error: AssertPredicate, message?: string | Error): void; - /** - * Throws `value` if `value` is not `undefined` or `null`. This is useful when - * testing the `error` argument in callbacks. The stack trace contains all frames - * from the error passed to `ifError()` including the potential new frames for`ifError()` itself. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.ifError(null); - * // OK - * assert.ifError(0); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 0 - * assert.ifError('error'); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 'error' - * assert.ifError(new Error()); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: Error - * - * // Create some random error frames. - * let err; - * (function errorFrame() { - * err = new Error('test error'); - * })(); - * - * (function ifErrorFrame() { - * assert.ifError(err); - * })(); - * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: test error - * // at ifErrorFrame - * // at errorFrame - * ``` - * @since v0.1.97 - */ - function ifError(value: unknown): asserts value is null | undefined; - /** - * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately - * calls the function and awaits the returned promise to complete. It will then - * check that the promise is rejected. - * - * If `asyncFn` is a function and it throws an error synchronously,`assert.rejects()` will return a rejected `Promise` with that error. If the - * function does not return a promise, `assert.rejects()` will return a rejected`Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases the error - * handler is skipped. - * - * Besides the async nature to await the completion behaves identically to {@link throws}. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function, - * an object where each property will be tested for, or an instance of error where - * each property will be tested for including the non-enumerable `message` and`name` properties. - * - * If specified, `message` will be the message provided by the `AssertionError` if the `asyncFn` fails to reject. 
- * - * ```js - * import assert from 'assert/strict'; - * - * await assert.rejects( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * { - * name: 'TypeError', - * message: 'Wrong value' - * } - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * await assert.rejects( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * (err) => { - * assert.strictEqual(err.name, 'TypeError'); - * assert.strictEqual(err.message, 'Wrong value'); - * return true; - * } - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * assert.rejects( - * Promise.reject(new Error('Wrong value')), - * Error - * ).then(() => { - * // ... - * }); - * ``` - * - * `error` cannot be a string. If a string is provided as the second - * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. This can lead to easy-to-miss mistakes. Please read the - * example in {@link throws} carefully if using a string as the second - * argument gets considered. - * @since v10.0.0 - */ - function rejects(block: (() => Promise) | Promise, message?: string | Error): Promise; - function rejects(block: (() => Promise) | Promise, error: AssertPredicate, message?: string | Error): Promise; - /** - * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately - * calls the function and awaits the returned promise to complete. It will then - * check that the promise is not rejected. - * - * If `asyncFn` is a function and it throws an error synchronously,`assert.doesNotReject()` will return a rejected `Promise` with that error. If - * the function does not return a promise, `assert.doesNotReject()` will return a - * rejected `Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases - * the error handler is skipped. - * - * Using `assert.doesNotReject()` is actually not useful because there is little - * benefit in catching a rejection and then rejecting it again. Instead, consider - * adding a comment next to the specific code path that should not reject and keep - * error messages as expressive as possible. - * - * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes), - * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation - * function. See {@link throws} for more details. - * - * Besides the async nature to await the completion behaves identically to {@link doesNotThrow}. - * - * ```js - * import assert from 'assert/strict'; - * - * await assert.doesNotReject( - * async () => { - * throw new TypeError('Wrong value'); - * }, - * SyntaxError - * ); - * ``` - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotReject(Promise.reject(new TypeError('Wrong value'))) - * .then(() => { - * // ... - * }); - * ``` - * @since v10.0.0 - */ - function doesNotReject(block: (() => Promise) | Promise, message?: string | Error): Promise; - function doesNotReject(block: (() => Promise) | Promise, error: AssertPredicate, message?: string | Error): Promise; - /** - * Expects the `string` input to match the regular expression. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.match('I will fail', /pass/); - * // AssertionError [ERR_ASSERTION]: The input did not match the regular ... - * - * assert.match(123, /pass/); - * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string. 
- * - * assert.match('I will pass', /pass/); - * // OK - * ``` - * - * If the values do not match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal - * to the value of the `message` parameter. If the `message` parameter is - * undefined, a default error message is assigned. If the `message` parameter is an - * instance of an `Error` then it will be thrown instead of the `AssertionError`. - * @since v13.6.0, v12.16.0 - */ - function match(value: string, regExp: RegExp, message?: string | Error): void; - /** - * Expects the `string` input not to match the regular expression. - * - * ```js - * import assert from 'assert/strict'; - * - * assert.doesNotMatch('I will fail', /fail/); - * // AssertionError [ERR_ASSERTION]: The input was expected to not match the ... - * - * assert.doesNotMatch(123, /pass/); - * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string. - * - * assert.doesNotMatch('I will pass', /different/); - * // OK - * ``` - * - * If the values do match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal - * to the value of the `message` parameter. If the `message` parameter is - * undefined, a default error message is assigned. If the `message` parameter is an - * instance of an `Error` then it will be thrown instead of the `AssertionError`. - * @since v13.6.0, v12.16.0 - */ - function doesNotMatch(value: string, regExp: RegExp, message?: string | Error): void; - const strict: Omit & { - (value: unknown, message?: string | Error): asserts value; - equal: typeof strictEqual; - notEqual: typeof notStrictEqual; - deepEqual: typeof deepStrictEqual; - notDeepEqual: typeof notDeepStrictEqual; - // Mapped types and assertion functions are incompatible? - // TS2775: Assertions require every name in the call target - // to be declared with an explicit type annotation. 
- ok: typeof ok; - strictEqual: typeof strictEqual; - deepStrictEqual: typeof deepStrictEqual; - ifError: typeof ifError; - strict: typeof strict; - }; - } - export = assert; -} -declare module 'node:assert' { - import assert = require('assert'); - export = assert; -} diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_27.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_27.py deleted file mode 100644 index 4d098457fc0efab6e3a68615b657caf9632f6ef7..0000000000000000000000000000000000000000 --- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_27.py +++ /dev/null @@ -1,29 +0,0 @@ - -import re - -def is_spam(text): - # Check for specific keywords - keywords = ["광고", "무료거부", "긴급", "핵심정보", "프로젝트", "추천주", "지금 바로", "수익률", "입금"] - if any(keyword in text for keyword in keywords): - return True - - # Check for urls with suspicious patterns - urls_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') - urls = re.findall(urls_pattern, text) - if urls: - for url in urls: - if any(word in url for word in ["bit.ly", "me2.kr", "오픈톡"]): - return True - - # Check for consecutive digits or percentages - digits = re.findall(r'\d{3,}', text) - percentages = re.findall(r'\d{2,}%+', text) - if digits or percentages: - return True - - # Check for multiple special characters - special_chars = re.findall(r'[\*-_@.&+:]+', text) - if len(special_chars) > 2: - return True - - return False diff --git a/spaces/foduucom/pan-card-detection/README.md b/spaces/foduucom/pan-card-detection/README.md deleted file mode 100644 index 4eae35d8b8b3cf60529b7a8dc7747d1ed0c8d744..0000000000000000000000000000000000000000 --- a/spaces/foduucom/pan-card-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Pan Card Detection -emoji: 👀 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fuxin123zz/ChuanhuChatGPT/chat_func.py b/spaces/fuxin123zz/ChuanhuChatGPT/chat_func.py deleted file mode 100644 index 374178f3d22c5c23d1dc2952336cdc298a77315d..0000000000000000000000000000000000000000 --- a/spaces/fuxin123zz/ChuanhuChatGPT/chat_func.py +++ /dev/null @@ -1,456 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp - -from presets import * -from llama_func import * -from utils import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." 
-API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def get_response( - openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model -): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - history = [construct_system(system_prompt), *history] - - payload = { - "model": selected_model, - "messages": history, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - if stream: - timeout = timeout_streaming - else: - timeout = timeout_all - - # 获取环境变量中的代理设置 - http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy") - https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") - - # 如果存在代理设置,使用它们 - proxies = {} - if http_proxy: - logging.info(f"Using HTTP proxy: {http_proxy}") - proxies["http"] = http_proxy - if https_proxy: - logging.info(f"Using HTTPS proxy: {https_proxy}") - proxies["https"] = https_proxy - - # 如果有代理,使用代理发送请求,否则使用默认设置发送请求 - if proxies: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - proxies=proxies, - ) - else: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - ) - return response - - -def stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - def get_return_value(): - return chatbot, history, status_text, all_token_counts - - logging.info("实时回答模式") - partial_words = "" - counter = 0 - status_text = "开始实时传输回答……" - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - user_token_count = 0 - if len(all_token_counts) == 0: - system_prompt_token_count = count_token(construct_system(system_prompt)) - user_token_count = ( - count_token(construct_user(inputs)) + system_prompt_token_count - ) - else: - user_token_count = count_token(construct_user(inputs)) - all_token_counts.append(user_token_count) - logging.info(f"输入token计数: {user_token_count}") - yield get_return_value() - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - True, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - yield get_return_value() - return - except requests.exceptions.ReadTimeout: - status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt - yield get_return_value() - return - - yield get_return_value() - error_json_str = "" - - for chunk in tqdm(response.iter_lines()): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - chunk = chunk.decode() - chunklength = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - logging.info(chunk) - error_json_str += chunk - status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}" - yield get_return_value() - continue - # decode each line as response data is in bytes - if chunklength > 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - 
sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("一次性回答模式") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - all_token_counts[-1] = total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - if files: - msg = "构建索引中……(这可能需要比较久的时间)" - logging.info(msg) - yield chatbot, history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "索引构建完成,获取回答中……" - yield chatbot, history, msg, all_token_counts - history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot) - yield chatbot, history, status_text, all_token_counts - return - - old_inputs = "" - link_references = [] - if use_websearch: - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - web_results = [] - for idx, result in enumerate(search_results): - logging.info(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}') - link_references.append(f"{idx+1}. 
[{domain_name}]({result['href']})\n") - link_references = "\n\n" + "".join(link_references) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(web_results)) - ) - else: - link_references = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot, history, status_text, all_token_counts - return - - yield chatbot, history, "开始生成回答……", all_token_counts - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - for chatbot, history, status_text, all_token_counts in iter: - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if stream: - max_token = max_token_streaming - else: - max_token = max_token_all - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, - token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - ) - logging.info(f"chatbot: {chatbot}") - flag = False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - if flag: - chatbot = chatbot[:-1] - flag = True - 
history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"保留了最近{num_chat}轮对话" - yield chatbot, history, msg + "," + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("减少token数量完毕") \ No newline at end of file diff --git a/spaces/g4f/freegpt-webui/server/website.py b/spaces/g4f/freegpt-webui/server/website.py deleted file mode 100644 index 795966cdb860f011f19240b9503f078d5d3f83a8..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/server/website.py +++ /dev/null @@ -1,32 +0,0 @@ -from flask import render_template, redirect, url_for -from time import time -from os import urandom - - -class Website: - def __init__(self, bp, url_prefix) -> None: - self.bp = bp - self.url_prefix = url_prefix - self.routes = { - '/': { - 'function': lambda: redirect(url_for('._index')), - 'methods': ['GET', 'POST'] - }, - '/chat/': { - 'function': self._index, - 'methods': ['GET', 'POST'] - }, - '/chat/': { - 'function': self._chat, - 'methods': ['GET', 'POST'] - } - } - - def _chat(self, conversation_id): - if '-' not in conversation_id: - return redirect(url_for('._index')) - - return render_template('index.html', chat_id=conversation_id) - - def _index(self): - return render_template('index.html', chat_id=f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}', url_prefix=self.url_prefix) diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py deleted file mode 100644 index 1241c55b0813d1ecdddf1e66e7c5031fbf78ed50..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py +++ /dev/null @@ -1,68 +0,0 @@ -import numpy as np -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -@HEADS.register_module() -class FPNHead(BaseDecodeHead): - """Panoptic Feature Pyramid Networks. - - This head is the implementation of `Semantic FPN - `_. - - Args: - feature_strides (tuple[int]): The strides for input feature maps. - stack_lateral. All strides suppose to be power of 2. The first - one is of largest resolution. 
- """ - - def __init__(self, feature_strides, **kwargs): - super(FPNHead, self).__init__( - input_transform='multiple_select', **kwargs) - assert len(feature_strides) == len(self.in_channels) - assert min(feature_strides) == feature_strides[0] - self.feature_strides = feature_strides - - self.scale_heads = nn.ModuleList() - for i in range(len(feature_strides)): - head_length = max( - 1, - int(np.log2(feature_strides[i]) - np.log2(feature_strides[0]))) - scale_head = [] - for k in range(head_length): - scale_head.append( - ConvModule( - self.in_channels[i] if k == 0 else self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - if feature_strides[i] != feature_strides[0]: - scale_head.append( - nn.Upsample( - scale_factor=2, - mode='bilinear', - align_corners=self.align_corners)) - self.scale_heads.append(nn.Sequential(*scale_head)) - - def forward(self, inputs): - - x = self._transform_inputs(inputs) - - output = self.scale_heads[0](x[0]) - for i in range(1, len(self.feature_strides)): - # non inplace - output = output + resize( - self.scale_heads[i](x[i]), - size=output.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - - output = self.cls_seg(output) - return output diff --git a/spaces/giswqs/Streamlit/index.html b/spaces/giswqs/Streamlit/index.html deleted file mode 100644 index 931c7f0979a8bb20bf34f48d314be11931e067c2..0000000000000000000000000000000000000000 --- a/spaces/giswqs/Streamlit/index.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - Streamlit for Geospatial - - - - - - diff --git a/spaces/gossminn/fillmorle-app/sftp/utils/label_smoothing.py b/spaces/gossminn/fillmorle-app/sftp/utils/label_smoothing.py deleted file mode 100644 index 1b9162a0997428b1519b4c825887cf48532ab079..0000000000000000000000000000000000000000 --- a/spaces/gossminn/fillmorle-app/sftp/utils/label_smoothing.py +++ /dev/null @@ -1,48 +0,0 @@ -import torch -from torch import nn -from torch.nn import KLDivLoss -from torch.nn import LogSoftmax - - -class LabelSmoothingLoss(nn.Module): - def __init__(self, label_smoothing=0.0, unreliable_label=None, ignore_index=-100): - """ - If label_smoothing == 0.0, it is equivalent to xentropy - """ - assert 0.0 <= label_smoothing <= 1.0 - super(LabelSmoothingLoss, self).__init__() - - self.ignore_index = ignore_index - self.label_smoothing = label_smoothing - - self.loss_fn = KLDivLoss(reduction='batchmean') - self.unreliable_label = unreliable_label - self.max_gap = 100. 
- self.log_softmax = LogSoftmax(1) - - def forward(self, output, target): - """ - output: logits - target: labels - """ - vocab_size = output.shape[1] - mask = (target != self.ignore_index) - output, target = output[mask], target[mask] - output = self.log_softmax(output) - - def get_smooth_prob(ls): - smoothing_value = ls / (vocab_size - 1) - prob = output.new_full((target.size(0), vocab_size), smoothing_value) - prob.scatter_(1, target.unsqueeze(1), 1 - ls) - return prob - - if self.unreliable_label is not None: - smoothed_prob = get_smooth_prob(self.label_smoothing) - hard_prob = get_smooth_prob(0.0) - unreliable_mask = (target == self.unreliable_label).to(torch.float) - model_prob = ((smoothed_prob.T * unreliable_mask) + (hard_prob.T * (1 - unreliable_mask))).T - else: - model_prob = get_smooth_prob(self.label_smoothing) - - loss = self.loss_fn(output, model_prob) - return loss diff --git a/spaces/gotiQspiryo/whisper-ui/examples/ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv __FULL__ Keygen.md b/spaces/gotiQspiryo/whisper-ui/examples/ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv __FULL__ Keygen.md deleted file mode 100644 index 5735d814312156e3a77b9ad751d2ed238350cb7c..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv __FULL__ Keygen.md +++ /dev/null @@ -1,126 +0,0 @@ - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen: A Powerful and Portable OCR Software

        - -

        If you are looking for a reliable and easy-to-use OCR software that can convert scanned documents, images, and PDFs into editable and searchable formats, you might want to check out ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen. This is a portable version of the popular ABBYY FineReader OCR software that does not require installation and can be run from any USB drive or external hard disk. It also comes with a keygen that can generate a valid serial number for activating the software.

        -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv keygen


        Download File >>>>> https://urlgoal.com/2uyLr4



        - -

        What is ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a software package that contains two files: ABBYY FineReader OCR 9.0.724 Pro Portable.exe and The11thMtnDiv Keygen.exe. The first file is the portable version of the ABBYY FineReader OCR 9.0.724 Pro software, which is a professional optical character recognition (OCR) tool that can recognize text from scanned documents, images, and PDFs and convert them into editable and searchable formats such as Word, Excel, PowerPoint, HTML, TXT, RTF, and more. The second file is a keygen that can generate a serial number for activating the software.

        - -

        How to use ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        To use ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen, you need to follow these steps:

        - -
          -
        1. Download the software package from one of the web search results for the query "ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv keygen".
        2. -
        3. Extract the files to a USB drive or an external hard disk.
        4. -
        5. Run the The11thMtnDiv Keygen.exe file and click on "Generate" to get a serial number.
        6. -
        7. Run the ABBYY FineReader OCR 9.0.724 Pro Portable.exe file and enter the serial number when prompted.
        8. -
        9. Enjoy using the software without installation or registration.
        10. -
        - -

        What are the benefits of using ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        Some of the benefits of using ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen are:

        - -
          -
        • You can use the software on any computer without installing it or leaving any traces.
        • -
        • You can save space on your hard drive by storing the software on a removable device.
        • -
        • You can access your documents and files from anywhere by carrying the software with you.
        • -
        • You can save money by using the keygen instead of buying a license.
        • -
        • You can enjoy all the features and functions of the ABBYY FineReader OCR 9.0.724 Pro software, such as high accuracy, fast speed, multilingual support, advanced image processing, document layout retention, PDF conversion and editing, and more.
        • -
        - -

        Conclusion

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a powerful and portable OCR software that can help you convert scanned documents, images, and PDFs into editable and searchable formats with ease and convenience. It is also a cost-effective and space-saving solution that does not require installation or registration. If you are looking for a reliable and easy-to-use OCR software that can run from any USB drive or external hard disk, you might want to give ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen a try.

        -

        How to download ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        There are many websites that offer ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen for download, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you should be careful and choose a trusted source for downloading the software package.

        -

        - -

        One of the websites that you can use to download ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is this one. This website is hosted by Wix.com, a reputable platform for creating and hosting websites. It provides a direct link to the software package without any surveys, ads, or registration. The download speed is fast and the file size is about 160 MB.

        - -

        To download ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen from this website, you need to follow these steps:

        - -
          -
        1. Click on the link above to open the website in a new tab.
        2. -
        3. Scroll down to the bottom of the page and click on the green "Download" button.
        4. -
        5. Wait for a few seconds until the download starts automatically.
        6. -
        7. Save the file to your desired location on your USB drive or external hard disk.
        8. -
        - -

        How to troubleshoot ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        Although ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a portable and easy-to-use software, you may encounter some problems or errors while using it. Here are some common issues and solutions that you can try:

        - -
          -
        • If the software does not run or shows an error message, make sure that you have extracted the files correctly and entered the serial number correctly.
        • -
        • If the software does not recognize the text from your scanned document, image, or PDF, make sure that you have selected the correct language and image quality settings.
        • -
        • If the software does not convert your document, image, or PDF into your desired format, make sure that you have chosen the correct output format and options.
        • -
        • If the software crashes or freezes, make sure that you have enough free space and memory on your USB drive or external hard disk.
        • -
        • If none of these solutions work, you can contact the support team of ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen by sending an email to simpcekdistmi@gmail.com or leaving a comment on their SoundCloud page here.
        • -
        -

        How to update ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a portable version of the ABBYY FineReader OCR 9.0.724 Pro software, which was released in 2008. Since then, ABBYY has released newer versions of the software with improved features and performance. If you want to update your portable software to the latest version, you need to follow these steps:

        - -
          -
        1. Download the latest version of ABBYY FineReader OCR from the official website here.
        2. -
        3. Install the software on your computer and activate it with a valid license key.
        4. -
        5. Download the portable version of the software from this website.
        6. -
        7. Copy the portable version of the software to your USB drive or external hard disk.
        8. -
        9. Run the portable version of the software and enjoy the updated features and functions.
        10. -
        - -

        How to compare ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen with other OCR software?

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is one of the best OCR software available in the market, but it is not the only one. There are other OCR software that you can use to convert scanned documents, images, and PDFs into editable and searchable formats. Some of them are:

        - -
        • Adobe Acrobat: a PDF tool that can also perform OCR on scanned PDFs and images and convert them into various formats such as Word, Excel, PowerPoint, HTML, and more.
        • Nuance Power PDF: a PDF tool that can also perform OCR on scanned PDFs and images and convert them into various formats such as Word, Excel, PowerPoint, HTML, and more.
        • OnlineOCR.net: an online service that can perform OCR on scanned documents, images, and PDFs and convert them into various formats such as Word, Excel, PowerPoint, HTML, TXT, RTF, and more.

        To compare ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen with other OCR software, you need to consider some factors such as:

        - -
        • The accuracy and quality of the OCR results.
        • The speed and performance of the OCR process.
        • The features and functions of the OCR software.
        • The ease of use and user interface of the OCR software.
        • The price and license of the OCR software.

        Based on these factors, you can choose the best OCR software for your needs and preferences.

        -

        How to uninstall ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a portable software that does not require installation, so you do not need to uninstall it from your computer. However, if you want to remove it from your USB drive or external hard disk, you need to follow these steps:

        - -
        1. Open the folder where you have extracted the files of ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen.
        2. Select all the files and folders and press the "Delete" key on your keyboard.
        3. Empty the recycle bin or trash bin of your USB drive or external hard disk.
        4. That's it! You have successfully uninstalled ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen from your removable device.

        How to get support for ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen?

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a software package that was created by SimpceKdistmi, a user of SoundCloud, a platform for sharing and listening to music and audio. SimpceKdistmi is not affiliated with ABBYY, the official developer of ABBYY FineReader OCR software. Therefore, if you have any questions, issues, or feedback regarding ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen, you need to contact SimpceKdistmi directly.

        - -

        You can get support for ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen by using one of these methods:

        - -
          -
        • Email: You can send an email to simpcekdistmi@gmail.com and explain your problem or request in detail.
        • -
        • SoundCloud: You can visit SimpceKdistmi's SoundCloud page here and leave a comment or a message with your question or feedback.
        • -
        • Wix: You can visit SimpceKdistmi's Wix website here and fill out the contact form with your name, email, and message.
        • -
        - -

        SimpceKdistmi will try to respond to your queries as soon as possible and provide you with the best possible support for ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen.

        -

        Conclusion

        - -

        ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen is a portable and powerful OCR software that can convert scanned documents, images, and PDFs into editable and searchable formats with high accuracy and speed. It is also a cost-effective and space-saving solution that does not require installation or registration. You can download, use, update, uninstall, and get support for ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen by following the steps and methods described in this article. If you are looking for a reliable and easy-to-use OCR software that can run from any USB drive or external hard disk, you might want to give ABBYY FineReader OCR 9.0.724 Pro Portable - The11thMtnDiv Keygen a try.

        -
        -
        \ No newline at end of file diff --git a/spaces/gputrain/UrbanSounds8K/article.md b/spaces/gputrain/UrbanSounds8K/article.md deleted file mode 100644 index 4803be356a486e9cf6e8107f6406b2ca19882536..0000000000000000000000000000000000000000 --- a/spaces/gputrain/UrbanSounds8K/article.md +++ /dev/null @@ -1,41 +0,0 @@ -## Dataset - -- [UrbanSound8K](https://urbansounddataset.weebly.com/urbansound8k.html) - -## Audio files - -Files are converted to melspectrograms that perform better in general for visual transformations of such audio files. - -## Training - -Using With Fast.ai and three epochs with minimal lines of code approaches 95% accuracy with a 20% validation of the entire dataset of 8732 labelled sound excerpts of 10 classes shown above. Fast.ai was used to train this classifier with a Resnet34 vision learner with three epochs. - - -| epoch | train_loss | valid_loss | accuracy | time | -|-------|------------|-------------|-------------|-------| -|0 | 1.462791 | 0.710250 | 0.775487 | 01:12 | -| 0 | 0.600056 | 0.309964 | 0.892325 | 00:40 | -|1 | 0.260431 | 0.200901 | 0.945017 | 00:39 | -| 2 | 0.090158 | 0.164748 | 0.950745 | 00:40 | - -## Classical Approaches - -[Classical approaches on this dataset as of 2019](https://www.researchgate.net/publication/335862311_Evaluation_of_Classical_Machine_Learning_Techniques_towards_Urban_Sound_Recognition_on_Embedded_Systems) - -## State of the Art Approaches - -The state-of-the-art methods for audio classification approach this problem as an image classification task. For such image classification problems from audio samples, [three common](https://scottmduda.medium.com/urban-environmental-audio-classification-using-mel-spectrograms-706ee6f8dcc1) - transformation approaches are: - -- Linear Spectrograms -- Log Spectrograms -- [Mel Spectrograms](https://towardsdatascience.com/audio-deep-learning-made-simple-part-2-why-mel-spectrograms-perform-better-aad889a93505) - - -## Credits - -Thanks to [Kurian Benoy](https://kurianbenoy.com/) and countless others that generously leave code in github to follow or write blogs that explain various things online. - -## Code Repo & Blog - -Additional details on my [Github Repo](https://github.com/gputrain/fastai2-coursework/tree/main/HW) and [my blog](https://www.gputrain.com/) where I will add additional details on this fast ai build, audio transforms and more. \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/backtranslation/extract_bt_data.py b/spaces/gradio/HuBERT/examples/backtranslation/extract_bt_data.py deleted file mode 100644 index e766391e873d0d9a9561d67d5864934b2fad0681..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/backtranslation/extract_bt_data.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import fileinput - -from tqdm import tqdm - - -def main(): - parser = argparse.ArgumentParser( - description=( - "Extract back-translations from the stdout of fairseq-generate. " - "If there are multiply hypotheses for a source, we only keep the first one. 
" - ) - ) - parser.add_argument("--output", required=True, help="output prefix") - parser.add_argument( - "--srclang", required=True, help="source language (extracted from H-* lines)" - ) - parser.add_argument( - "--tgtlang", required=True, help="target language (extracted from S-* lines)" - ) - parser.add_argument("--minlen", type=int, help="min length filter") - parser.add_argument("--maxlen", type=int, help="max length filter") - parser.add_argument("--ratio", type=float, help="ratio filter") - parser.add_argument("files", nargs="*", help="input files") - args = parser.parse_args() - - def validate(src, tgt): - srclen = len(src.split(" ")) if src != "" else 0 - tgtlen = len(tgt.split(" ")) if tgt != "" else 0 - if ( - (args.minlen is not None and (srclen < args.minlen or tgtlen < args.minlen)) - or ( - args.maxlen is not None - and (srclen > args.maxlen or tgtlen > args.maxlen) - ) - or ( - args.ratio is not None - and (max(srclen, tgtlen) / float(min(srclen, tgtlen)) > args.ratio) - ) - ): - return False - return True - - def safe_index(toks, index, default): - try: - return toks[index] - except IndexError: - return default - - with open(args.output + "." + args.srclang, "w") as src_h, open( - args.output + "." + args.tgtlang, "w" - ) as tgt_h: - for line in tqdm(fileinput.input(args.files)): - if line.startswith("S-"): - tgt = safe_index(line.rstrip().split("\t"), 1, "") - elif line.startswith("H-"): - if tgt is not None: - src = safe_index(line.rstrip().split("\t"), 2, "") - if validate(src, tgt): - print(src, file=src_h) - print(tgt, file=tgt_h) - tgt = None - - -if __name__ == "__main__": - main() diff --git a/spaces/gradio/HuBERT/examples/speech_recognition/data/__init__.py b/spaces/gradio/HuBERT/examples/speech_recognition/data/__init__.py deleted file mode 100644 index 47bb6e24ddf25aa4fd5bf0fe9672f89099efb9b4..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/speech_recognition/data/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .asr_dataset import AsrDataset - - -__all__ = [ - "AsrDataset", -] diff --git a/spaces/gradio/HuBERT/fairseq/criterions/wav2vec_criterion.py b/spaces/gradio/HuBERT/fairseq/criterions/wav2vec_criterion.py deleted file mode 100644 index e04786cc3b75517cefd06303f98f8536f9279311..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/criterions/wav2vec_criterion.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from dataclasses import dataclass, field -from typing import List, Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.logging.meters import safe_round -from fairseq.utils import is_xla_tensor - - -@dataclass -class Wav2VecCriterionConfig(FairseqDataclass): - infonce: bool = field( - default=False, - metadata={ - "help": "if set, uses cross entropy instead of binary cross entropy (i.e. 
InfoNCE loss)" - }, - ) - loss_weights: Optional[List[float]] = field( - default=None, - metadata={"help": "weights for additional loss terms (not first one)"}, - ) - log_keys: List[str] = field( - default_factory=lambda: [], - metadata={"help": "output keys to log"}, - ) - -@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig) -class Wav2vecCriterion(FairseqCriterion): - def __init__(self, task, infonce=False, loss_weights=None, log_keys=None): - super().__init__(task) - self.infonce = infonce - self.loss_weights = loss_weights - self.log_keys = [] if log_keys is None else log_keys - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - logits = model.get_logits(net_output).float() - target = model.get_targets(sample, net_output) - self.xla = is_xla_tensor(logits) - - # XXX: handle weights on xla. - weights = None - if hasattr(model, "get_target_weights") and not self.infonce: - weights = model.get_target_weights(target, net_output) - if torch.is_tensor(weights): - weights = weights.float() - - losses = [] - - reduction = "none" if ((not reduce) or self.xla) else "sum" - if self.infonce: - loss = F.cross_entropy(logits, target, reduction=reduction) - else: - loss = F.binary_cross_entropy_with_logits( - logits, target.float(), weights, reduction=reduction - ) - - if self.xla: - # tpu-comment: since dynamic shapes lead to recompilations on xla, - # we don't shrink tensors using mask_indices. - # Instead, we use mask indices to adjust loss. - mi = ( - sample['net_input']['mask_indices'] - .transpose(0, 1) # logits are transposed in `model.get_logits` - .reshape(logits.size(0)) - ) - loss = (loss * mi).sum() if reduce else (loss * mi) - - if 'sample_size' in sample: - sample_size = sample['sample_size'] - elif 'mask_indices' in sample['net_input']: - sample_size = sample['net_input']['mask_indices'].sum() - else: - sample_size = target.numel() if self.infonce else target.long().sum().item() - losses.append(loss.detach().clone()) - - if self.loss_weights is not None: - assert hasattr(model, "get_extra_losses") - extra_losses = model.get_extra_losses(net_output) - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - if len(self.loss_weights) == 1 and len(extra_losses) != 1: - self.loss_weights = [self.loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len( - self.loss_weights - ), f"{len(extra_losses)}, {len(self.loss_weights)}" - for p, coef in zip(extra_losses, self.loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() * sample_size - loss += p - losses.append(p) - - logging_output = { - "loss": loss.item() if (reduce and not self.xla) else loss.detach(), - "ntokens": sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - } - - for lk in self.log_keys: - # Only store "logits" and "target" for computing MAP and MAUC - # during validation - if lk == "logits": - if not self.training: - logging_output["logits"] = logits.cpu().numpy() - elif lk == "target": - if not self.training: - # If the targets have been mixed with the predictions of - # teacher models, find the original targets - if hasattr(model, "get_original_targets"): - original_target = model.get_original_targets(sample, net_output) - else: - original_target = target - 
logging_output["target"] = original_target.cpu().numpy() - elif lk in net_output: - value = net_output[lk] - if not is_xla_tensor(value): - value = float(value) - logging_output[lk] = value - - if len(losses) > 1: - for i, l in enumerate(losses): - logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach() - - if self.infonce: - with torch.no_grad(): - if logits.numel() == 0: - corr = 0 - count = 0 - else: - assert logits.dim() > 1, logits.shape - max = logits.argmax(-1) == 0 - min = logits.argmin(-1) == 0 - if is_xla_tensor(logits): - max, min = max * mi, min * mi - both = max & min - corr = max.long().sum() - both.long().sum() - count = mi.sum() - else: - both = max & min - corr = max.long().sum().item() - both.long().sum().item() - count = float(max.numel()) - - logging_output["correct"] = corr - logging_output["count"] = count - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - nsentences = utils.item( - sum(log.get("nsentences", 0) for log in logging_outputs) - ) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar( - "loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3 - ) - metrics.log_scalar("ntokens", ntokens) - metrics.log_scalar("nsentences", nsentences) - - correct = sum(log.get("correct", 0) for log in logging_outputs) - metrics.log_scalar("_correct", correct) - - total = sum(log.get("count", 0) for log in logging_outputs) - metrics.log_scalar("_total", total) - - if total > 0: - metrics.log_derived( - "accuracy", - lambda meters: safe_round( - meters["_correct"].sum / meters["_total"].sum, 5 - ) - if meters["_total"].sum > 0 - else float("nan"), - ) - - builtin_keys = { - "loss", - "ntokens", - "nsentences", - "sample_size", - "correct", - "count", - } - - for k in logging_outputs[0]: - if k not in builtin_keys: - val = sum(log.get(k, 0) for log in logging_outputs) - if k.startswith("loss"): - metrics.log_scalar( - k, val / (sample_size or 1) / math.log(2), sample_size, round=3 - ) - else: - metrics.log_scalar(k, val / len(logging_outputs), round=3) - - # FIXME: revert when gather based xla reduction is implemented - #@staticmethod - #def logging_outputs_can_be_summed() -> bool: - def logging_outputs_can_be_summed(self) -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - # XXX: Gather based reduction not implemented for xla yet. - # So we fall to sum based reduction for xla. - return self.xla diff --git a/spaces/gradio/HuBERT/fairseq/scoring/__init__.py b/spaces/gradio/HuBERT/fairseq/scoring/__init__.py deleted file mode 100644 index 58f2f563e493327394dff1265030d18f0814b5a2..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/scoring/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -import importlib -import os -from abc import ABC, abstractmethod - -from fairseq import registry -from omegaconf import DictConfig - - -class BaseScorer(ABC): - def __init__(self, cfg): - self.cfg = cfg - self.ref = [] - self.pred = [] - - def add_string(self, ref, pred): - self.ref.append(ref) - self.pred.append(pred) - - @abstractmethod - def score(self) -> float: - pass - - @abstractmethod - def result_string(self) -> str: - pass - - -_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry( - "--scoring", default="bleu" -) - - -def build_scorer(choice, tgt_dict): - _choice = choice._name if isinstance(choice, DictConfig) else choice - - if _choice == "bleu": - from fairseq.scoring import bleu - - return bleu.Scorer( - bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk()) - ) - return _build_scorer(choice) - - -# automatically import any Python files in the current directory -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - module = file[: file.find(".py")] - importlib.import_module("fairseq.scoring." + module) diff --git a/spaces/gradio/image_classification/app.py b/spaces/gradio/image_classification/app.py deleted file mode 100644 index 9860a9dea5fce7942a9b366d62df29c1def464fd..0000000000000000000000000000000000000000 --- a/spaces/gradio/image_classification/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -import torch -import requests -from torchvision import transforms - -model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval() -response = requests.get("https://git.io/JJkYN") -labels = response.text.split("\n") - -def predict(inp): - inp = transforms.ToTensor()(inp).unsqueeze(0) - with torch.no_grad(): - prediction = torch.nn.functional.softmax(model(inp)[0], dim=0) - confidences = {labels[i]: float(prediction[i]) for i in range(1000)} - return confidences - -demo = gr.Interface(fn=predict, - inputs=gr.inputs.Image(type="pil"), - outputs=gr.outputs.Label(num_top_classes=3), - examples=[["cheetah.jpg"]], - ) - -demo.launch() \ No newline at end of file diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/Import.tsx b/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/Import.tsx deleted file mode 100644 index 5cc9582f8322dc8584677eb9eb9801a6809f68b9..0000000000000000000000000000000000000000 --- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Settings/Import.tsx +++ /dev/null @@ -1,51 +0,0 @@ -import { IconFileImport } from '@tabler/icons-react'; -import { FC } from 'react'; - -import { useTranslation } from 'next-i18next'; - -import { SupportedExportFormats } from '@/types/export'; - -import { SidebarButton } from '../Sidebar/SidebarButton'; - -interface Props { - onImport: (data: SupportedExportFormats) => void; -} - -export const Import: FC = ({ onImport }) => { - const { t } = useTranslation('sidebar'); - return ( - <> - { - if (!e.target.files?.length) return; - - const file = e.target.files[0]; - const reader = new FileReader(); - reader.onload = (e) => { - let json = JSON.parse(e.target?.result as string); - onImport(json); - }; - reader.readAsText(file); - }} - /> - - } - onClick={() => { - const importFile = document.querySelector( - '#import-file', - ) as HTMLInputElement; - if (importFile) { - importFile.click(); - } - }} - /> - - ); -}; diff --git a/spaces/h2oai/wave-tour/examples/plot_interval_theta.py b/spaces/h2oai/wave-tour/examples/plot_interval_theta.py deleted file mode 100644 index 
5a4dfb90b8ed2e3eb4aec790fe55c2581e43b110..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/plot_interval_theta.py +++ /dev/null @@ -1,26 +0,0 @@ -# Plot / Interval / Theta -# Make a "racetrack" #plot (a bar plot in polar coordinates, transposed). #interval -# --- -from h2o_wave import site, data, ui - -page = site['/demo'] - -page.add('example', ui.plot_card( - box='1 1 4 5', - title='Intervals, theta', - data=data('question percent', 8, rows=[ - ('Question 1', 0.21), - ('Question 2', 0.4), - ('Question 3', 0.49), - ('Question 4', 0.52), - ('Question 5', 0.53), - ('Question 6', 0.84), - ('Question 7', 0.88), - ('Question 8', 0.9), - ]), - plot=ui.plot([ - ui.mark(coord='theta', type='interval', x='=question', y='=percent', stack='auto', y_min=0) - ]) -)) - -page.save() diff --git a/spaces/h2oai/wave-tour/examples/textbox_trigger.py b/spaces/h2oai/wave-tour/examples/textbox_trigger.py deleted file mode 100644 index 47c7f1eba7d8cef700bd01fdc8394cbd21cacaa8..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/textbox_trigger.py +++ /dev/null @@ -1,40 +0,0 @@ -# Form / Textbox / Trigger -# To handle live changes to a #textbox, enable the `trigger` attribute. -# #form #trigger -# --- -from typing import Optional -from h2o_wave import main, app, Q, ui - - -def to_pig_latin(text: Optional[str]): - if not text: - return '*Type in some text above to translate to Pig Latin!*' - words = text.lower().strip().split(' ') - texts = [] - for word in words: - if word[0] in 'aeiou': - texts.append(f'{word}yay') - else: - for letter in word: - if letter in 'aeiou': - texts.append(f'{word[word.index(letter):]}{word[:word.index(letter)]}ay') - break - return ' '.join(texts) - - -def get_form_items(txt: Optional[str]): - return [ - ui.textbox(name='text', label='English', multiline=True, trigger=True), - ui.label('Pig Latin'), - ui.text(to_pig_latin(txt)), - ] - - -@app('/demo') -async def serve(q: Q): - if not q.client.initialized: - q.page['example'] = ui.form_card(box='1 1 4 7', items=get_form_items(None)) - q.client.initialized = True - if q.args.text is not None: - q.page['example'].items = get_form_items(q.args.text) - await q.page.save() diff --git a/spaces/harish03/physicsv11-litbot/Dockerfile b/spaces/harish03/physicsv11-litbot/Dockerfile deleted file mode 100644 index 013fb487139b7432755793ab016e4433db706b2a..0000000000000000000000000000000000000000 --- a/spaces/harish03/physicsv11-litbot/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM python:3.9 -RUN useradd -m -u 1000 user -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH -WORKDIR $HOME/app -COPY --chown=user . $HOME/app -COPY ./requirements.txt ~/app/requirements.txt -RUN pip install -r requirements.txt -COPY . . 
-CMD ["chainlit", "run", "app.py", "--port", "7860"] \ No newline at end of file diff --git a/spaces/harish03/physicsv11-litbot/app.py b/spaces/harish03/physicsv11-litbot/app.py deleted file mode 100644 index 69898669110ac69411b48b9602ef5d1c79acf658..0000000000000000000000000000000000000000 --- a/spaces/harish03/physicsv11-litbot/app.py +++ /dev/null @@ -1,87 +0,0 @@ -from langchain.document_loaders import DirectoryLoader, PyPDFLoader -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.vectorstores import FAISS -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.llms import OpenAI -from langchain.chains import RetrievalQA -from langchain.chains.question_answering import load_qa_chain -from langchain.memory import ConversationBufferMemory -from langchain.prompts import PromptTemplate -import chainlit as cl - -DB_FAISS_PATH = 'vectorstore/faissdb' - -custom_prompt_template = """Use the following pieces of information to answer the user's question. -If you don't know the answer, just say that you don't know, don't try to make up an answer. - -Context: {context} -Question: {question} - -Only return the helpful answer below and nothing else. -Helpful answer: -""" - -def set_custom_prompt(): - """ - Prompt template for QA retrieval for each vectorstore - """ - prompt = PromptTemplate(template=custom_prompt_template, - input_variables=['context', 'question']) - return prompt - -#Retrieval QA Chain -def retrieval_qa_chain(llm, prompt,db): - qa_chain = RetrievalQA.from_chain_type(llm=llm,chain_type='stuff',retriever=db.as_retriever(search_kwargs={'k': 2}),return_source_documents=True,chain_type_kwargs={'prompt': prompt}) - return qa_chain - -#Loading the model -def load_llm(): - # Load the locally downloaded model here - llm = OpenAI(openai_api_key='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') - return llm - -#QA Model Function -def qa_bot(): - - embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',model_kwargs={'device': 'cpu'}) - db = FAISS.load_local(DB_FAISS_PATH, embeddings) - llm = load_llm() - qa_prompt = set_custom_prompt() - qa = retrieval_qa_chain(llm, qa_prompt,db) - - return qa - -#output function -def final_result(query): - qa_result = qa_bot() - response = qa_result({'query':query}) - return response - -#chainlit code -@cl.on_chat_start -async def start(): - chain = qa_bot() - msg = cl.Message(content="Starting the bot...") - await msg.send() - msg.content = "Hi, Welcome to Physics v11. What is your query?" 
- await msg.update() - - cl.user_session.set("chain", chain) - -@cl.on_message -async def main(message): - chain = cl.user_session.get("chain") - cb = cl.AsyncLangchainCallbackHandler( - stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"] - ) - cb.answer_reached = True - res = await chain.acall(message, callbacks=[cb]) - answer = res["result"] - #sources = res["source_documents"] - - # if sources: - # answer += f"\nSources:" + str(sources) - # else: - # answer += "\nNo sources found" - - await cl.Message(content=answer).send() diff --git a/spaces/harkov000/peft-lora-sd-dreambooth/uploader.py b/spaces/harkov000/peft-lora-sd-dreambooth/uploader.py deleted file mode 100644 index c80cbe83b49e1b4d5de49c6f9e347879a73bab2f..0000000000000000000000000000000000000000 --- a/spaces/harkov000/peft-lora-sd-dreambooth/uploader.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from huggingface_hub import HfApi - - -def upload(model_name: str, hf_token: str) -> None: - api = HfApi(token=hf_token) - user_name = api.whoami()["name"] - model_id = f"{user_name}/{model_name}" - try: - api.create_repo(model_id, repo_type="model", private=True) - api.upload_folder(repo_id=model_id, folder_path="results", path_in_repo="results", repo_type="model") - url = f"https://huggingface.co/{model_id}" - message = f"Your model was successfully uploaded to [{url}]({url})." - except Exception as e: - message = str(e) - - return gr.update(value=message, visible=True) diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/data/datasets/pascal_voc.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/data/datasets/pascal_voc.py deleted file mode 100644 index 5872d96575b428e90b29a7759a2f7b32dcc15d25..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/data/datasets/pascal_voc.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import numpy as np -import os -import xml.etree.ElementTree as ET -from fvcore.common.file_io import PathManager - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import BoxMode - -__all__ = ["register_pascal_voc"] - - -# fmt: off -CLASS_NAMES = [ - "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", - "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", - "pottedplant", "sheep", "sofa", "train", "tvmonitor", -] -# fmt: on - - -def load_voc_instances(dirname: str, split: str): - """ - Load Pascal VOC detection annotations to Detectron2 format. - - Args: - dirname: Contain "Annotations", "ImageSets", "JPEGImages" - split (str): one of "train", "test", "val", "trainval" - """ - with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: - fileids = np.loadtxt(f, dtype=np.str) - - # Needs to read many small annotation files. 
Makes sense at local - annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) - dicts = [] - for fileid in fileids: - anno_file = os.path.join(annotation_dirname, fileid + ".xml") - jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") - - with PathManager.open(anno_file) as f: - tree = ET.parse(f) - - r = { - "file_name": jpeg_file, - "image_id": fileid, - "height": int(tree.findall("./size/height")[0].text), - "width": int(tree.findall("./size/width")[0].text), - } - instances = [] - - for obj in tree.findall("object"): - cls = obj.find("name").text - # We include "difficult" samples in training. - # Based on limited experiments, they don't hurt accuracy. - # difficult = int(obj.find("difficult").text) - # if difficult == 1: - # continue - bbox = obj.find("bndbox") - bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] - # Original annotations are integers in the range [1, W or H] - # Assuming they mean 1-based pixel indices (inclusive), - # a box with annotation (xmin=1, xmax=W) covers the whole image. - # In coordinate space this is represented by (xmin=0, xmax=W) - bbox[0] -= 1.0 - bbox[1] -= 1.0 - instances.append( - {"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} - ) - r["annotations"] = instances - dicts.append(r) - return dicts - - -def register_pascal_voc(name, dirname, split, year): - DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split)) - MetadataCatalog.get(name).set( - thing_classes=CLASS_NAMES, dirname=dirname, year=year, split=split - ) diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh deleted file mode 100644 index a53785180974a70bce7fdb0c9da4024166efd596..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -BIN="python train_net.py" -OUTPUT="instant_test_output" -NUM_GPUS=2 -SOLVER_IMS_PER_BATCH=$((NUM_GPUS * 2)) - -CFG_LIST=( "${@:1}" ) -if [ ${#CFG_LIST[@]} -eq 0 ]; then - CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) -fi - -echo "========================================================================" -echo "Configs to run:" -echo "${CFG_LIST[@]}" -echo "========================================================================" - -for cfg in "${CFG_LIST[@]}"; do - echo "========================================================================" - echo "Running $cfg ..." 
- echo "========================================================================" - $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ - SOLVER.IMS_PER_BATCH $SOLVER_IMS_PER_BATCH \ - OUTPUT_DIR "$OUTPUT" - rm -rf "$OUTPUT" -done - diff --git a/spaces/havas79/Real-ESRGAN_Demo/README.md b/spaces/havas79/Real-ESRGAN_Demo/README.md deleted file mode 100644 index 043bebf93b489e51b955b5dce00069dc2023c40f..0000000000000000000000000000000000000000 --- a/spaces/havas79/Real-ESRGAN_Demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Real-ESRGAN Demo for Image Restoration and Upscaling -emoji: 🖼️ -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/common_questions.md b/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/common_questions.md deleted file mode 100644 index f6fadcdd7cf23dbafb237b985dd1b37980330be1..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/common_questions.md +++ /dev/null @@ -1,201 +0,0 @@ -# FAQ - -## Where can I find the segmentation metrics of my experiments? -**Results for the validation sets of each fold** are stored in the respective output folder after the training is completed. For example, this could be. -`${RESULTS_FOLDER}/nnUNet/3d_fullres/Task003_Liver/nnUNetTrainerV2__nnUNetPlansv2.1/fold_0`. After training there will - be a `validation_raw` subfolder and a `validation_raw_postprocessed` subfolder. In each of these folders is going to - be a `summary.json` file with the segmentation metrics. There are metrics for each individual validation case and then - at the bottom there is also a mean across all cases. - -**Cross-validation metrics** can only be computed after all five folds were run. You first need to run -`nnUNet_determine_postprocessing` first (see `nnUNet_determine_postprocessing -h` for help). This will collect the -predictions from the validation sets of the five folds, compute metrics on them and then determine the postprocessing. -Once this is all done, there will be new folders located in the output directory (for example -`${RESULTS_FOLDER}/nnUNet/3d_fullres/Task003_Liver/nnUNetTrainerV2__nnUNetPlansv2.1/`): `cv_niftis_raw` (raw predictions -from the cross-validation) and `cv_niftis_postprocessed` (postprocessed predictions). In each of these folders is -going to be a `summary.json` file with the metrics (see above). - -Note that the postprocessing determined on each individual fold is completely ignored by nnU-Net because it needs to -find a single postprocessing configuration for the whole cross-validation. The postprocessed results in each fold are -just for development purposes! - -**Test set results** see [here](#evaluating-test-set-results). - -**Ensemble performance** will be accessible here `${RESULTS_FOLDER}/nnUNet/ensembles/TASKNAME` after you ran -`nnUNet_find_best_configuration`. There are summary.csv for a quick overview and then there is also going to be -detailed results in the form of `summary.json` in the respective subfolders. - -## What postprocessing is selected? 
-After you run `nnUNet_determine_postprocessing` (see `nnUNet_determine_postprocessing -h` for help) there will be a -`postprocessing.json` file located in the output directory of your training (for example -`${RESULTS_FOLDER}/nnUNet/3d_fullres/Task003_Liver/nnUNetTrainerV2__nnUNetPlansv2.1/`). If you open this with a text -editor, there is a key "for_which_classes", followed by some list. For LiTS (classes 0: bg, 1: liver, 2: tumor) -this can for example be: -```python - "for_which_classes": [ - [ - 1, - 2 - ], - 1 -``` -This means that nnU-Net will first remove all but the largest components for the merged object consisting of classes -1 and 2 (essentially the liver including the tumors) and then in a second step also remove all but the largest -connected component for the liver class. - -Note that you do not have to run `nnUNet_determine_postprocessing` if you use `nnUNet_find_best_configuration`. -`nnUNet_find_best_configuration` will do that for you. - -Ensemble results and postprocessing will be stored in `${RESULTS_FOLDER}/nnUNet/ensembles` -(this will all be generated by `nnUNet_find_best_configuration`). - -## Evaluating test set results -This feature was only added recently. Please run `pip install --upgrade nnunet` or reinstall nnunet from the master. - -You can now use `nnUNet_evaluate_folder` to compute metrics on predicted test cases. For example: - -``` -nnUNet_evaluate_folder -ref FOLDER_WITH_GT -pred FOLDER_WITH_PREDICTIONS -l 1 2 3 4 -``` - -This example is for a dataset that has 4 foreground classes (labels 1, 2, 3, 4). `FOLDER_WITH_GT` and -`FOLDER_WITH_PREDICTIONS` must contain files with the same names containing the reference and predicted segmentations -of each case, respectivelty. The files must be nifti (end with .nii.gz). - -## Creating and managing data splits - -At the start of each training, nnU-Net will check whether the splits_final.pkl file is present in the directory where -the preprocessed data of the requested dataset is located. If the file is not present, nnU-Net will create its own -split: a five-fold cross-validation using all the available training cases. nnU-Net needs this five-fold -cross-validation to be able to determine the postprocessing and to run model/ensemble selection. - -There are however situations in which you may want to create your own split, for example -- in datasets like ACDC where several training cases are connected (there are two time steps for each patient) you -may need to manually create splits to ensure proper stratification. -- cases are annotated by multiple annotators and you would like to use the annotations as separate training examples -- if you are running experiments with a domain transfer, you might want to train only on cases from domain A and -validate on domain B -- ... - -Creating your own data split is simple: the splits_final.pkl file contains the following data structure (assume there are five training cases A, B, C, D, and E): -```python -splits = [ - {'train': ['A', 'B', 'C', 'D'], 'val': ['E']}, - {'train': ['A', 'B', 'C', 'E'], 'val': ['D']}, - {'train': ['A', 'B', 'D', 'E'], 'val': ['C']}, - {'train': ['A', 'C', 'D', 'E'], 'val': ['B']}, - {'train': ['B', 'C', 'D', 'E'], 'val': ['A']} -] -``` - -Use load_pickle and save_pickle from batchgenerators.utilities.file_and_folder_operations for loading/storing the splits. - -Splits is a list of length NUMBER_OF_FOLDS. Each entry in the list is a dict, with 'train' and 'val' as keys and lists -of the corresponding case names (without the _0000 etc!) as values. 
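A minimal sketch of storing a custom split (the task name and case identifiers below are placeholders; the file must end up in the preprocessed folder of your task):

```python
from batchgenerators.utilities.file_and_folder_operations import save_pickle, join

# hypothetical single custom fold: train on four cases, validate on the fifth
splits = [{'train': ['A', 'B', 'C', 'D'], 'val': ['E']}]

# nnU-Net looks for splits_final.pkl next to the preprocessed data of the task
preprocessed_task_dir = '/path/to/nnUNet_preprocessed/Task500_MyTask'
save_pickle(splits, join(preprocessed_task_dir, 'splits_final.pkl'))
```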
- -nnU-Net's five-fold cross validation will always create a list of len(splits)=5. But you can do whatever you want. Note -that if you define only 4 splits (fold 0-3) and then set fold=4 when training (that would be the fifth split), -nnU-Net will print a warning and proceed to use a random 80:20 data split. - -## How can I swap component XXX (for example the loss) of nnU-Net? - -All changes in nnU-Net are handled the same way: - -1) create a new nnU-Net trainer class. Place the file somewhere in the nnunet.training.network_training folder -(any subfolder will do. If you create a new subfolder, make sure to include an empty `__init__.py` file!) - -2) make your new trainer class derive from the trainer you would like to change (most likely this is going to be nnUNetTrainerV2) - -3) identify the function that you need to overwrite. You may have to go up the inheritance hierarchy to find it! - -4) overwrite that function in your custom trainer, make sure whatever you do is compatible with the rest of nnU-Net - -What these changes need to look like specifically is hard to say without knowing what you are exactly trying to do. -Before you open a new issue on GitHub, please have a look around the `nnunet.training.network_training` folder first! -There are tons of examples modifying various parts of the pipeline. - -Also see [here](extending_nnunet.md) - -## How does nnU-Net handle multi-modal images? - -Multi-modal images are treated as color channels. BraTS, which comes with T1, T1c, T2 and Flair images for each -training case will thus for example have 4 input channels. - -## Why does nnU-Net not use all my GPU memory? - -nnU-net and all its parameters are optimized for a training setting that uses about 8GB of VRAM for a network training. -Using more VRAM will not speed up the training. Using more VRAM has also not (yet) been beneficial for model -performance consistently enough to make that the default. If you really want to train with more VRAM, you can do one of these things: - -1) Manually edit the plans files to increase the batch size. A larger batch size gives better (less noisy) gradients -and may improve your model performance if the dataset is large. Note that nnU-Net always runs for 1000 epochs with 250 -iterations each (250000 iterations). The training time thus scales approximately linearly with the batch size -(batch size 4 is going to need twice as long for training than batch size 2!) - -2) Manually edit the plans files to increase the patch size. This one is tricky and should only been attempted if you -know what you are doing! Again, training times will be increased if you do this! 3) is a better way of increasing the -patch size. - -3) Run `nnUNet_plan_and_preprocess` with a larger GPU memory budget. This will make nnU-Net plan for larger patch sizes -during experiment planning. Doing this can change the patch size, network topology, the batch size as well as the -presence of the U-Net cascade. To run with a different memory budget, you need to specify a different experiment planner, for example -`nnUNet_plan_and_preprocess -t TASK_ID -pl2d None -pl3d ExperimentPlanner3D_v21_32GB` (note that `-pl2d None` will -disable 2D U-Net configuration. There is currently no planner for larger 2D U-Nets). We have planners for 8 GB (default), -11GB and 32GB available. If you need a planner for a different GPU size, you should be able to quickly hack together -your own using the code of the 11GB or 32GB planner (same goes for a 2D planner). 
Note that we have experimented with -these planners and not found an increase in segmentation performance as a result of using them. Training times are -again longer than with the default. - -## Do I need to always run all U-Net configurations? -The model training pipeline above is for challenge participations. Depending on your task you may not want to train all -U-Net models and you may also not want to run a cross-validation all the time. -Here are some recommendations about what U-Net model to train: -- It is safe to say that on average, the 3D U-Net model (3d_fullres) was most robust. If you just want to use nnU-Net because you -need segmentations, I recommend you start with this. -- If you are not happy with the results from the 3D U-Net then you can try the following: - - if your cases are very large so that the patch size of the 3d U-Net only covers a very small fraction of an image then - it is possible that the 3d U-Net cannot capture sufficient contextual information in order to be effective. If this - is the case, you should consider running the 3d U-Net cascade (3d_lowres followed by 3d_cascade_fullres) - - If your data is very anisotropic then a 2D U-Net may actually be a better choice (Promise12, ACDC, Task05_Prostate - from the decathlon are examples for anisotropic data) - -You do not have to run five-fold cross-validation all the time. If you want to test single model performance, use - *all* for `FOLD` instead of a number. Note that this will then not give you an estimate of your performance on the - training set. You will also no tbe able to automatically identify which ensembling should be used and nnU-Net will - not be able to configure a postprocessing. - -CAREFUL: DO NOT use fold=all when you intend to run the cascade! You must run the cross-validation in 3d_lowres so -that you get proper (=not overfitted) low resolution predictions. - -## Sharing Models -You can share trained models by simply sending the corresponding output folder from `RESULTS_FOLDER/nnUNet` to -whoever you want share them with. The recipient can then use nnU-Net for inference with this model. - -You can now also use `nnUNet_export_model_to_zip` to export a trained model (or models) to a zip file. The recipient -can then use `nnUNet_install_pretrained_model_from_zip` to install the model from this zip file. - -## Can I run nnU-Net on smaller GPUs? -nnU-Net is guaranteed to run on GPUs with 11GB of memory. Many configurations may also run on 8 GB. -If you have an 11GB and there is still an `Out of Memory` error, please read 'nnU-Net training: RuntimeError: CUDA out of memory' [here](common_problems_and_solutions.md). - -If you wish to configure nnU-Net to use a different amount of GPU memory, simply adapt the reference value for the GPU memory estimation -accordingly (with some slack because the whole thing is not an exact science!). For example, in -[experiment_planner_baseline_3DUNet_v21_11GB.py](nnunet/experiment_planning/experiment_planner_baseline_3DUNet_v21_11GB.py) -we provide an example that attempts to maximise the usage of GPU memory on 11GB as opposed to the default which leaves -much more headroom). This is simply achieved by this line: - -```python -ref = Generic_UNet.use_this_for_batch_size_computation_3D * 11 / 8 -``` - -with 8 being what is currently used (approximately) and 11 being the target. Should you get CUDA out of memory -issues, simply reduce the reference value. You should do this adaptation as part of a separate ExperimentPlanner class. 
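As a rough orientation, such a planner could look like the sketch below. This is a hypothetical example for a 24GB GPU modeled on the 11GB planner mentioned above; the module, class and attribute names are written from memory and should be checked against the nnU-Net version you have installed.

```python
import os
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21


class ExperimentPlanner3D_v21_24GB(ExperimentPlanner3D_v21):
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        # use a separate identifier so that the default plans are not overwritten
        self.data_identifier = "nnUNetData_plans_v2.1_24GB"
        self.plans_fname = os.path.join(self.preprocessed_output_folder,
                                        "nnUNetPlansv2.1_24GB_plans_3D.pkl")

    # copy get_properties_for_stage from the 11GB planner into this class and
    # change only the reference value, e.g.
    # ref = Generic_UNet.use_this_for_batch_size_computation_3D * 24 / 8
```

If the file is placed inside the nnunet.experiment_planning package, the planner can then be selected with `nnUNet_plan_and_preprocess ... -pl3d ExperimentPlanner3D_v21_24GB`, analogous to the 32GB example shown earlier.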
-Please read the instructions [here](documentation/extending_nnunet.md). - - -## Why is no 3d_lowres model created? -3d_lowres is created only if the patch size in 3d_fullres less than 1/8 of the voxels of the median shape of the data -in 3d_fullres (for example Liver is about 512x512x512 and the patch size is 128x128x128, so that's 1/64 and thus -3d_lowres is created). You can enforce the creation of 3d_lowres models for smaller datasets by changing the value of -`HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0` (located in experiment_planning.configuration). - diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/dataset_conversion.md b/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/dataset_conversion.md deleted file mode 100644 index 0d7f23e4f1fcccaf43d35fe2d2b1331e6e3f88cc..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/documentation/dataset_conversion.md +++ /dev/null @@ -1,213 +0,0 @@ -# Dataset conversion instructions -nnU-Net requires the raw data to be brought into a specific format so that it know how to read and interpret it. This -format closely, but not entirely, follows the format used by the -[Medical Segmentation Decathlon](http://medicaldecathlon.com/) (MSD). - -The entry point to nnU-Net is the nnUNet_raw_data_base folder (which the user needs to specify when installing nnU-Net!). -Each segmentation dataset is stored as a separate 'Task'. Tasks are associated with a task ID, a three digit integer -(this is different from the MSD!) and -a task name (which you can freely choose): Task005_Prostate has 'Prostate' as task name and the task id is 5. Tasks are stored in the -nnUNet_raw_data_base/nnUNet_raw_data folder like this: - - nnUNet_raw_data_base/nnUNet_raw_data/ - ├── Task001_BrainTumour - ├── Task002_Heart - ├── Task003_Liver - ├── Task004_Hippocampus - ├── Task005_Prostate - ├── ... - -Within each task folder, the following structure is expected: - - Task001_BrainTumour/ - ├── dataset.json - ├── imagesTr - ├── (imagesTs) - └── labelsTr - -**Please make your custom task ids start at 500 to ensure that there will be no conflicts with downloaded pretrained models!!! (IDs cannot exceed 999)** - -imagesTr contains the images belonging to the training cases. nnU-Net will run pipeline configuration, training with -cross-validation, as well as finding postprocesing and the best ensemble on this data. imagesTs (optional) contains the -images that belong to the -test cases , labelsTr the images with the ground truth segmentation maps for the training cases. dataset.json contains -metadata of the dataset. - -Each training case is associated with an identifier = a unique name for that case. This identifier is used by nnU-Net to -recognize which label file belongs to which image. **All images (including labels) must be 3D nifti files (.nii.gz)!** - -The image files can have any scalar pixel type. The label files must contain segmentation maps that contain consecutive integers, -starting with 0: 0, 1, 2, 3, ... num_labels. 0 is considered background. Each class then has its own associated integer -value. -Images may have multiple modalities. This is especially often the case for medical images. Modalities are very much -like color channels in photos (three color channels: red, green blue), but can be much more diverse: CT, different types -or MRI, and many other. Imaging modalities are identified by nnU-Net by their suffix: a four-digit integer at the end -of the filename. 
Imaging files must therefore follow the following naming convention: case_identifier_XXXX.nii.gz. -Hereby, XXXX is the modality identifier. What modalities these identifiers belong to is specified in the dataset.json -file (see below). Label files are saved as case_identifier.nii.gz - -This naming scheme results in the following folder structure. It is the responsibility of the user to bring their -data into this format! - -Here is an example for the first Task of the MSD: BrainTumour. Each image has four modalities: FLAIR (0000), -T1w (0001), T1gd (0002) and T2w (0003). Note that the imagesTs folder is optional and does not have to be present. - - nnUNet_raw_data_base/nnUNet_raw_data/Task001_BrainTumour/ - ├── dataset.json - ├── imagesTr - │   ├── BRATS_001_0000.nii.gz - │   ├── BRATS_001_0001.nii.gz - │   ├── BRATS_001_0002.nii.gz - │   ├── BRATS_001_0003.nii.gz - │   ├── BRATS_002_0000.nii.gz - │   ├── BRATS_002_0001.nii.gz - │   ├── BRATS_002_0002.nii.gz - │   ├── BRATS_002_0003.nii.gz - │   ├── BRATS_003_0000.nii.gz - │   ├── BRATS_003_0001.nii.gz - │   ├── BRATS_003_0002.nii.gz - │   ├── BRATS_003_0003.nii.gz - │   ├── BRATS_004_0000.nii.gz - │   ├── BRATS_004_0001.nii.gz - │   ├── BRATS_004_0002.nii.gz - │   ├── BRATS_004_0003.nii.gz - │   ├── ... - ├── imagesTs - │   ├── BRATS_485_0000.nii.gz - │   ├── BRATS_485_0001.nii.gz - │   ├── BRATS_485_0002.nii.gz - │   ├── BRATS_485_0003.nii.gz - │   ├── BRATS_486_0000.nii.gz - │   ├── BRATS_486_0001.nii.gz - │   ├── BRATS_486_0002.nii.gz - │   ├── BRATS_486_0003.nii.gz - │   ├── BRATS_487_0000.nii.gz - │   ├── BRATS_487_0001.nii.gz - │   ├── BRATS_487_0002.nii.gz - │   ├── BRATS_487_0003.nii.gz - │   ├── BRATS_488_0000.nii.gz - │   ├── BRATS_488_0001.nii.gz - │   ├── BRATS_488_0002.nii.gz - │   ├── BRATS_488_0003.nii.gz - │   ├── BRATS_489_0000.nii.gz - │   ├── BRATS_489_0001.nii.gz - │   ├── BRATS_489_0002.nii.gz - │   ├── BRATS_489_0003.nii.gz - │   ├── ... - └── labelsTr - ├── BRATS_001.nii.gz - ├── BRATS_002.nii.gz - ├── BRATS_003.nii.gz - ├── BRATS_004.nii.gz - ├── ... - -Here is another example of the second task of the MSD, which has only one modality: - - nnUNet_raw_data_base/nnUNet_raw_data/Task002_Heart/ - ├── dataset.json - ├── imagesTr - │   ├── la_003_0000.nii.gz - │   ├── la_004_0000.nii.gz - │   ├── ... - ├── imagesTs - │   ├── la_001_0000.nii.gz - │   ├── la_002_0000.nii.gz - │   ├── ... - └── labelsTr - ├── la_003.nii.gz - ├── la_004.nii.gz - ├── ... - -For each training case, all images must have the same geometry to ensure that their pixel arrays are aligned. Also -make sure that all your data is co-registered! - -The dataset.json file used by nnU-Net is identical to the ones used by the MSD. For your custom tasks you need to create -them as well and thereby exactly follow the same structure. [This](https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2) -is where you can download the MSD data for reference. - -**NEW:** There now is a utility with which you can generate the dataset.json automatically. You can find it -[here](../nnunet/dataset_conversion/utils.py) (look for the function `generate_dataset_json`). -See [Task120](../nnunet/dataset_conversion/Task120_Massachusetts_RoadSegm.py) for an example on how to use it. And read -its documentation! 
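As a rough orientation, a call to that utility could look like the sketch below. The argument names and their order are written from memory here, so treat the function's docstring as authoritative; the task name and paths are made up.

```python
from nnunet.dataset_conversion.utils import generate_dataset_json

# example task folder inside nnUNet_raw_data_base/nnUNet_raw_data
target_base = "/path/to/nnUNet_raw_data_base/nnUNet_raw_data/Task500_MyTask"

generate_dataset_json(
    output_file=target_base + "/dataset.json",
    imagesTr_dir=target_base + "/imagesTr",
    imagesTs_dir=target_base + "/imagesTs",   # use None if you have no test images
    modalities=("T2", "ADC"),                 # one entry per _XXXX channel, in order
    labels={0: "background", 1: "PZ", 2: "TZ"},
    dataset_name="Task500_MyTask",
)
```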
- -Here is the content of the dataset.json from the Prostate task: - - { - "name": "PROSTATE", - "description": "Prostate transitional zone and peripheral zone segmentation", - "reference": "Radboud University, Nijmegen Medical Centre", - "licence":"CC-BY-SA 4.0", - "relase":"1.0 04/05/2018", - "tensorImageSize": "4D", - "modality": { - "0": "T2", - "1": "ADC" - }, - "labels": { - "0": "background", - "1": "PZ", - "2": "TZ" - }, - "numTraining": 32, - "numTest": 16, - "training":[{"image":"./imagesTr/prostate_16.nii.gz","label":"./labelsTr/prostate_16.nii.gz"},{"image":"./imagesTr/prostate_04.nii.gz","label":"./labelsTr/prostate_04.nii.gz"},...], - "test": ["./imagesTs/prostate_08.nii.gz","./imagesTs/prostate_22.nii.gz","./imagesTs/prostate_30.nii.gz",...] - } - -Note that we truncated the "training" and "test" lists for clarity. You need to specify all the cases in there. If you -don't have test images (imagesTs does not exist) you can leave "test" blank: `"test": []`. - -Please also have a look at the python files located [here](../nnunet/dataset_conversion). They show how we created our -custom dataset.jsons for a range of public datasets. - -## How to use decathlon datasets -The previous release of nnU-Net allowed users to either start with 4D or 3D niftis. This resulted in some confusion, -however, because some users would not know where they should save their data. We therefore dropped support for the 4D -niftis used by the MSD. Instead, we provide a utility that converts the MSD datasets into the format specified above: - -```bash -nnUNet_convert_decathlon_task -i FOLDER_TO_TASK_AS_DOWNLOADED_FROM_MSD -p NUM_PROCESSES -``` - -FOLDER_TO_TASK_AS_DOWNLOADED_FROM_MSD needs to point to the downloaded task folder (such as Task05_Prostate, note the -2-digit task id!). The converted Task will be saved under the same name in nnUNet_raw_data_base/nnUNet_raw_data -(but with a 3 digit identifier). You can overwrite the task id of the converted task by using the `-output_task_id` option. - - -## How to use 2D data with nnU-Net -nnU-Net was originally built for 3D images. It is also strongest when applied to 3D segmentation problems because a -large proportion of its design choices were built with 3D in mind. Also note that many 2D segmentation problems, -especially in the non-biomedical domain, may benefit from pretrained network architectures which nnU-Net does not -support. -Still, there is certainly a need for an out of the box segmentation solution for 2D segmentation problems. And -also on 2D segmentation tasks nnU-Net cam perform extremely well! We have, for example, won a 2D task in the cell -tracking challenge with nnU-Net (see our Nature Methods paper) and we have also successfully applied nnU-Net to -histopathological segmentation problems. -Working with 2D data in nnU-Net requires a small workaround in the creation of the dataset. Essentially, all images -must be converted to pseudo 3D images (so an image with shape (X, Y) needs to be converted to an image with shape -(1, X, Y). The resulting image must be saved in nifti format. Hereby it is important to set the spacing of the -first axis (the one with shape 1) to a value larger than the others. If you are working with niftis anyways, then -doing this should be easy for you. This example here is intended for demonstrating how nnU-Net can be used with -'regular' 2D images. 
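In essence, the conversion of a single grayscale image boils down to something like the following sketch (file names and the spacing value are made up; the conversion scripts linked below do the same thing and additionally handle color channels, labels and the train/test split):

```python
import numpy as np
import SimpleITK as sitk
from skimage import io

img = io.imread("case001.png", as_gray=True)   # 2D array with shape (X, Y)
img = img[None].astype(np.float32)             # add the dummy axis -> shape (1, X, Y)

itk_img = sitk.GetImageFromArray(img)
# SimpleITK expects the spacing as (x, y, z), i.e. in reversed numpy axis order,
# so the dummy (z) axis receives the large spacing value here
itk_img.SetSpacing((1.0, 1.0, 999.0))
sitk.WriteImage(itk_img, "case001_0000.nii.gz")
```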
We selected the massachusetts road segmentation dataset for this because it can be obtained -easily, it comes with a good amount of training cases but is still not too large to be difficult to handle. - -See [here](../nnunet/dataset_conversion/Task120_Massachusetts_RoadSegm.py) for an example. -This script contains a lot of comments and useful information. Also have a look -[here](../nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py). - -## How to update an existing dataset -When updating a dataset you not only need to change the data located in `nnUNet_raw_data_base/nnUNet_raw_data`. Make -sure to also delete the whole (!) corresponding dataset in `nnUNet_raw_data_base/nnUNet_cropped_data`. nnU-Net will not -repeat the cropping (and thus will not update your dataset) if the old files are still in nnUNet_cropped_data! - -The best way of updating an existing dataset is (**choose one**): -- delete all data and models belonging to the old version of the dataset (nnUNet_preprocessed, corresponding results - in RESULTS_FOLDER/nnUNet, nnUNet_cropped_data, nnUNet_raw_data), then update -- (recommended) create the updated dataset from scratch using a new task ID **and** name - - -## How to convert other image formats to nifti -Please have a look at the following tasks: -- [Task120](../nnunet/dataset_conversion/Task120_Massachusetts_RoadSegm.py): 2D png images -- [Task075](../nnunet/dataset_conversion/Task075_Fluo_C3DH_A549_ManAndSim.py) and [Task076](../nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py): 3D tiff -- [Task089](../nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py) 2D tiff \ No newline at end of file diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py deleted file mode 100644 index b705bd20e358c70797d7edbd0a8fb22fee5eeca9..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_fp16.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 - - -class nnUNetTrainerV2_fp16(nnUNetTrainerV2): - def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, - unpack_data=True, deterministic=True, fp16=False): - assert fp16, "This one only accepts fp16=True" - super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, - deterministic, fp16) diff --git a/spaces/huggingface-projects/video-composer-gpt4/utils.py b/spaces/huggingface-projects/video-composer-gpt4/utils.py deleted file mode 100644 index 6b3986b34f2cb190ad87ddbc39e1b6c811226638..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/video-composer-gpt4/utils.py +++ /dev/null @@ -1,9 +0,0 @@ -def format_bash_command(tokens): - formatted_command = [] - indent = " " * 4 - for token in tokens: - if token.startswith("-"): - formatted_command.append("\n" + indent + token) - else: - formatted_command.append(" " + token) - return "".join(formatted_command) diff --git a/spaces/hysts/bizarre-pose-estimator-segmenter/style.css b/spaces/hysts/bizarre-pose-estimator-segmenter/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/hysts/bizarre-pose-estimator-segmenter/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py deleted file mode 100644 index 14a6bb79da7eaa3f111e9efedf507e46a953c9aa..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py +++ /dev/null @@ -1,27 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "mbf" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.2 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 1e-4 -config.batch_size = 512 -config.lr = 0.4 -config.verbose = 10000 -config.dali = False - -config.rec = "/train_tmp/WebFace42M" -config.num_classes = 2059906 -config.num_image = 42474557 -config.num_epoch = 20 -config.warmup_epoch = 2 -config.val_targets = [] diff --git a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py b/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py deleted file mode 100644 index 36f6559ad3d66659dba3bc9c29e35c76a62b3576..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py +++ /dev/null @@ -1,28 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "vit_b_dp005_mask_005" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.3 -config.fp16 = True -config.weight_decay = 0.1 -config.batch_size = 256 -config.gradient_acc = 12 # total batchsize is 256 * 12 -config.optimizer = "adamw" -config.lr = 0.001 -config.verbose = 2000 -config.dali = False - -config.rec = "/train_tmp/WebFace42M" 
-config.num_classes = 2059906 -config.num_image = 42474557 -config.num_epoch = 40 -config.warmup_epoch = config.num_epoch // 10 -config.val_targets = [] diff --git a/spaces/ibvhim/Gradio-Apps/Image_Classification_EfficientNetLite4/app.py b/spaces/ibvhim/Gradio-Apps/Image_Classification_EfficientNetLite4/app.py deleted file mode 100644 index 7ff66ed8881e58547f8c57c0284187a0895f8dae..0000000000000000000000000000000000000000 --- a/spaces/ibvhim/Gradio-Apps/Image_Classification_EfficientNetLite4/app.py +++ /dev/null @@ -1,82 +0,0 @@ -import json -import math - -import cv2 -import gradio as gr -import matplotlib.pyplot as plt -import numpy as np -import onnxruntime as rt -from huggingface_hub import hf_hub_download - -modele = hf_hub_download(repo_id="onnx/EfficientNet-Lite4", filename="efficientnet-lite4-11.onnx") -# load the labels text file -labels = json.load(open("Image_Classification_EfficientNetLite4/labels_map.txt", "r")) - -# set image file dimensions to 224x224 by resizing and cropping image from center -def pre_process_edgetpu(img, dims): - output_height, output_width, _ = dims - img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR) - img = center_crop(img, output_height, output_width) - img = np.asarray(img, dtype='float32') - # converts jpg pixel value from [0 - 255] to float array [-1.0 - 1.0] - img -= [127.0, 127.0, 127.0] - img /= [128.0, 128.0, 128.0] - return img - - -# resize the image with a proportional scale -def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR): - height, width, _ = img.shape - new_height = int(100.0 * out_height / scale) - new_width = int(100.0 * out_width / scale) - if height > width: - w = new_width - h = int(new_height * height / width) - else: - h = new_height - w = int(new_width * width / height) - img = cv2.resize(img, (w, h), interpolation=inter_pol) - return img - - -# crop the image around the center based on given height and width -def center_crop(img, out_height, out_width): - height, width, _ = img.shape - left = int((width - out_width) / 2) - right = int((width + out_width) / 2) - top = int((height - out_height) / 2) - bottom = int((height + out_height) / 2) - img = img[top:bottom, left:right] - return img - - -sess = rt.InferenceSession(modele) - - -def inference(img): - img = cv2.imread(img) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - img = pre_process_edgetpu(img, (224, 224, 3)) - - img_batch = np.expand_dims(img, axis=0) - - results = sess.run(["Softmax:0"], {"images:0": img_batch})[0] - result = reversed(results[0].argsort()[-5:]) - resultdic = {} - for r in result: - resultdic[labels[str(r)]] = float(results[0][r]) - return resultdic - - -title = "EfficientNet-Lite4" -description = "EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU." 
-examples = [[hf_hub_download('nateraw/gradio-guides-files', 'catonnx.jpg', repo_type='dataset', force_filename='catonnx.jpg')], - [('https://i.imgur.com/kVem6KB.jpeg'), 'cat_staring.jpg']] - -interface = gr.Interface( - inference, gr.inputs.Image(type="filepath"), "label", title=title, description=description, examples=examples -) - -if __name__ == '__main__': - interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/igashov/DiffLinker/src/utils.py b/spaces/igashov/DiffLinker/src/utils.py deleted file mode 100644 index 2331af68420458b5dfb669fc51d7c034dbe4cea2..0000000000000000000000000000000000000000 --- a/spaces/igashov/DiffLinker/src/utils.py +++ /dev/null @@ -1,348 +0,0 @@ -import sys -from datetime import datetime - -import torch -import numpy as np - -class Logger(object): - def __init__(self, logpath, syspart=sys.stdout): - self.terminal = syspart - self.log = open(logpath, "a") - - def write(self, message): - - self.terminal.write(message) - self.log.write(message) - self.log.flush() - - def flush(self): - # this flush method is needed for python 3 compatibility. - # this handles the flush command by doing nothing. - # you might want to specify some extra behavior here. - pass - -def log(*args): - print(f'[{datetime.now()}]', *args) - -class EMA: - def __init__(self, beta): - super().__init__() - self.beta = beta - - def update_model_average(self, ma_model, current_model): - for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): - old_weight, up_weight = ma_params.data, current_params.data - ma_params.data = self.update_average(old_weight, up_weight) - - def update_average(self, old, new): - if old is None: - return new - return old * self.beta + (1 - self.beta) * new - - -def sum_except_batch(x): - return x.reshape(x.size(0), -1).sum(dim=-1) - - -def remove_mean(x): - mean = torch.mean(x, dim=1, keepdim=True) - x = x - mean - return x - - -def remove_mean_with_mask(x, node_mask): - masked_max_abs_value = (x * (1 - node_mask)).abs().sum().item() - assert masked_max_abs_value < 1e-5, f'Error {masked_max_abs_value} too high' - N = node_mask.sum(1, keepdims=True) - - mean = torch.sum(x, dim=1, keepdim=True) / N - x = x - mean * node_mask - return x - - -def remove_partial_mean_with_mask(x, node_mask, center_of_mass_mask): - """ - Subtract center of mass of fragments from coordinates of all atoms - """ - x_masked = x * center_of_mass_mask - N = center_of_mass_mask.sum(1, keepdims=True) - mean = torch.sum(x_masked, dim=1, keepdim=True) / N - x = x - mean * node_mask - return x - - -def assert_mean_zero(x): - mean = torch.mean(x, dim=1, keepdim=True) - assert mean.abs().max().item() < 1e-4 - - -def assert_mean_zero_with_mask(x, node_mask, eps=1e-10): - assert_correctly_masked(x, node_mask) - largest_value = x.abs().max().item() - error = torch.sum(x, dim=1, keepdim=True).abs().max().item() - rel_error = error / (largest_value + eps) - assert rel_error < 1e-2, f'Mean is not zero, relative_error {rel_error}' - - -def assert_partial_mean_zero_with_mask(x, node_mask, center_of_mass_mask, eps=1e-10): - assert_correctly_masked(x, node_mask) - x_masked = x * center_of_mass_mask - largest_value = x_masked.abs().max().item() - error = torch.sum(x_masked, dim=1, keepdim=True).abs().max().item() - rel_error = error / (largest_value + eps) - assert rel_error < 1e-2, f'Partial mean is not zero, relative_error {rel_error}' - - -def assert_correctly_masked(variable, node_mask): - assert (variable * (1 - node_mask)).abs().max().item() < 1e-4, \ - 
'Variables not masked properly.' - - -def check_mask_correct(variables, node_mask): - for i, variable in enumerate(variables): - if len(variable) > 0: - assert_correctly_masked(variable, node_mask) - - -def center_gravity_zero_gaussian_log_likelihood(x): - assert len(x.size()) == 3 - B, N, D = x.size() - assert_mean_zero(x) - - # r is invariant to a basis change in the relevant hyperplane. - r2 = sum_except_batch(x.pow(2)) - - # The relevant hyperplane is (N-1) * D dimensional. - degrees_of_freedom = (N-1) * D - - # Normalizing constant and logpx are computed: - log_normalizing_constant = -0.5 * degrees_of_freedom * np.log(2*np.pi) - log_px = -0.5 * r2 + log_normalizing_constant - - return log_px - - -def sample_center_gravity_zero_gaussian(size, device): - assert len(size) == 3 - x = torch.randn(size, device=device) - - # This projection only works because Gaussian is rotation invariant around - # zero and samples are independent! - x_projected = remove_mean(x) - return x_projected - - -def center_gravity_zero_gaussian_log_likelihood_with_mask(x, node_mask): - assert len(x.size()) == 3 - B, N_embedded, D = x.size() - assert_mean_zero_with_mask(x, node_mask) - - # r is invariant to a basis change in the relevant hyperplane, the masked - # out values will have zero contribution. - r2 = sum_except_batch(x.pow(2)) - - # The relevant hyperplane is (N-1) * D dimensional. - N = node_mask.squeeze(2).sum(1) # N has shape [B] - degrees_of_freedom = (N-1) * D - - # Normalizing constant and logpx are computed: - log_normalizing_constant = -0.5 * degrees_of_freedom * np.log(2*np.pi) - log_px = -0.5 * r2 + log_normalizing_constant - - return log_px - - -def sample_center_gravity_zero_gaussian_with_mask(size, device, node_mask): - assert len(size) == 3 - x = torch.randn(size, device=device) - - x_masked = x * node_mask - - # This projection only works because Gaussian is rotation invariant around - # zero and samples are independent! 
- # TODO: check it - x_projected = remove_mean_with_mask(x_masked, node_mask) - return x_projected - - -def standard_gaussian_log_likelihood(x): - # Normalizing constant and logpx are computed: - log_px = sum_except_batch(-0.5 * x * x - 0.5 * np.log(2*np.pi)) - return log_px - - -def sample_gaussian(size, device): - x = torch.randn(size, device=device) - return x - - -def standard_gaussian_log_likelihood_with_mask(x, node_mask): - # Normalizing constant and logpx are computed: - log_px_elementwise = -0.5 * x * x - 0.5 * np.log(2*np.pi) - log_px = sum_except_batch(log_px_elementwise * node_mask) - return log_px - - -def sample_gaussian_with_mask(size, device, node_mask): - x = torch.randn(size, device=device) - x_masked = x * node_mask - return x_masked - - -def concatenate_features(x, h): - xh = torch.cat([x, h['categorical']], dim=2) - if 'integer' in h: - xh = torch.cat([xh, h['integer']], dim=2) - return xh - - -def split_features(z, n_dims, num_classes, include_charges): - assert z.size(2) == n_dims + num_classes + include_charges - x = z[:, :, 0:n_dims] - h = {'categorical': z[:, :, n_dims:n_dims+num_classes]} - if include_charges: - h['integer'] = z[:, :, n_dims+num_classes:n_dims+num_classes+1] - - return x, h - - -# For gradient clipping - -class Queue: - def __init__(self, max_len=50): - self.items = [] - self.max_len = max_len - - def __len__(self): - return len(self.items) - - def add(self, item): - self.items.insert(0, item) - if len(self) > self.max_len: - self.items.pop() - - def mean(self): - return np.mean(self.items) - - def std(self): - return np.std(self.items) - - -def gradient_clipping(flow, gradnorm_queue): - # Allow gradient norm to be 150% + 2 * stdev of the recent history. - max_grad_norm = 1.5 * gradnorm_queue.mean() + 2 * gradnorm_queue.std() - - # Clips gradient and returns the norm - grad_norm = torch.nn.utils.clip_grad_norm_( - flow.parameters(), max_norm=max_grad_norm, norm_type=2.0) - - if float(grad_norm) > max_grad_norm: - gradnorm_queue.add(float(max_grad_norm)) - else: - gradnorm_queue.add(float(grad_norm)) - - if float(grad_norm) > max_grad_norm: - print(f'Clipped gradient with value {grad_norm:.1f} while allowed {max_grad_norm:.1f}') - return grad_norm - - -def disable_rdkit_logging(): - """ - Disables RDKit whiny logging. 
- """ - import rdkit.rdBase as rkrb - import rdkit.RDLogger as rkl - logger = rkl.logger() - logger.setLevel(rkl.ERROR) - rkrb.DisableLog('rdApp.error') - - -class FoundNaNException(Exception): - def __init__(self, x, h): - x_nan_idx = self.find_nan_idx(x) - h_nan_idx = self.find_nan_idx(h) - - self.x_h_nan_idx = x_nan_idx & h_nan_idx - self.only_x_nan_idx = x_nan_idx.difference(h_nan_idx) - self.only_h_nan_idx = h_nan_idx.difference(x_nan_idx) - - @staticmethod - def find_nan_idx(z): - idx = set() - for i in range(z.shape[0]): - if torch.any(torch.isnan(z[i])): - idx.add(i) - return idx - - -def get_batch_idx_for_animation(batch_size, batch_idx): - batch_indices = [] - mol_indices = [] - for idx in [0, 110, 360]: - if idx // batch_size == batch_idx: - batch_indices.append(idx % batch_size) - mol_indices.append(idx) - return batch_indices, mol_indices - - -# Rotation data augmntation -def random_rotation(x): - bs, n_nodes, n_dims = x.size() - device = x.device - angle_range = np.pi * 2 - if n_dims == 2: - theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi - cos_theta = torch.cos(theta) - sin_theta = torch.sin(theta) - R_row0 = torch.cat([cos_theta, -sin_theta], dim=2) - R_row1 = torch.cat([sin_theta, cos_theta], dim=2) - R = torch.cat([R_row0, R_row1], dim=1) - - x = x.transpose(1, 2) - x = torch.matmul(R, x) - x = x.transpose(1, 2) - - elif n_dims == 3: - - # Build Rx - Rx = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device) - theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi - cos = torch.cos(theta) - sin = torch.sin(theta) - Rx[:, 1:2, 1:2] = cos - Rx[:, 1:2, 2:3] = sin - Rx[:, 2:3, 1:2] = - sin - Rx[:, 2:3, 2:3] = cos - - # Build Ry - Ry = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device) - theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi - cos = torch.cos(theta) - sin = torch.sin(theta) - Ry[:, 0:1, 0:1] = cos - Ry[:, 0:1, 2:3] = -sin - Ry[:, 2:3, 0:1] = sin - Ry[:, 2:3, 2:3] = cos - - # Build Rz - Rz = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device) - theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi - cos = torch.cos(theta) - sin = torch.sin(theta) - Rz[:, 0:1, 0:1] = cos - Rz[:, 0:1, 1:2] = sin - Rz[:, 1:2, 0:1] = -sin - Rz[:, 1:2, 1:2] = cos - - x = x.transpose(1, 2) - x = torch.matmul(Rx, x) - #x = torch.matmul(Rx.transpose(1, 2), x) - x = torch.matmul(Ry, x) - #x = torch.matmul(Ry.transpose(1, 2), x) - x = torch.matmul(Rz, x) - #x = torch.matmul(Rz.transpose(1, 2), x) - x = x.transpose(1, 2) - else: - raise Exception("Not implemented Error") - - return x.contiguous() \ No newline at end of file diff --git a/spaces/imageomics/dashboard-prototype/README.md b/spaces/imageomics/dashboard-prototype/README.md deleted file mode 100644 index b328930844c15dcf45ece1adf7a2d749525d9ba6..0000000000000000000000000000000000000000 --- a/spaces/imageomics/dashboard-prototype/README.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Dashboard Prototype -emoji: 👀 -colorFrom: pink -colorTo: green -sdk: docker -pinned: false -license: mit ---- - - - -# Dashboard Prototype - -For development information or to make your own version, see the [GitHub project repo](https://github.com/Imageomics/dashboard-prototype). - -## How it works - -For full dashboard functionality, upload a CSV or XLS file with the following columns: -- `Image_filename`*: Filename of each image, must be unique. **Note:** Images should be in PNG or JPEG format, TIFF may fail to render in the sample image display. -- `Species`: Species of each sample. 
-- `Subspecies`: Subspecies of each sample. -- `View`: View of the sample (eg., 'ventral' or 'dorsal' for butterflies). -- `Sex`: Sex of each sample. -- `hybrid_stat`: Hybrid status of each sample (eg., 'valid_subspecies', 'subspecies_synonym', or 'unknown'). -- `lat`*: Latitude at which image was taken or specimen was collected. -- `lon`*: Longitude at which image was taken or specimen was collected. -- `file_url`*: URL to access file. - -***Note:** -- `lat` and `lon` columns are not required to utilize the dashboard, but there will be no map view if they are not included. -- `Image_filename` and `file_url` are not required, but there will be no sample images option if either one is not included. \ No newline at end of file diff --git a/spaces/impyadav/Hindi-Song-Generation-GPT2/app.py b/spaces/impyadav/Hindi-Song-Generation-GPT2/app.py deleted file mode 100644 index 8057179abba2584582b2f0c73cc2a442f2276d1d..0000000000000000000000000000000000000000 --- a/spaces/impyadav/Hindi-Song-Generation-GPT2/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import streamlit as st -from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline - -tokenizer = AutoTokenizer.from_pretrained("impyadav/GPT2-FineTuned-Hinglish-Song-Generation") - -model = AutoModelForCausalLM.from_pretrained("impyadav/GPT2-FineTuned-Hinglish-Song-Generation") - -def get_song(line): - lyricist = pipeline( - "text-generation", - model=model, - tokenizer=tokenizer - ) - return lyricist(line, max_length=150, num_return_sequences=3) - - -if __name__ == '__main__': - st.title('AI Lyricist') - st.write('Transformer Architecture : {}'.format('gpt-2')) - st.subheader("Input") - #st.write('Paste your query act here:') - user_input = st.text_area('', height=25) # height in pixel - # st.markdown('') - result = get_song(user_input) - if st.button('Run'): - st.write(result) \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Ccnp Switch Cbt Nuggets Videos Free LINK Download Torrent.md b/spaces/inamXcontru/PoeticTTS/Ccnp Switch Cbt Nuggets Videos Free LINK Download Torrent.md deleted file mode 100644 index b972096f58c0f30c5dc20f18709500273986f1fa..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Ccnp Switch Cbt Nuggets Videos Free LINK Download Torrent.md +++ /dev/null @@ -1,14 +0,0 @@ -

        Ccnp Switch Cbt Nuggets Videos Free Download Torrent


        Download Zip ===== https://gohhs.com/2uz4f7



        -
        -Description: This Cisco CCNP Enterprise Intermediate Training prepares students to take the 300-420 ENSLD exam, which is one of the concentration exams... Cisco CCNP Routing and Switching: Advanced Edition -Nov 1, 2019 ... -Cisco CCNP Routing and Switching Series: Advanced Edition. -Cisco CCNP Routing -Cisco CCNP courses in Moscow. -CCNA® and CCNP® training -CCNA® and CCNP® (CCNA Routing & Switching) training. -CCNP trainees have the following -Cisco CCNP Routing and Switching Series Level 2 - Certification ... 8a78ff9644
        -
        -
        -

        diff --git a/spaces/innovatorved/whisper.api/app/api/models/__init__.py b/spaces/innovatorved/whisper.api/app/api/models/__init__.py deleted file mode 100644 index 821a20055852a38f18e2d26bb315dd0d9e440275..0000000000000000000000000000000000000000 --- a/spaces/innovatorved/whisper.api/app/api/models/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .user import User -from .transcribe import Transcription diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Deadstick - Bush Flight Simulator Free Download !EXCLUSIVE! [full Version].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Deadstick - Bush Flight Simulator Free Download !EXCLUSIVE! [full Version].md deleted file mode 100644 index f6ae58b1684c276a3b8ae26f62712477edb8be59..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Deadstick - Bush Flight Simulator Free Download !EXCLUSIVE! [full Version].md +++ /dev/null @@ -1,6 +0,0 @@ -

        Deadstick - Bush Flight Simulator Free Download [full Version]


        Download File 🔗 https://urlin.us/2uEwfo



        -
        -The game Dead stick Blush Flight Simulator, torrent download which is free, you have to become a pilot of a small plane delivering goods in ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Ball Z Kai 1080p Latino Mega REPACK.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Ball Z Kai 1080p Latino Mega REPACK.md deleted file mode 100644 index f541dda2de917b341723085cadc3f614407da1c9..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Ball Z Kai 1080p Latino Mega REPACK.md +++ /dev/null @@ -1,26 +0,0 @@ -
        -

        How to Watch Dragon Ball Z Kai in 1080p with Dual Audio

        -

        If you are a fan of Dragon Ball Z, you might have heard of Dragon Ball Z Kai, a remastered version of the original series that cuts out most of the filler episodes and follows the manga more closely. Dragon Ball Z Kai is a great way to experience the epic story of Goku and his friends in a shorter and more faithful adaptation.

        -

        dragon ball z kai 1080p latino mega


        Download Zip »»» https://urlin.us/2uEyIr



        -

        But how can you watch Dragon Ball Z Kai in high quality with dual audio? If you prefer to watch the show in Spanish or English, you might have trouble finding a reliable source that offers both languages and a good resolution. That's why we have compiled this guide to help you watch Dragon Ball Z Kai in 1080p with dual audio using a mega link.

        -

        What is a mega link?

        -

        A mega link is a URL that leads to a folder or file hosted on Mega.nz, a cloud storage service that allows users to upload and download large amounts of data. Mega links are often used by fans to share media content that is hard to find or not available on official platforms.

        -

        To use a mega link, you need to copy and paste it into your browser's address bar and press enter. You will be taken to the Mega website, where you can either download the file or folder directly or use the Mega app to manage your downloads. You can also use a base64 decoder to reveal the mega link if it is hidden or encrypted.

        -

        Where can I find a mega link for Dragon Ball Z Kai 1080p dual audio?

        -

        There are many sources online that claim to offer mega links for Dragon Ball Z Kai 1080p dual audio, but not all of them are trustworthy or working. Some of them might be broken, expired, deleted, or contain malware. That's why you need to be careful and check the credibility of the source before clicking on any link.

        -

        One of the best places to find reliable mega links for Dragon Ball Z Kai 1080p dual audio is Reddit, a social media platform where users can create and join communities based on their interests. Reddit has many subreddits dedicated to sharing and discussing anime content, such as r/hamlinks, r/animepiracy, r/megalinksanime, etc.

        -

        -

        On these subreddits, you can find posts from other fans who have uploaded or reuploaded Dragon Ball Z Kai 1080p dual audio on Mega.nz and shared their links with others. You can also ask for requests or recommendations if you can't find what you are looking for. However, you need to follow the rules of each subreddit and respect the uploader's wishes. Do not share the links outside of Reddit or report them to avoid them getting taken down.

        -

        Here are some examples of posts that offer mega links for Dragon Ball Z Kai 1080p dual audio on Reddit:

        - -

        How to watch Dragon Ball Z Kai 1080p dual audio using a mega link?

        -

        Once you have found a mega link for Dragon Ball Z Kai 1080p dual audio that works for you, you can follow these steps to watch the show using your preferred device:

        -
          -
        1. Copy and paste the mega link into your browser's address bar and press enter. If the link is hidden or encrypted using base64 encoding, use a base64 decoder website like base64decode.org to reveal it first.
2. You will be taken to the

          d5da3c52bf
          -
          -
          \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download __LINK__.md b/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download __LINK__.md deleted file mode 100644 index 3434d4fa970004313a736cd27137b1a9d3bd2c46..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download __LINK__.md +++ /dev/null @@ -1,6 +0,0 @@ -

          crack DWG TrueView 2016 download


          Download File ->>> https://tiurll.com/2uCjej



          - -The Oasis montaj Viewer is a free, easy-to-use product that allows anyone ... Geosoft Oasis Montaj Crack Download Download Geosoft Oasis Montaj 8. ... including AutoCAD, ArcVIew, ER Mapper, TIF and many more. 1fdad05405
          -
          -
          -

          diff --git a/spaces/izumi-lab/stormy-7b-10ep/app.py b/spaces/izumi-lab/stormy-7b-10ep/app.py deleted file mode 100644 index ab9e45abe9d3a196d85e16ca54330efd630a8d1d..0000000000000000000000000000000000000000 --- a/spaces/izumi-lab/stormy-7b-10ep/app.py +++ /dev/null @@ -1,408 +0,0 @@ -import datetime -import json -import os -import shutil -from typing import Optional -from typing import Tuple -from typing import Union - -import gradio as gr -import requests -import torch -from fastchat.conversation import Conversation -from fastchat.conversation import SeparatorStyle -from fastchat.conversation import get_conv_template -from fastchat.conversation import register_conv_template -from fastchat.model.model_adapter import BaseAdapter -from fastchat.model.model_adapter import load_model -from fastchat.model.model_adapter import model_adapters -from fastchat.serve.cli import SimpleChatIO -from fastchat.serve.inference import generate_stream -from huggingface_hub import Repository -from huggingface_hub import snapshot_download -from peft import LoraConfig -from peft import PeftModel -from peft import get_peft_model -from peft import set_peft_model_state_dict -from transformers import AutoModelForCausalLM -from transformers import AutoTokenizer -from transformers import PreTrainedModel -from transformers import PreTrainedTokenizerBase - - -class FastTokenizerAvailableBaseAdapter(BaseAdapter): - def load_model(self, model_path: str, from_pretrained_kwargs: dict): - try: - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) - except ValueError: - tokenizer = AutoTokenizer.from_pretrained(model_path) - model = AutoModelForCausalLM.from_pretrained( - model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs - ) - return model, tokenizer - - -model_adapters[-1] = FastTokenizerAvailableBaseAdapter() - - -def load_lora_model( - model_path: str, - lora_weight: str, - device: str, - num_gpus: int, - max_gpu_memory: Optional[str] = None, - load_8bit: bool = False, - cpu_offloading: bool = False, - debug: bool = False, -) -> Tuple[Union[PreTrainedModel, PeftModel], PreTrainedTokenizerBase]: - model: Union[PreTrainedModel, PeftModel] - tokenizer: PreTrainedTokenizerBase - model, tokenizer = load_model( - model_path=model_path, - device=device, - num_gpus=num_gpus, - max_gpu_memory=max_gpu_memory, - load_8bit=load_8bit, - cpu_offloading=cpu_offloading, - debug=debug, - ) - if lora_weight is not None: - # model = PeftModelForCausalLM.from_pretrained(model, model_path, **kwargs) - config = LoraConfig.from_pretrained(lora_weight) - model = get_peft_model(model, config) - - # Check the available weights and load them - checkpoint_name = os.path.join( - lora_weight, "pytorch_model.bin" - ) # Full checkpoint - if not os.path.exists(checkpoint_name): - checkpoint_name = os.path.join( - lora_weight, "adapter_model.bin" - ) # only LoRA model - LoRA config above has to fit - # The two files above have a different name depending on how they were saved, - # but are actually the same. 
- if os.path.exists(checkpoint_name): - adapters_weights = torch.load(checkpoint_name) - set_peft_model_state_dict(model, adapters_weights) - else: - raise IOError(f"Checkpoint {checkpoint_name} not found") - - if debug: - print(model) - - model.eval() - - return model, tokenizer - - -print(datetime.datetime.now()) - -NUM_THREADS = 1 - -print(NUM_THREADS) - -print("starting server ...") - -BASE_MODEL = "cyberagent/open-calm-7b" -LORA_WEIGHTS_HF = "izumi-lab/stormy-7b-10ep" -HF_TOKEN = os.environ.get("HF_TOKEN", None) -DATASET_REPOSITORY = os.environ.get("DATASET_REPOSITORY", None) -SLACK_WEBHOOK = os.environ.get("SLACK_WEBHOOK", None) - -LORA_WEIGHTS = snapshot_download(LORA_WEIGHTS_HF) - -repo = None -LOCAL_DIR = "/home/user/data/" - -if HF_TOKEN and DATASET_REPOSITORY: - try: - shutil.rmtree(LOCAL_DIR) - except Exception: - pass - - repo = Repository( - local_dir=LOCAL_DIR, - clone_from=DATASET_REPOSITORY, - use_auth_token=HF_TOKEN, - repo_type="dataset", - ) - repo.git_pull() - -if torch.cuda.is_available(): - device = "cuda" -else: - device = "cpu" - -model, tokenizer = load_lora_model( - model_path=BASE_MODEL, - lora_weight=LORA_WEIGHTS, - device=device, - num_gpus=1, - max_gpu_memory="16GiB", - load_8bit=False, - cpu_offloading=False, - debug=False, -) - -register_conv_template( - Conversation( - name="japanese", - system="以下はタスクを説明する指示です。要求を適切に満たすような返答を書いてください。\n\n", - roles=("指示", "返答"), - messages=(), - offset=0, - sep_style=SeparatorStyle.ADD_COLON_SINGLE, - sep="\n###", - stop_str="###", - ) -) - - -Conversation._get_prompt = Conversation.get_prompt -Conversation._append_message = Conversation.append_message - - -def conversation_append_message(cls, role: str, message: str): - cls.offset = -2 - return cls._append_message(role, message) - - -def conversation_get_prompt_overrider(cls: Conversation) -> str: - cls.messages = cls.messages[-2:] - return cls._get_prompt() - - -def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs): - current_hour = now.strftime("%Y-%m-%d_%H") - file_name = f"prompts_{LORA_WEIGHTS_HF.split('/')[-1]}_{current_hour}.jsonl" - - if repo is not None: - repo.git_pull(rebase=True) - with open(os.path.join(LOCAL_DIR, file_name), "a", encoding="utf-8") as f: - json.dump( - { - "inputs": inputs, - "outputs": outputs, - "generate_kwargs": generate_kwargs, - }, - f, - ensure_ascii=False, - ) - f.write("\n") - repo.push_to_hub() - - -# we cant add typing now -# https://github.com/gradio-app/gradio/issues/3514 -def evaluate( - instruction, - temperature=0.7, - max_tokens=256, - repetition_penalty=1.0, -): - try: - conv_template = "japanese" - - inputs = tokenizer(instruction, return_tensors="pt") - if len(inputs["input_ids"][0]) > max_tokens - 40: - if HF_TOKEN and DATASET_REPOSITORY: - try: - now = datetime.datetime.now() - current_time = now.strftime("%Y-%m-%d %H:%M:%S") - print(f"[{current_time}] Pushing prompt and completion to the Hub") - save_inputs_and_outputs( - now, - instruction, - "", - { - "temperature": temperature, - "max_tokens": max_tokens, - "repetition_penalty": repetition_penalty, - }, - ) - except Exception as e: - print(e) - return ( - f"please reduce the input length. 
Currently, {len(inputs['input_ids'][0])} ( > {max_tokens - 40}) tokens are used.", - gr.update(interactive=True), - gr.update(interactive=True), - ) - - conv = get_conv_template(conv_template) - - conv.append_message(conv.roles[0], instruction) - conv.append_message(conv.roles[1], None) - - generate_stream_func = generate_stream - prompt = conv.get_prompt() - - gen_params = { - "model": BASE_MODEL, - "prompt": prompt, - "temperature": temperature, - "max_new_tokens": max_tokens - len(inputs["input_ids"][0]) - 30, - "stop": conv.stop_str, - "stop_token_ids": conv.stop_token_ids, - "echo": False, - "repetition_penalty": repetition_penalty, - } - chatio = SimpleChatIO() - chatio.prompt_for_output(conv.roles[1]) - output_stream = generate_stream_func(model, tokenizer, gen_params, device) - output = chatio.stream_output(output_stream) - - if HF_TOKEN and DATASET_REPOSITORY: - try: - now = datetime.datetime.now() - current_time = now.strftime("%Y-%m-%d %H:%M:%S") - print(f"[{current_time}] Pushing prompt and completion to the Hub") - save_inputs_and_outputs( - now, - prompt, - output, - { - "temperature": temperature, - "max_tokens": max_tokens, - "repetition_penalty": repetition_penalty, - }, - ) - except Exception as e: - print(e) - return output, gr.update(interactive=True), gr.update(interactive=True) - except Exception as e: - print(e) - import traceback - - if SLACK_WEBHOOK: - payload_dic = { - "text": f"BASE_MODEL: {BASE_MODEL}\n LORA_WEIGHTS: {LORA_WEIGHTS_HF}\n" - + f"instruction: {instruction}\ninput: {input}\ntemperature: {temperature}\n" - + f"max_tokens: {max_tokens}\nrepetition_penalty: {repetition_penalty}\n\n" - + str(traceback.format_exc()), - "username": "Hugging Face Space", - "channel": "#monitor", - } - - try: - requests.post(SLACK_WEBHOOK, data=json.dumps(payload_dic)) - except Exception: - pass - return ( - "Error happend. Please return later.", - gr.update(interactive=True), - gr.update(interactive=True), - ) - - -def reset_textbox(): - return gr.update(value=""), gr.update(value=""), gr.update(value="") - - -def no_interactive() -> Tuple[gr.Request, gr.Request]: - return gr.update(interactive=False), gr.update(interactive=False) - - -title = """

          stormy 7B 10epochs

          """ - -theme = gr.themes.Default(primary_hue="green") -description = ( - "The official demo for **[izumi-lab/stormy-7b-10ep](https://huggingface.co/izumi-lab/stormy-7b-10ep)**. " - "It is a 7B-parameter CALM model finetuned to follow instructions. " - "It is trained on the dataset specially extracted from [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset) dataset. " - "For more information, please visit [the project's website](https://llm.msuzuki.me). " - "This model can output up to 256 tokens. " - "It takes about **1 minute** to output. When access is concentrated, the operation may become slow." -) -with gr.Blocks( - css="""#col_container { margin-left: auto; margin-right: auto;}""", - theme=theme, -) as demo: - gr.HTML(title) - gr.Markdown(description) - with gr.Column(elem_id="col_container", visible=False) as main_block: - with gr.Row(): - with gr.Column(): - instruction = gr.Textbox( - lines=3, label="Instruction", placeholder="こんにちは" - ) - with gr.Row(): - with gr.Column(scale=3): - clear_button = gr.Button("Clear").style(full_width=True) - with gr.Column(scale=5): - submit_button = gr.Button("Submit").style(full_width=True) - outputs = gr.Textbox(lines=4, label="Output") - - # inputs, top_p, temperature, top_k, repetition_penalty - with gr.Accordion("Parameters", open=True): - temperature = gr.Slider( - minimum=0, - maximum=1.0, - value=0.0, - step=0.05, - interactive=True, - label="Temperature", - ) - max_tokens = gr.Slider( - minimum=20, - maximum=256, - value=128, - step=1, - interactive=True, - label="Max length (Pre-prompt + instruction + input + output)", - ) - repetition_penalty = gr.Slider( - minimum=0.0, - maximum=5.0, - value=1.05, - step=0.05, - interactive=True, - label="Repetition penalty", - ) - - with gr.Column(elem_id="user_consent_container") as user_consent_block: - # Get user consent - gr.Markdown( - """ - ## User Consent for Data Collection, Use, and Sharing: - By using our app, you acknowledge and agree to the following terms regarding the data you provide: - - **Collection**: We may collect inputs you type into our app. - - **Use**: We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications. - - **Sharing and Publication**: Your input data may be published, shared with third parties, or used for analysis and reporting purposes. - - **Data Retention**: We may retain your input data for as long as necessary. - - By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. If you do not agree with our data collection, use, and sharing practices, please do not use our app. 
- - ## データ収集、利用、共有に関するユーザーの同意: - 本アプリを使用することにより、提供するデータに関する以下の条件に同意するものとします: - - **収集**: 本アプリに入力されるテキストデータは収集される場合があります。 - - **利用**: 収集されたデータは研究や、商用アプリケーションを含むサービスの開発に使用される場合があります。 - - **共有および公開**: 入力データは第三者と共有されたり、分析や公開の目的で使用される場合があります。 - - **データ保持**: 入力データは必要な限り保持されます。 - - 本アプリを引き続き使用することにより、上記のようにデータの収集・利用・共有について同意します。データの利用方法に同意しない場合は、本アプリを使用しないでください。 - """ - ) - accept_button = gr.Button("I Agree") - - def enable_inputs(): - return user_consent_block.update(visible=False), main_block.update( - visible=True - ) - - accept_button.click( - fn=enable_inputs, - inputs=[], - outputs=[user_consent_block, main_block], - queue=False, - ) - submit_button.click(no_interactive, [], [submit_button, clear_button]) - submit_button.click( - evaluate, - [instruction, temperature, max_tokens, repetition_penalty], - [outputs, submit_button, clear_button], - ) - clear_button.click(reset_textbox, [], [instruction, outputs], queue=False) - - demo.queue(max_size=20, concurrency_count=NUM_THREADS, api_open=False).launch( - server_name="0.0.0.0", server_port=7860 - ) diff --git a/spaces/j0hngou/vision-diffmask/code/models/gates.py b/spaces/j0hngou/vision-diffmask/code/models/gates.py deleted file mode 100644 index 4281fa9395f347b1894fbaa3268d17b7a21018c3..0000000000000000000000000000000000000000 --- a/spaces/j0hngou/vision-diffmask/code/models/gates.py +++ /dev/null @@ -1,261 +0,0 @@ -""" -Parts of this file have been adapted from -https://github.com/nicola-decao/diffmask/blob/master/diffmask/models/gates.py -""" - -import torch -import torch.nn as nn - -from torch import Tensor -from typing import Optional -from utils.distributions import RectifiedStreched, BinaryConcrete - - -class MLPGate(nn.Module): - def __init__(self, input_size: int, hidden_size: int, bias: bool = True): - """ - This is an MLP with the following structure; - Linear(input_size, hidden_size), Tanh(), Linear(hidden_size, 1) - The bias of the last layer is set to 5.0 to start with high probability - of keeping states (fundamental for good convergence as the initialized - DiffMask has not learned what to mask yet). - - Args: - input_size (int): the number of input features - hidden_size (int): the number of hidden units - bias (bool): whether to use a bias term - """ - super().__init__() - - self.f = nn.Sequential( - nn.utils.weight_norm(nn.Linear(input_size, hidden_size)), - nn.Tanh(), - nn.utils.weight_norm(nn.Linear(hidden_size, 1, bias=bias)), - ) - - if bias: - self.f[-1].bias.data[:] = 5.0 - - def forward(self, *args: Tensor) -> Tensor: - return self.f(torch.cat(args, -1)) - - -class MLPMaxGate(nn.Module): - def __init__( - self, - input_size: int, - hidden_size: int, - mul_activation: float = 10.0, - add_activation: float = 5.0, - bias: bool = True, - ): - """ - This is an MLP with the following structure; - Linear(input_size, hidden_size), Tanh(), Linear(hidden_size, 1) - The bias of the last layer is set to 5.0 to start with high probability - of keeping states (fundamental for good convergence as the initialized - DiffMask has not learned what to mask yet). - It also uses a scaler for the output of the activation function. 
- - Args: - input_size (int): the number of input features - hidden_size (int): the number of hidden units - mul_activation (float): the scaler for the output of the activation function - add_activation (float): the offset for the output of the activation function - bias (bool): whether to use a bias term - """ - super().__init__() - - self.f = nn.Sequential( - nn.utils.weight_norm(nn.Linear(input_size, hidden_size)), - nn.Tanh(), - nn.utils.weight_norm(nn.Linear(hidden_size, 1, bias=bias)), - nn.Tanh(), - ) - self.add_activation = nn.Parameter(torch.tensor(add_activation)) - self.mul_activation = mul_activation - - def forward(self, *args: Tensor) -> Tensor: - return self.f(torch.cat(args, -1)) * self.mul_activation + self.add_activation - - -class DiffMaskGateInput(nn.Module): - def __init__( - self, - hidden_size: int, - hidden_attention: int, - num_hidden_layers: int, - max_position_embeddings: int, - gate_fn: nn.Module = MLPMaxGate, - mul_activation: float = 10.0, - add_activation: float = 5.0, - gate_bias: bool = True, - placeholder: bool = False, - init_vector: Tensor = None, - ): - """This is a DiffMask module that masks the input of the first layer. - - Args: - hidden_size (int): the size of the hidden representations - hidden_attention (int) the amount of units in the gate's hidden (bottleneck) layer - num_hidden_layers (int): the number of hidden layers (and thus gates to use) - max_position_embeddings (int): the amount of placeholder embeddings to learn for the masked positions - gate_fn (nn.Module): the PyTorch module to use as a gate - mul_activation (float): the scaler for the output of the activation function - add_activation (float): the offset for the output of the activation function - gate_bias (bool): whether to use a bias term - placeholder (bool): whether to use placeholder embeddings or a zero vector - init_vector (Tensor): the initial vector to use for the placeholder embeddings - """ - super().__init__() - - # Create a ModuleList with the gates - self.g_hat = nn.ModuleList( - [ - gate_fn( - hidden_size * 2, - hidden_attention, - mul_activation, - add_activation, - gate_bias, - ) - for _ in range(num_hidden_layers) - ] - ) - - if placeholder: - # Use a placeholder embedding for the masked positions - self.placeholder = nn.Parameter( - nn.init.xavier_normal_( - torch.empty((1, max_position_embeddings, hidden_size)) - ) - if init_vector is None - else init_vector.view(1, 1, hidden_size).repeat( - 1, max_position_embeddings, 1 - ) - ) - else: - # Use a zero vector for the masked positions - self.register_buffer( - "placeholder", - torch.zeros((1, 1, hidden_size)), - ) - - def forward( - self, hidden_states: tuple[Tensor], layer_pred: Optional[int] - ) -> tuple[tuple[Tensor], Tensor, Tensor, Tensor, Tensor]: - # Concatenate the output of all the gates - logits = torch.cat( - [ - self.g_hat[i](hidden_states[0], hidden_states[i]) - for i in range( - (layer_pred + 1) if layer_pred is not None else len(hidden_states) - ) - ], - -1, - ) - - # Define a Hard Concrete distribution - dist = RectifiedStreched( - BinaryConcrete(torch.full_like(logits, 0.2), logits), - l=-0.2, - r=1.0, - ) - - # Calculate the expectation for the full gate probabilities - # These act as votes for the masked positions - gates_full = dist.rsample().cumprod(-1) - expected_L0_full = dist.log_expected_L0().cumsum(-1) - - # Extract the probabilities from the last layer, which acts - # as an aggregation of the votes per position - gates = gates_full[..., -1] - expected_L0 = expected_L0_full[..., -1] - - 
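        # The gated sum returned just below keeps each position's original embedding where the
        # aggregated gate is close to 1 and swaps in the learned placeholder where it is close to 0,
        # i.e. masked positions are replaced by the placeholder rather than simply zeroed out.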
return ( - hidden_states[0] * gates.unsqueeze(-1) - + self.placeholder[:, : hidden_states[0].shape[-2]] - * (1 - gates).unsqueeze(-1), - gates, - expected_L0, - gates_full, - expected_L0_full, - ) - - -# class DiffMaskGateHidden(nn.Module): -# def __init__( -# self, -# hidden_size: int, -# hidden_attention: int, -# num_hidden_layers: int, -# max_position_embeddings: int, -# gate_fn: nn.Module = MLPMaxGate, -# gate_bias: bool = True, -# placeholder: bool = False, -# init_vector: Tensor = None, -# ): -# super().__init__() -# -# self.g_hat = nn.ModuleList( -# [ -# gate_fn(hidden_size, hidden_attention, bias=gate_bias) -# for _ in range(num_hidden_layers) -# ] -# ) -# -# if placeholder: -# self.placeholder = nn.ParameterList( -# [ -# nn.Parameter( -# nn.init.xavier_normal_( -# torch.empty((1, max_position_embeddings, hidden_size)) -# ) -# if init_vector is None -# else init_vector.view(1, 1, hidden_size).repeat( -# 1, max_position_embeddings, 1 -# ) -# ) -# for _ in range(num_hidden_layers) -# ] -# ) -# else: -# self.register_buffer( -# "placeholder", -# torch.zeros((num_hidden_layers, 1, 1, hidden_size)), -# ) -# -# def forward( -# self, hidden_states: tuple[Tensor], layer_pred: Optional[int] -# ) -> tuple[tuple[Tensor], Tensor, Tensor, Tensor, Tensor]: -# if layer_pred is not None: -# logits = self.g_hat[layer_pred](hidden_states[layer_pred]) -# else: -# logits = torch.cat( -# [self.g_hat[i](hidden_states[i]) for i in range(len(hidden_states))], -1 -# ) -# -# dist = RectifiedStreched( -# BinaryConcrete(torch.full_like(logits, 0.2), logits), -# l=-0.2, -# r=1.0, -# ) -# -# gates_full = dist.rsample() -# expected_L0_full = dist.log_expected_L0() -# -# gates = gates_full if layer_pred is not None else gates_full[..., :1] -# expected_L0 = ( -# expected_L0_full if layer_pred is not None else expected_L0_full[..., :1] -# ) -# -# layer_pred = layer_pred or 0 # equiv to "layer_pred if layer_pred else 0" -# return ( -# hidden_states[layer_pred] * gates -# + self.placeholder[layer_pred][:, : hidden_states[layer_pred].shape[-2]] -# * (1 - gates), -# gates.squeeze(-1), -# expected_L0.squeeze(-1), -# gates_full, -# expected_L0_full, -# ) diff --git a/spaces/james-oldfield/PandA/networks/biggan/file_utils.py b/spaces/james-oldfield/PandA/networks/biggan/file_utils.py deleted file mode 100644 index cb92e2d27780739afea1e8000abb9d5dcbd9543a..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/biggan/file_utils.py +++ /dev/null @@ -1,232 +0,0 @@ -""" -Utilities for working with the local dataset cache. -This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp -Copyright by the AllenNLP authors. 
-""" -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import logging -import os -import shutil -import tempfile -from functools import wraps -from hashlib import sha256 -import sys -from io import open - -import requests -from tqdm import tqdm - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse - -try: - from pathlib import Path - PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE', - Path.home() / '.pytorch_pretrained_biggan')) -except (AttributeError, ImportError): - PYTORCH_PRETRAINED_BIGGAN_CACHE = os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE', - os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_biggan')) - -logger = logging.getLogger(__name__) # pylint: disable=invalid-name - - -def url_to_filename(url, etag=None): - """ - Convert `url` into a hashed filename in a repeatable way. - If `etag` is specified, append its hash to the url's, delimited - by a period. - """ - url_bytes = url.encode('utf-8') - url_hash = sha256(url_bytes) - filename = url_hash.hexdigest() - - if etag: - etag_bytes = etag.encode('utf-8') - etag_hash = sha256(etag_bytes) - filename += '.' + etag_hash.hexdigest() - - return filename - - -def filename_to_url(filename, cache_dir=None): - """ - Return the url and etag (which may be ``None``) stored for `filename`. - Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - cache_path = os.path.join(cache_dir, filename) - if not os.path.exists(cache_path): - raise EnvironmentError("file {} not found".format(cache_path)) - - meta_path = cache_path + '.json' - if not os.path.exists(meta_path): - raise EnvironmentError("file {} not found".format(meta_path)) - - with open(meta_path, encoding="utf-8") as meta_file: - metadata = json.load(meta_file) - url = metadata['url'] - etag = metadata['etag'] - - return url, etag - - -def cached_path(url_or_filename, cache_dir=None): - """ - Given something that might be a URL (or might be a local path), - determine which. If it's a URL, download the file and cache it, and - return the path to the cached file. If it's already a local path, - make sure the file exists and then return the path. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE - if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): - url_or_filename = str(url_or_filename) - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - parsed = urlparse(url_or_filename) - - if parsed.scheme in ('http', 'https', 's3'): - # URL, so get it from the cache (downloading if necessary) - return get_from_cache(url_or_filename, cache_dir) - elif os.path.exists(url_or_filename): - # File, and it exists. - return url_or_filename - elif parsed.scheme == '': - # File, but it doesn't exist. - raise EnvironmentError("file {} not found".format(url_or_filename)) - else: - # Something unknown - raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) - - -def split_s3_path(url): - """Split a full s3 path into the bucket name and path.""" - parsed = urlparse(url) - if not parsed.netloc or not parsed.path: - raise ValueError("bad s3 path {}".format(url)) - bucket_name = parsed.netloc - s3_path = parsed.path - # Remove '/' at beginning of path. 
- if s3_path.startswith("/"): - s3_path = s3_path[1:] - return bucket_name, s3_path - - -def s3_request(func): - """ - Wrapper function for s3 requests in order to create more helpful error - messages. - """ - - @wraps(func) - def wrapper(url, *args, **kwargs): - try: - return func(url, *args, **kwargs) - except ClientError as exc: - if int(exc.response["Error"]["Code"]) == 404: - raise EnvironmentError("file {} not found".format(url)) - else: - raise - - return wrapper - - -def http_get(url, temp_file): - req = requests.get(url, stream=True) - content_length = req.headers.get('Content-Length') - total = int(content_length) if content_length is not None else None - progress = tqdm(unit="B", total=total) - for chunk in req.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache(url, cache_dir=None): - """ - Given a URL, look for the corresponding dataset in the local cache. - If it's not there, download it. Then return the path to the cached file. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - # Get eTag to add to filename, if it exists. - if url.startswith("s3://"): - print('Not supported due to colab demo. Sorry!') - raise - else: - response = requests.head(url, allow_redirects=True) - if response.status_code != 200: - raise IOError("HEAD request failed for url {} with status code {}" - .format(url, response.status_code)) - etag = response.headers.get("ETag") - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - if not os.path.exists(cache_path): - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. - with tempfile.NamedTemporaryFile() as temp_file: - logger.info("%s not found in cache, downloading to %s", url, temp_file.name) - - # GET file object - if url.startswith("s3://"): - print('Not supported due to colab demo. Sorry!') - raise - else: - http_get(url, temp_file) - - # we are copying the file before closing it, so flush to avoid truncation - temp_file.flush() - # shutil.copyfileobj() starts at the current position, so go to the start - temp_file.seek(0) - - logger.info("copying %s to cache at %s", temp_file.name, cache_path) - with open(cache_path, 'wb') as cache_file: - shutil.copyfileobj(temp_file, cache_file) - - logger.info("creating metadata file for %s", cache_path) - meta = {'url': url, 'etag': etag} - meta_path = cache_path + '.json' - with open(meta_path, 'w', encoding="utf-8") as meta_file: - json.dump(meta, meta_file) - - logger.info("removing temp file %s", temp_file.name) - - return cache_path - - -def read_set_from_file(filename): - ''' - Extract a de-duped collection (set) of text from a file. - Expected file format is one item per line. 
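    For example (hypothetical contents): a file holding the three lines "alpha",
    "beta", "alpha" yields the set {"alpha", "beta"}; trailing whitespace is stripped
    and duplicates collapse.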
- ''' - collection = set() - with open(filename, 'r', encoding='utf-8') as file_: - for line in file_: - collection.add(line.rstrip()) - return collection - - -def get_file_extension(path, dot=True, lower=True): - ext = os.path.splitext(path)[1] - ext = ext if dot else ext[1:] - return ext.lower() if lower else ext diff --git a/spaces/jamesliu1217/midjourney-v5/app.py b/spaces/jamesliu1217/midjourney-v5/app.py deleted file mode 100644 index a7e777fc5c7f3e31a491e4bd016b8948b6a260f4..0000000000000000000000000000000000000000 --- a/spaces/jamesliu1217/midjourney-v5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/flax/midjourney-v4-diffusion").launch() \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/field.tsx b/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/field.tsx deleted file mode 100644 index cbecc7e1999208fb83946e4674f913a85de7e514..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/app/interface/settings-dialog/field.tsx +++ /dev/null @@ -1,7 +0,0 @@ -import { ReactNode } from "react" - -export function Field({ children }: { children: ReactNode }) { - return ( -
          {children}
          - ) -} \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/renderer.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/renderer.py deleted file mode 100644 index 53e7c0f6faf98477594b8ce65162160ce2ec0f98..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/renderer.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2001-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Help for building DNS wire format messages""" - -import contextlib -import io -import random -import struct -import time - -import dns.exception -import dns.tsig - -QUESTION = 0 -ANSWER = 1 -AUTHORITY = 2 -ADDITIONAL = 3 - - -class Renderer: - """Helper class for building DNS wire-format messages. - - Most applications can use the higher-level L{dns.message.Message} - class and its to_wire() method to generate wire-format messages. - This class is for those applications which need finer control - over the generation of messages. - - Typical use:: - - r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512) - r.add_question(qname, qtype, qclass) - r.add_rrset(dns.renderer.ANSWER, rrset_1) - r.add_rrset(dns.renderer.ANSWER, rrset_2) - r.add_rrset(dns.renderer.AUTHORITY, ns_rrset) - r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1) - r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2) - r.add_edns(0, 0, 4096) - r.write_header() - r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) - wire = r.get_wire() - - If padding is going to be used, then the OPT record MUST be - written after everything else in the additional section except for - the TSIG (if any). 
- - output, an io.BytesIO, where rendering is written - - id: the message id - - flags: the message flags - - max_size: the maximum size of the message - - origin: the origin to use when rendering relative names - - compress: the compression table - - section: an int, the section currently being rendered - - counts: list of the number of RRs in each section - - mac: the MAC of the rendered message (if TSIG was used) - """ - - def __init__(self, id=None, flags=0, max_size=65535, origin=None): - """Initialize a new renderer.""" - - self.output = io.BytesIO() - if id is None: - self.id = random.randint(0, 65535) - else: - self.id = id - self.flags = flags - self.max_size = max_size - self.origin = origin - self.compress = {} - self.section = QUESTION - self.counts = [0, 0, 0, 0] - self.output.write(b"\x00" * 12) - self.mac = "" - self.reserved = 0 - self.was_padded = False - - def _rollback(self, where): - """Truncate the output buffer at offset *where*, and remove any - compression table entries that pointed beyond the truncation - point. - """ - - self.output.seek(where) - self.output.truncate() - keys_to_delete = [] - for k, v in self.compress.items(): - if v >= where: - keys_to_delete.append(k) - for k in keys_to_delete: - del self.compress[k] - - def _set_section(self, section): - """Set the renderer's current section. - - Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, - ADDITIONAL. Sections may be empty. - - Raises dns.exception.FormError if an attempt was made to set - a section value less than the current section. - """ - - if self.section != section: - if self.section > section: - raise dns.exception.FormError - self.section = section - - @contextlib.contextmanager - def _track_size(self): - start = self.output.tell() - yield start - if self.output.tell() > self.max_size: - self._rollback(start) - raise dns.exception.TooBig - - def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN): - """Add a question to the message.""" - - self._set_section(QUESTION) - with self._track_size(): - qname.to_wire(self.output, self.compress, self.origin) - self.output.write(struct.pack("!HH", rdtype, rdclass)) - self.counts[QUESTION] += 1 - - def add_rrset(self, section, rrset, **kw): - """Add the rrset to the specified section. - - Any keyword arguments are passed on to the rdataset's to_wire() - routine. - """ - - self._set_section(section) - with self._track_size(): - n = rrset.to_wire(self.output, self.compress, self.origin, **kw) - self.counts[section] += n - - def add_rdataset(self, section, name, rdataset, **kw): - """Add the rdataset to the specified section, using the specified - name as the owner name. - - Any keyword arguments are passed on to the rdataset's to_wire() - routine. - """ - - self._set_section(section) - with self._track_size(): - n = rdataset.to_wire(name, self.output, self.compress, self.origin, **kw) - self.counts[section] += n - - def add_opt(self, opt, pad=0, opt_size=0, tsig_size=0): - """Add *opt* to the additional section, applying padding if desired. The - padding will take the specified precomputed OPT size and TSIG size into - account. 
- - Note that we don't have reliable way of knowing how big a GSS-TSIG digest - might be, so we we might not get an even multiple of the pad in that case.""" - if pad: - ttl = opt.ttl - assert opt_size >= 11 - opt_rdata = opt[0] - size_without_padding = self.output.tell() + opt_size + tsig_size - remainder = size_without_padding % pad - if remainder: - pad = b"\x00" * (pad - remainder) - else: - pad = b"" - options = list(opt_rdata.options) - options.append(dns.edns.GenericOption(dns.edns.OptionType.PADDING, pad)) - opt = dns.message.Message._make_opt(ttl, opt_rdata.rdclass, options) - self.was_padded = True - self.add_rrset(ADDITIONAL, opt) - - def add_edns(self, edns, ednsflags, payload, options=None): - """Add an EDNS OPT record to the message.""" - - # make sure the EDNS version in ednsflags agrees with edns - ednsflags &= 0xFF00FFFF - ednsflags |= edns << 16 - opt = dns.message.Message._make_opt(ednsflags, payload, options) - self.add_opt(opt) - - def add_tsig( - self, - keyname, - secret, - fudge, - id, - tsig_error, - other_data, - request_mac, - algorithm=dns.tsig.default_algorithm, - ): - """Add a TSIG signature to the message.""" - - s = self.output.getvalue() - - if isinstance(secret, dns.tsig.Key): - key = secret - else: - key = dns.tsig.Key(keyname, secret, algorithm) - tsig = dns.message.Message._make_tsig( - keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data - ) - (tsig, _) = dns.tsig.sign(s, key, tsig[0], int(time.time()), request_mac) - self._write_tsig(tsig, keyname) - - def add_multi_tsig( - self, - ctx, - keyname, - secret, - fudge, - id, - tsig_error, - other_data, - request_mac, - algorithm=dns.tsig.default_algorithm, - ): - """Add a TSIG signature to the message. Unlike add_tsig(), this can be - used for a series of consecutive DNS envelopes, e.g. for a zone - transfer over TCP [RFC2845, 4.4]. - - For the first message in the sequence, give ctx=None. For each - subsequent message, give the ctx that was returned from the - add_multi_tsig() call for the previous message.""" - - s = self.output.getvalue() - - if isinstance(secret, dns.tsig.Key): - key = secret - else: - key = dns.tsig.Key(keyname, secret, algorithm) - tsig = dns.message.Message._make_tsig( - keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data - ) - (tsig, ctx) = dns.tsig.sign( - s, key, tsig[0], int(time.time()), request_mac, ctx, True - ) - self._write_tsig(tsig, keyname) - return ctx - - def _write_tsig(self, tsig, keyname): - if self.was_padded: - compress = None - else: - compress = self.compress - self._set_section(ADDITIONAL) - with self._track_size(): - keyname.to_wire(self.output, compress, self.origin) - self.output.write( - struct.pack("!HHIH", dns.rdatatype.TSIG, dns.rdataclass.ANY, 0, 0) - ) - rdata_start = self.output.tell() - tsig.to_wire(self.output) - - after = self.output.tell() - self.output.seek(rdata_start - 2) - self.output.write(struct.pack("!H", after - rdata_start)) - self.counts[ADDITIONAL] += 1 - self.output.seek(10) - self.output.write(struct.pack("!H", self.counts[ADDITIONAL])) - self.output.seek(0, io.SEEK_END) - - def write_header(self): - """Write the DNS message header. - - Writing the DNS message header is done after all sections - have been rendered, but before the optional TSIG signature - is added. 
- """ - - self.output.seek(0) - self.output.write( - struct.pack( - "!HHHHHH", - self.id, - self.flags, - self.counts[0], - self.counts[1], - self.counts[2], - self.counts[3], - ) - ) - self.output.seek(0, io.SEEK_END) - - def get_wire(self): - """Return the wire format message.""" - - return self.output.getvalue() - - def reserve(self, size: int) -> None: - """Reserve *size* bytes.""" - if size < 0: - raise ValueError("reserved amount must be non-negative") - if size > self.max_size: - raise ValueError("cannot reserve more than the maximum size") - self.reserved += size - self.max_size -= size - - def release_reserved(self) -> None: - """Release the reserved bytes.""" - self.max_size += self.reserved - self.reserved = 0 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/subset/svg.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/subset/svg.py deleted file mode 100644 index 9daac6470c559eeaaac20ec61843e69251c6f61e..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/subset/svg.py +++ /dev/null @@ -1,254 +0,0 @@ -from __future__ import annotations - -import re -from functools import lru_cache -from itertools import chain, count -from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple - -try: - from lxml import etree -except ImportError: - # lxml is required for subsetting SVG, but we prefer to delay the import error - # until subset_glyphs() is called (i.e. if font to subset has an 'SVG ' table) - etree = None - -from fontTools import ttLib -from fontTools.subset.util import _add_method -from fontTools.ttLib.tables.S_V_G_ import SVGDocument - - -__all__ = ["subset_glyphs"] - - -GID_RE = re.compile(r"^glyph(\d+)$") - -NAMESPACES = { - "svg": "http://www.w3.org/2000/svg", - "xlink": "http://www.w3.org/1999/xlink", -} -XLINK_HREF = f'{{{NAMESPACES["xlink"]}}}href' - - -# TODO(antrotype): Replace with functools.cache once we are 3.9+ -@lru_cache(maxsize=None) -def xpath(path): - # compile XPath upfront, caching result to reuse on multiple elements - return etree.XPath(path, namespaces=NAMESPACES) - - -def group_elements_by_id(tree: etree.Element) -> Dict[str, etree.Element]: - # select all svg elements with 'id' attribute no matter where they are - # including the root element itself: - # https://github.com/fonttools/fonttools/issues/2548 - return {el.attrib["id"]: el for el in xpath("//svg:*[@id]")(tree)} - - -def parse_css_declarations(style_attr: str) -> Dict[str, str]: - # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/style - # https://developer.mozilla.org/en-US/docs/Web/CSS/Syntax#css_declarations - result = {} - for declaration in style_attr.split(";"): - if declaration.count(":") == 1: - property_name, value = declaration.split(":") - property_name = property_name.strip() - result[property_name] = value.strip() - elif declaration.strip(): - raise ValueError(f"Invalid CSS declaration syntax: {declaration}") - return result - - -def iter_referenced_ids(tree: etree.Element) -> Iterator[str]: - # Yield all the ids that can be reached via references from this element tree. - # We currently support xlink:href (as used by and gradient templates), - # and local url(#...) 
links found in fill or clip-path attributes - # TODO(anthrotype): Check we aren't missing other supported kinds of reference - find_svg_elements_with_references = xpath( - ".//svg:*[ " - "starts-with(@xlink:href, '#') " - "or starts-with(@fill, 'url(#') " - "or starts-with(@clip-path, 'url(#') " - "or contains(@style, ':url(#') " - "]", - ) - for el in chain([tree], find_svg_elements_with_references(tree)): - ref_id = href_local_target(el) - if ref_id is not None: - yield ref_id - - attrs = el.attrib - if "style" in attrs: - attrs = {**dict(attrs), **parse_css_declarations(el.attrib["style"])} - for attr in ("fill", "clip-path"): - if attr in attrs: - value = attrs[attr] - if value.startswith("url(#") and value.endswith(")"): - ref_id = value[5:-1] - assert ref_id - yield ref_id - - -def closure_element_ids( - elements: Dict[str, etree.Element], element_ids: Set[str] -) -> None: - # Expand the initial subset of element ids to include ids that can be reached - # via references from the initial set. - unvisited = element_ids - while unvisited: - referenced: Set[str] = set() - for el_id in unvisited: - if el_id not in elements: - # ignore dangling reference; not our job to validate svg - continue - referenced.update(iter_referenced_ids(elements[el_id])) - referenced -= element_ids - element_ids.update(referenced) - unvisited = referenced - - -def subset_elements(el: etree.Element, retained_ids: Set[str]) -> bool: - # Keep elements if their id is in the subset, or any of their children's id is. - # Drop elements whose id is not in the subset, and either have no children, - # or all their children are being dropped. - if el.attrib.get("id") in retained_ids: - # if id is in the set, don't recurse; keep whole subtree - return True - # recursively subset all the children; we use a list comprehension instead - # of a parentheses-less generator expression because we don't want any() to - # short-circuit, as our function has a side effect of dropping empty elements. - if any([subset_elements(e, retained_ids) for e in el]): - return True - assert len(el) == 0 - parent = el.getparent() - if parent is not None: - parent.remove(el) - return False - - -def remap_glyph_ids( - svg: etree.Element, glyph_index_map: Dict[int, int] -) -> Dict[str, str]: - # Given {old_gid: new_gid} map, rename all elements containing id="glyph{gid}" - # special attributes - elements = group_elements_by_id(svg) - id_map = {} - for el_id, el in elements.items(): - m = GID_RE.match(el_id) - if not m: - continue - old_index = int(m.group(1)) - new_index = glyph_index_map.get(old_index) - if new_index is not None: - if old_index == new_index: - continue - new_id = f"glyph{new_index}" - else: - # If the old index is missing, the element correspond to a glyph that was - # excluded from the font's subset. - # We rename it to avoid clashes with the new GIDs or other element ids. - new_id = f".{el_id}" - n = count(1) - while new_id in elements: - new_id = f"{new_id}.{next(n)}" - - id_map[el_id] = new_id - el.attrib["id"] = new_id - - return id_map - - -def href_local_target(el: etree.Element) -> Optional[str]: - if XLINK_HREF in el.attrib: - href = el.attrib[XLINK_HREF] - if href.startswith("#") and len(href) > 1: - return href[1:] # drop the leading # - return None - - -def update_glyph_href_links(svg: etree.Element, id_map: Dict[str, str]) -> None: - # update all xlink:href="#glyph..." 
attributes to point to the new glyph ids - for el in xpath(".//svg:*[starts-with(@xlink:href, '#glyph')]")(svg): - old_id = href_local_target(el) - assert old_id is not None - if old_id in id_map: - new_id = id_map[old_id] - el.attrib[XLINK_HREF] = f"#{new_id}" - - -def ranges(ints: Iterable[int]) -> Iterator[Tuple[int, int]]: - # Yield sorted, non-overlapping (min, max) ranges of consecutive integers - sorted_ints = iter(sorted(set(ints))) - try: - start = end = next(sorted_ints) - except StopIteration: - return - for v in sorted_ints: - if v - 1 == end: - end = v - else: - yield (start, end) - start = end = v - yield (start, end) - - -@_add_method(ttLib.getTableClass("SVG ")) -def subset_glyphs(self, s) -> bool: - if etree is None: - raise ImportError("No module named 'lxml', required to subset SVG") - - # glyph names (before subsetting) - glyph_order: List[str] = s.orig_glyph_order - # map from glyph names to original glyph indices - rev_orig_glyph_map: Dict[str, int] = s.reverseOrigGlyphMap - # map from original to new glyph indices (after subsetting) - glyph_index_map: Dict[int, int] = s.glyph_index_map - - new_docs: List[SVGDocument] = [] - for doc in self.docList: - - glyphs = { - glyph_order[i] for i in range(doc.startGlyphID, doc.endGlyphID + 1) - }.intersection(s.glyphs) - if not glyphs: - # no intersection: we can drop the whole record - continue - - svg = etree.fromstring( - # encode because fromstring dislikes xml encoding decl if input is str. - # SVG xml encoding must be utf-8 as per OT spec. - doc.data.encode("utf-8"), - parser=etree.XMLParser( - # Disable libxml2 security restrictions to support very deep trees. - # Without this we would get an error like this: - # `lxml.etree.XMLSyntaxError: internal error: Huge input lookup` - # when parsing big fonts e.g. noto-emoji-picosvg.ttf. 
- huge_tree=True, - # ignore blank text as it's not meaningful in OT-SVG; it also prevents - # dangling tail text after removing an element when pretty_print=True - remove_blank_text=True, - # don't replace entities; we don't expect any in OT-SVG and they may - # be abused for XXE attacks - resolve_entities=False, - ), - ) - - elements = group_elements_by_id(svg) - gids = {rev_orig_glyph_map[g] for g in glyphs} - element_ids = {f"glyph{i}" for i in gids} - closure_element_ids(elements, element_ids) - - if not subset_elements(svg, element_ids): - continue - - if not s.options.retain_gids: - id_map = remap_glyph_ids(svg, glyph_index_map) - update_glyph_href_links(svg, id_map) - - new_doc = etree.tostring(svg, pretty_print=s.options.pretty_svg).decode("utf-8") - - new_gids = (glyph_index_map[i] for i in gids) - for start, end in ranges(new_gids): - new_docs.append(SVGDocument(new_doc, start, end, doc.compressed)) - - self.docList = new_docs - - return bool(self.docList) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py deleted file mode 100644 index 8a6c14c444595508c35bdc6ebace60b4bbbbdaba..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/T_S_I_B_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .T_S_I_V_ import table_T_S_I_V_ - - -class table_T_S_I_B_(table_T_S_I_V_): - pass diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_m_a_x_p.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_m_a_x_p.py deleted file mode 100644 index 2934149773c6909cbab65861168524c10c9e7865..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_m_a_x_p.py +++ /dev/null @@ -1,140 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable - -maxpFormat_0_5 = """ - > # big endian - tableVersion: i - numGlyphs: H -""" - -maxpFormat_1_0_add = """ - > # big endian - maxPoints: H - maxContours: H - maxCompositePoints: H - maxCompositeContours: H - maxZones: H - maxTwilightPoints: H - maxStorage: H - maxFunctionDefs: H - maxInstructionDefs: H - maxStackElements: H - maxSizeOfInstructions: H - maxComponentElements: H - maxComponentDepth: H -""" - - -class table__m_a_x_p(DefaultTable.DefaultTable): - - dependencies = ["glyf"] - - def decompile(self, data, ttFont): - dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) - self.numGlyphs = int(self.numGlyphs) - if self.tableVersion != 0x00005000: - dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) - assert len(data) == 0 - - def compile(self, ttFont): - if "glyf" in ttFont: - if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes: - self.recalc(ttFont) - else: - pass # CFF - self.numGlyphs = len(ttFont.getGlyphOrder()) - if self.tableVersion != 0x00005000: - self.tableVersion = 0x00010000 - data = sstruct.pack(maxpFormat_0_5, self) - if self.tableVersion == 0x00010000: - data = data + sstruct.pack(maxpFormat_1_0_add, self) - return data - - def recalc(self, ttFont): - """Recalculate the font bounding box, and most other maxp values except - for the TT instructions values. Also recalculate the value of bit 1 - of the flags field and the font bounding box of the 'head' table. 
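        A minimal usage sketch (the font path is hypothetical; recalc only applies to
        glyf-based fonts, since it reads the 'glyf', 'hmtx' and 'head' tables):

            from fontTools.ttLib import TTFont

            font = TTFont("MyFont.ttf")      # hypothetical TrueType font
            font["maxp"].recalc(font)        # refresh maxp counts and the head bounding box
            font.save("MyFont-recalc.ttf")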
- """ - glyfTable = ttFont["glyf"] - hmtxTable = ttFont["hmtx"] - headTable = ttFont["head"] - self.numGlyphs = len(glyfTable) - INFINITY = 100000 - xMin = +INFINITY - yMin = +INFINITY - xMax = -INFINITY - yMax = -INFINITY - maxPoints = 0 - maxContours = 0 - maxCompositePoints = 0 - maxCompositeContours = 0 - maxComponentElements = 0 - maxComponentDepth = 0 - allXMinIsLsb = 1 - for glyphName in ttFont.getGlyphOrder(): - g = glyfTable[glyphName] - if g.numberOfContours: - if hmtxTable[glyphName][1] != g.xMin: - allXMinIsLsb = 0 - xMin = min(xMin, g.xMin) - yMin = min(yMin, g.yMin) - xMax = max(xMax, g.xMax) - yMax = max(yMax, g.yMax) - if g.numberOfContours > 0: - nPoints, nContours = g.getMaxpValues() - maxPoints = max(maxPoints, nPoints) - maxContours = max(maxContours, nContours) - elif g.isComposite(): - nPoints, nContours, componentDepth = g.getCompositeMaxpValues( - glyfTable - ) - maxCompositePoints = max(maxCompositePoints, nPoints) - maxCompositeContours = max(maxCompositeContours, nContours) - maxComponentElements = max(maxComponentElements, len(g.components)) - maxComponentDepth = max(maxComponentDepth, componentDepth) - if xMin == +INFINITY: - headTable.xMin = 0 - headTable.yMin = 0 - headTable.xMax = 0 - headTable.yMax = 0 - else: - headTable.xMin = xMin - headTable.yMin = yMin - headTable.xMax = xMax - headTable.yMax = yMax - self.maxPoints = maxPoints - self.maxContours = maxContours - self.maxCompositePoints = maxCompositePoints - self.maxCompositeContours = maxCompositeContours - self.maxComponentElements = maxComponentElements - self.maxComponentDepth = maxComponentDepth - if allXMinIsLsb: - headTable.flags = headTable.flags | 0x2 - else: - headTable.flags = headTable.flags & ~0x2 - - def testrepr(self): - items = sorted(self.__dict__.items()) - print(". . . . . . . . .") - for combo in items: - print(" %s: %s" % combo) - print(". . . . . . . . .") - - def toXML(self, writer, ttFont): - if self.tableVersion != 0x00005000: - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5) - if self.tableVersion != 0x00005000: - formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add) - names = names + names_1_0 - for name in names: - value = getattr(self, name) - if name == "tableVersion": - value = hex(value) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ufoLib/plistlib.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ufoLib/plistlib.py deleted file mode 100644 index 1f52f20a2b4836e39d3e292496928185dfe08534..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ufoLib/plistlib.py +++ /dev/null @@ -1,46 +0,0 @@ -"""DEPRECATED - This module is kept here only as a backward compatibility shim -for the old ufoLib.plistlib module, which was moved to fontTools.misc.plistlib. -Please use the latter instead. -""" -from fontTools.misc.plistlib import dump, dumps, load, loads -from fontTools.misc.textTools import tobytes - -# The following functions were part of the old py2-like ufoLib.plistlib API. -# They are kept only for backward compatiblity. 
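For reference, a small sketch of the recommended replacement that the module docstring above
points to (fontTools.misc.plistlib); the file path is purely illustrative:

    from fontTools.misc import plistlib

    with open("lib.plist", "rb") as f:    # illustrative path
        data = plistlib.load(f)           # preferred over the deprecated readPlist()
    with open("lib.plist", "wb") as f:
        plistlib.dump(data, f)            # preferred over the deprecated writePlist()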
-from fontTools.ufoLib.utils import deprecated - - -@deprecated("Use 'fontTools.misc.plistlib.load' instead") -def readPlist(path_or_file): - did_open = False - if isinstance(path_or_file, str): - path_or_file = open(path_or_file, "rb") - did_open = True - try: - return load(path_or_file, use_builtin_types=False) - finally: - if did_open: - path_or_file.close() - - -@deprecated("Use 'fontTools.misc.plistlib.dump' instead") -def writePlist(value, path_or_file): - did_open = False - if isinstance(path_or_file, str): - path_or_file = open(path_or_file, "wb") - did_open = True - try: - dump(value, path_or_file, use_builtin_types=False) - finally: - if did_open: - path_or_file.close() - - -@deprecated("Use 'fontTools.misc.plistlib.loads' instead") -def readPlistFromString(data): - return loads(tobytes(data, encoding="utf-8"), use_builtin_types=False) - - -@deprecated("Use 'fontTools.misc.plistlib.dumps' instead") -def writePlistToString(value): - return dumps(value, use_builtin_types=False) diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/utils/autocast.py b/spaces/jordonpeter01/MusicGen2/audiocraft/utils/autocast.py deleted file mode 100644 index ed644843bb37cf8a92a20fbd51d6cebaa43b9a08..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/utils/autocast.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - - -class TorchAutocast: - """TorchAutocast utility class. - Allows you to enable and disable autocast. This is specially useful - when dealing with different architectures and clusters with different - levels of support. - - Args: - enabled (bool): Whether to enable torch.autocast or not. - args: Additional args for torch.autocast. - kwargs: Additional kwargs for torch.autocast - """ - def __init__(self, enabled: bool, *args, **kwargs): - self.autocast = torch.autocast(*args, **kwargs) if enabled else None - - def __enter__(self): - if self.autocast is None: - return - try: - self.autocast.__enter__() - except RuntimeError: - device = self.autocast.device - dtype = self.autocast.fast_dtype - raise RuntimeError( - f"There was an error autocasting with dtype={dtype} device={device}\n" - "If you are on the FAIR Cluster, you might need to use autocast_dtype=float16" - ) - - def __exit__(self, *args, **kwargs): - if self.autocast is None: - return - self.autocast.__exit__(*args, **kwargs) diff --git a/spaces/jordonpeter01/MusicGen2/tests/modules/test_seanet.py b/spaces/jordonpeter01/MusicGen2/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
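A short usage sketch for the TorchAutocast helper deleted just above (the model, inputs and
chosen dtype are placeholders, not part of the original code):

    import torch

    autocast = TorchAutocast(enabled=True, device_type="cuda", dtype=torch.float16)
    with autocast:
        logits = model(inputs)    # runs under torch.autocast only when enabled=True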
- -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid 
disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/jracca/02-learning-space/README.md b/spaces/jracca/02-learning-space/README.md deleted file mode 100644 index 22a4a8dab3578b0a0dfb38e0d2a57eb3f43346ea..0000000000000000000000000000000000000000 --- a/spaces/jracca/02-learning-space/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 02 Learning Space -emoji: 🐠 -colorFrom: pink -colorTo: green -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/justest/chatglm-6b-int4/app.py b/spaces/justest/chatglm-6b-int4/app.py deleted file mode 100644 index a2d30ce8a4a2f2a9cae3f448723cdbeb3cec731c..0000000000000000000000000000000000000000 --- a/spaces/justest/chatglm-6b-int4/app.py +++ /dev/null @@ -1,52 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -import gradio as gr -import json -model_path = 'THUDM/chatglm-6b-int4' -tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) -model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().float() -model = model.eval() - -MAX_TURNS = 20 -MAX_BOXES = MAX_TURNS * 2 - - -def predict(input, max_length, top_p, temperature, history=None, state=None): - if state is None: - state = [] - if history is None or history == "": - history = state - else: - history = json.loads(history) - - for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, - temperature=temperature): - updates = [] - for query, response in history: - updates.append(gr.update(visible=True, value=query)) - updates.append(gr.update(visible=True, value=response)) - if len(updates) < MAX_BOXES: - updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates)) - yield [history] + updates - - -with gr.Blocks() as demo: - state = gr.State([]) - text_boxes = [] - for i in range(MAX_BOXES): - if i % 2 == 0: - text_boxes.append(gr.Text(visible=False, label="提问:")) - else: - text_boxes.append(gr.Text(visible=False, label="回复:")) - - with gr.Row(): - with gr.Column(scale=4): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style( - container=False) - with gr.Column(scale=1): - max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) - top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) - temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) - history = gr.TextArea(visible=False) - button = gr.Button("Generate") - button.click(predict, [txt, max_length, top_p, temperature, history, state], [state] + text_boxes, queue=True) -demo.queue(concurrency_count=10).launch(enable_queue=True, max_threads=2) \ No newline at end of file diff --git a/spaces/jyseo/3DFuse/run_img_sampling.py b/spaces/jyseo/3DFuse/run_img_sampling.py deleted file mode 100644 index 8ceafaf17f73cab06077747a6e5c7162ca59a181..0000000000000000000000000000000000000000 --- a/spaces/jyseo/3DFuse/run_img_sampling.py +++ /dev/null @@ -1,19 +0,0 @@ 
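As a side note on the chatglm-6b-int4 demo above: its predict function is a generator, so
Gradio streams partial responses as they arrive; a stripped-down sketch of that pattern
(function and variable names are illustrative):

    def stream(prompt, history):
        # ChatGLM's stream_chat yields (partial_response, updated_history) on every step
        for response, history in model.stream_chat(tokenizer, prompt, history):
            yield response    # each yield re-renders the bound Gradio output component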
-from adapt_sd import StableDiffusion - -from my.config import BaseConf - - -class SD(BaseConf): - """Stable Diffusion""" - variant: str = "v1" - v2_highres: bool = False - prompt: str = "a photograph of an astronaut riding a horse" - scale: float = 3.0 # classifier free guidance scale - precision: str = 'autocast' - dir: str = './' - alpha: float = 0.0 # merge scale - - def make(self): - args = self.dict() - model = StableDiffusion(**args) - return model \ No newline at end of file diff --git a/spaces/k1ngtai/MMS/uroman/lib/NLP/utilities.pm b/spaces/k1ngtai/MMS/uroman/lib/NLP/utilities.pm deleted file mode 100644 index 7be117449190533d826bd63b9266c1434d00408f..0000000000000000000000000000000000000000 --- a/spaces/k1ngtai/MMS/uroman/lib/NLP/utilities.pm +++ /dev/null @@ -1,3652 +0,0 @@ -################################################################ -# # -# utilities # -# # -################################################################ - -package NLP::utilities; - -use File::Spec; -use Time::HiRes qw(time); -use Time::Local; -use NLP::English; -use NLP::UTF8; - -$utf8 = NLP::UTF8; -$englishPM = NLP::English; - -%empty_ht = (); - -use constant DEBUGGING => 0; - -sub member { - local($this,$elem,@array) = @_; - - my $a; - if (defined($elem)) { - foreach $a (@array) { - if (defined($a)) { - return 1 if $elem eq $a; - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::member::a\n"; - } - } - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::member::elem\n"; - } - return 0; -} - -sub dual_member { - local($this,$elem1,$elem2,*array1,*array2) = @_; - # returns 1 if there exists a position $n - # such that $elem1 occurs at position $n in @array1 - # and $elem2 occurs at same position $n in @array2 - - return 0 unless defined($elem1) && defined($elem2); - my $last_index = ($#array1 < $#array2) ? $#array1 : $#array2; #min - my $a; - my $b; - foreach $i ((0 .. $last_index)) { - return 1 if defined($a = $array1[$i]) && defined($b = $array2[$i]) && ($a eq $elem1) && ($b eq $elem2); - } - return 0; -} - -sub sorted_list_equal { - local($this,*list1,*list2) = @_; - - return 0 unless $#list1 == $#list2; - foreach $i ((0 .. 
$#list1)) { - return 0 unless $list1[$i] eq $list2[$i]; - } - return 1; -} - -sub trim { - local($this, $s) = @_; - - $s =~ s/^\s*//; - $s =~ s/\s*$//; - $s =~ s/\s+/ /g; - return $s; -} - -sub trim2 { - local($this, $s) = @_; - - $s =~ s/^\s*//; - $s =~ s/\s*$//; - return $s; -} - -sub trim_left { - local($this, $s) = @_; - $s =~ s/^\s*//; - return $s; -} - -sub cap_member { - local($this,$elem,@array) = @_; - - my $a; - my $lc_elem = lc $elem; - foreach $a (@array) { - return $a if $lc_elem eq lc $a; - } - return ""; -} - -sub remove_elem { - local($this,$elem,@array) = @_; - - return @array unless $this->member($elem, @array); - @rm_list = (); - foreach $a (@array) { - push(@rm_list, $a) unless $elem eq $a; - } - return @rm_list; -} - -sub intersect_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - if (defined($elem1)) { - foreach $elem2 (@list2) { - if (defined($elem2)) { - return 1 if $elem1 eq $elem2; - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::intersect_p::elem2\n"; - } - } - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::intersect_p::elem1\n"; - } - } - return 0; -} - -sub intersect_expl_p { - local($this,*list1,@list2) = @_; - - foreach $elem1 (@list1) { - foreach $elem2 (@list2) { - return 1 if $elem1 eq $elem2; - } - } - return 0; -} - -sub intersection { - local($this,*list1,*list2) = @_; - - @intersection_list = (); - foreach $elem1 (@list1) { - foreach $elem2 (@list2) { - push(@intersection_list, $elem1) if ($elem1 eq $elem2) && ! $this->member($elem1, @intersection_list); - } - } - return @intersection_list; -} - -sub cap_intersect_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - $lc_elem1 = lc $elem1; - foreach $elem2 (@list2) { - return 1 if $lc_elem1 eq lc $elem2; - } - } - return 0; -} - -sub subset_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - return 0 unless $this->member($elem1, @list2); - } - return 1; -} - -sub cap_subset_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - return 0 unless $this->cap_member($elem1, @list2); - } - return 1; -} - -sub unique { - local($this, @list) = @_; - - my %seen = (); - @uniq = (); - foreach $item (@list) { - push(@uniq, $item) unless $seen{$item}++; - } - return @uniq; -} - -sub position { - local($this,$elem,@array) = @_; - $i = 0; - foreach $a (@array) { - return $i if $elem eq $a; - $i++; - } - return -1; -} - -sub positions { - local($this,$elem,@array) = @_; - $i = 0; - @positions_in_list = (); - foreach $a (@array) { - push(@positions_in_list, $i) if $elem eq $a; - $i++; - } - return @positions_in_list; -} - -sub last_position { - local($this,$elem,@array) = @_; - - $result = -1; - $i = 0; - foreach $a (@array) { - $result = $i if $elem eq $a; - $i++; - } - return $result; -} - -sub rand_n_digit_number { - local($this,$n) = @_; - - return 0 unless $n =~ /^[1-9]\d*$/; - $ten_power_n = 10 ** ($n - 1); - return int(rand(9 * $ten_power_n)) + $ten_power_n; -} - -# Consider File::Temp -sub new_tmp_filename { - local($this,$filename) = @_; - - $loop_limit = 1000; - ($dir,$simple_filename) = ($filename =~ /^(.+)\/([^\/]+)$/); - $simple_filename = $filename unless defined($simple_filename); - $new_filename = "$dir/tmp-" . $this->rand_n_digit_number(8) . "-$simple_filename"; - while ((-e $new_filename) && ($loop_limit-- >= 0)) { - $new_filename = "$dir/tmp-" . $this->rand_n_digit_number(8) . 
"-$simple_filename"; - } - return $new_filename; -} - -# support sorting order: "8", "8.0", "8.5", "8.5.1.", "8.10", "10", "10-12" - -sub compare_complex_numeric { - local($this,$a,$b) = @_; - - (my $a_num,my $a_rest) = ($a =~ /^(\d+)\D*(.*)$/); - (my $b_num,my $b_rest) = ($b =~ /^(\d+)\D*(.*)$/); - - if (defined($a_rest) && defined($b_rest)) { - return ($a_num <=> $b_num) - || $this->compare_complex_numeric($a_rest,$b_rest); - } else { - return $a cmp $b; - } -} - -# support sorting order: "lesson8-ps-v1.9.xml", "Lesson 10_ps-v_1.11.xml" -# approach: segment strings into alphabetic and numerical sections and compare pairwise - -sub compare_mixed_alpha_numeric { - local($this,$a,$b) = @_; - - ($a_alpha,$a_num,$a_rest) = ($a =~ /^(\D*)(\d[-\d\.]*)(.*)$/); - ($b_alpha,$b_num,$b_rest) = ($b =~ /^(\D*)(\d[-\d\.]*)(.*)$/); - - ($a_alpha) = ($a =~ /^(\D*)/) unless defined $a_alpha; - ($b_alpha) = ($b =~ /^(\D*)/) unless defined $b_alpha; - - # ignore non-alphabetic characters in alpha sections - $a_alpha =~ s/\W|_//g; - $b_alpha =~ s/\W|_//g; - - if ($alpha_cmp = lc $a_alpha cmp lc $b_alpha) { - return $alpha_cmp; - } elsif (defined($a_rest) && defined($b_rest)) { - return $this->compare_complex_numeric($a_num,$b_num) - || $this->compare_mixed_alpha_numeric ($a_rest,$b_rest); - } else { - return (defined($a_num) <=> defined($b_num)) || ($a cmp $b); - } -} - -# @sorted_lessons = sort { NLP::utilities->compare_mixed_alpha_numeric($a,$b) } @lessons; - -sub html_guarded_p { - local($this,$string) = @_; - - return 0 if $string =~ /[<>"]/; - $string .= " "; - @segs = split('&',$string); - shift @segs; - foreach $seg (@segs) { - next if $seg =~ /^[a-z]{2,6};/i; - # next if $seg =~ /^amp;/; - # next if $seg =~ /^quot;/; - # next if $seg =~ /^nbsp;/; - # next if $seg =~ /^gt;/; - # next if $seg =~ /^lt;/; - next if $seg =~ /^#(\d+);/; - next if $seg =~ /^#x([0-9a-fA-F]+);/; - return 0; - } - return 1; -} - -sub guard_tooltip_text { - local($this,$string) = @_; - - $string =~ s/\xCB\x88/'/g; - return $string; -} - -sub guard_html { - local($this,$string,$control_string) = @_; - - return "" unless defined($string); - my $guarded_string; - $control_string = "" unless defined($control_string); - return $string if ($string =~ /&/) - && (! ($control_string =~ /\bstrict\b/)) - && $this->html_guarded_p($string); - $guarded_string = $string; - $guarded_string =~ s/&/&/g; - if ($control_string =~ /slash quote/) { - $guarded_string =~ s/"/\\"/g; - } elsif ($control_string =~ /keep quote/) { - } else { - $guarded_string =~ s/\"/"/g; - } - if ($control_string =~ /escape-slash/) { - $guarded_string =~ s/\//&x2F;/g; - } - $guarded_string =~ s/>/>/g; - $guarded_string =~ s/" : - /^lt$/i ? "<" : - /^x2F$/i ? "/" : - /^nbsp$/i ? "\xC2\xA0" : - /^#(\d+)$/ ? $this->chr($1) : - /^#x([0-9a-f]+)$/i ? $this->chr(hex($1)) : - $_ - }gex; - return $string; -} - -sub unguard_html_r { - local($this,$string) = @_; - - return undef unless defined($string); - - $string =~ s/&/&/g; - $string =~ s/"/'/g; - $string =~ s/<//g; - - ($d) = ($string =~ /&#(\d+);/); - while (defined($d)) { - $c = $this->chr($d); - $string =~ s/&#$d;/$c/g; - ($d) = ($string =~ /&#(\d+);/); - } - ($x) = ($string =~ /&#x([0-9a-f]+);/i); - while (defined($x)) { - $c = $this->chr(hex($x)); - $string =~ s/&#x$x;/$c/g; - ($x) = ($string =~ /&#x([0-9a-f]+);/i); - } - $string0 = $string; - ($x) = ($string =~ /(?:https?|www|\.com)\S*\%([0-9a-f]{2,2})/i); - while (defined($x)) { - $c = $this->chr("%" . 
hex($x)); - $string =~ s/\%$x/$c/g; - ($x) = ($string =~ /(?:https?|www|\.com)\S*\%([0-9a-f]{2,2})/i); - } - return $string; -} - -sub unguard_html_l { - local($caller,$string) = @_; - - return undef unless defined($string); - - my $pre; - my $core; - my $post; - my $repl; - my $s = $string; - if (($pre,$core,$post) = ($s =~ /^(.*)&(amp|quot|lt|gt|#\d+|#x[0-9a-f]+);(.*)$/i)) { - $repl = "?"; - $repl = "&" if $core =~ /^amp$/i; - $repl = "'" if $core =~ /^quot$/i; - $repl = "<" if $core =~ /^lt$/i; - $repl = ">" if $core =~ /^gt$/i; - if ($core =~ /^#\d+$/i) { - $core2 = substr($core,1); - $repl = $caller->chr($core2); - } - $repl = $caller->chr(hex(substr($core,2))) if $core =~ /^#x[0-9a-f]+$/i; - $s = $pre . $repl . $post; - } - return $s; -} - -sub guard_html_quote { - local($caller,$string) = @_; - - $string =~ s/"/"/g; - return $string; -} - -sub unguard_html_quote { - local($caller,$string) = @_; - - $string =~ s/"/"/g; - return $string; -} - -sub uri_encode { - local($caller,$string) = @_; - - $string =~ s/([^^A-Za-z0-9\-_.!~*()'])/ sprintf "%%%02x", ord $1 /eg; - return $string; -} - -sub uri_decode { - local($caller,$string) = @_; - - $string =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg; - return $string; -} - -sub remove_xml_tags { - local($caller,$string) = @_; - - $string =~ s/<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>//g; - return $string; -} - -sub remove_any_tokenization_at_signs_around_xml_tags { - local($caller,$string) = @_; - - $string =~ s/(?:\@ \@)?(<[^<>]+>)(?:\@ \@)?/$1/g; - $string =~ s/\@?(<[^<>]+>)\@?/$1/g; - return $string; -} - -sub remove_xml_tags_and_any_bordering_at_signs { - # at-signs from tokenization - local($caller,$string) = @_; - - $string =~ s/\@?<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>\@?//g; - return $string; -} - -sub chr { - local($caller,$i) = @_; - - return undef unless $i =~ /^\%?\d+$/; - if ($i =~ /^%/) { - $i =~ s/^\%//; - return chr($i) if $i < 128; - return "\x80" | chr($i - 128) if $i < 256; - } else { - return chr($i) if $i < 128; - return ("\xC0" | chr(($i / 64) % 32)) - . ("\x80" | chr($i % 64)) if $i < 2048; - return ("\xE0" | chr(int($i / 4096) % 16)) - . ("\x80" | chr(int($i / 64) % 64)) - . ("\x80" | chr($i % 64)) if $i < 65536; - return ("\xF0" | chr(int($i / 262144) % 8)) - . ("\x80" | chr(int($i / 4096) % 64)) - . ("\x80" | chr(int($i / 64) % 64)) - . ("\x80" | chr($i % 64)) if $i < 2097152; - } - return "?"; -} - -sub guard_cgi { - local($caller, $string) = @_; - - $guarded_string = $string; - if ($string =~ /[\x80-\xFF]/) { - $guarded_string = ""; - while ($string ne "") { - $char = substr($string, 0, 1); - $string = substr($string, 1); - if ($char =~ /^[\\ ;\#\&\:\=\"\'\+\?\x00-\x1F\x80-\xFF]$/) { - $hex = sprintf("%2.2x",ord($char)); - $guarded_string .= uc "%$hex"; - } else { - $guarded_string .= $char; - } - } - } else { - $guarded_string = $string; - $guarded_string =~ s/%/%25/g; - $guarded_string =~ s/\n/%5Cn/g; - $guarded_string =~ s/\t/%5Ct/g; - $guarded_string =~ s/ /%20/g; - $guarded_string =~ s/"/%22/g; - $guarded_string =~ s/#/%23/g; - $guarded_string =~ s/&/%26/g; - $guarded_string =~ s/'/%27/g; - $guarded_string =~ s/\+/%2B/g; - $guarded_string =~ s/\//%2F/g; - $guarded_string =~ s/:/%3A/g; - $guarded_string =~ s/;/%3B/g; - $guarded_string =~ s//%3E/g; - $guarded_string =~ s/\?/%3F/g; - } - return $guarded_string; -} - -sub repair_cgi_guard { - local($caller,$string) = @_; - # undo second cgi-guard, e.g. 
"Jo%25C3%25ABlle_Aubron" -> "Jo%C3%ABlle_Aubron" - - $string =~ s/(%)25([CD][0-9A-F]%)25([89AB][0-9A-F])/$1$2$3/g; - $string =~ s/(%)25(E[0-9A-F]%)25([89AB][0-9A-F]%)25([89AB][0-9A-F])/$1$2$3$4/g; - return $string; -} - -sub unguard_cgi { - local($caller,$string) = @_; - - $unguarded_string = $string; - $unguarded_string =~ s/%5Cn/\n/g; - $unguarded_string =~ s/%5Ct/\t/g; - $unguarded_string =~ s/%20/ /g; - $unguarded_string =~ s/%23/#/g; - $unguarded_string =~ s/%26/&/g; - $unguarded_string =~ s/%2B/+/g; - $unguarded_string =~ s/%2C/,/g; - $unguarded_string =~ s/%3A/:/g; - $unguarded_string =~ s/%3D/=/g; - $unguarded_string =~ s/%3F/?/g; - $unguarded_string =~ s/%C3%A9/\xC3\xA9/g; - - # more general - ($code) = ($unguarded_string =~ /%([0-9A-F]{2,2})/); - while (defined($code)) { - $percent_code = "%" . $code; - $hex_code = sprintf("%c", hex($code)); - $unguarded_string =~ s/$percent_code/$hex_code/g; - ($code) = ($unguarded_string =~ /%([0-9A-F]{2,2})/); - } - - return $unguarded_string; -} - -sub regex_guard { - local($caller,$string) = @_; - - $guarded_string = $string; - $guarded_string =~ s/([\\\/\^\|\(\)\{\}\$\@\*\+\?\.\[\]])/\\$1/g - if $guarded_string =~ /[\\\/\^\|\(\)\{\}\$\@\*\+\?\.\[\]]/; - - return $guarded_string; -} - -sub g_regex_spec_tok_p { - local($this,$string) = @_; - - # specials: ( ) (?: ) [ ] - return ($string =~ /^(\(\?:|[()\[\]])$/); -} - -sub regex_guard_norm { - local($this,$string) = @_; - - return $string unless $string =~ /[\[\]\\()$@?+]/; - my $rest = $string; - my @stack = (""); - while ($rest ne "") { - # specials: ( ) (?: ) [ ] ? + - if (($pre, $special, $post) = ($rest =~ /^((?:\\.|[^\[\]()?+])*)(\(\?:|[\[\]()?+])(.*)$/)) { - # print STDERR "Special: $pre *$special* $post\n"; - unless ($pre eq "") { - push(@stack, $pre); - while (($#stack >= 1) && (! $this->g_regex_spec_tok_p($stack[$#stack-1])) - && (! $this->g_regex_spec_tok_p($stack[$#stack]))) { - $s1 = pop @stack; - $s2 = pop @stack; - push(@stack, "$s2$s1"); - } - } - if ($special =~ /^[?+]$/) { - push(@stack, "\\") if ($stack[$#stack] eq "") - || ($this->g_regex_spec_tok_p($stack[$#stack]) && ($stack[$#stack] ne "[")); - push(@stack, $special); - } elsif ($special eq "]") { - if (($#stack >= 1) && ($stack[$#stack-1] eq "[") && ! $this->g_regex_spec_tok_p($stack[$#stack])) { - $char_expression = pop @stack; - pop @stack; - push(@stack, "[$char_expression]"); - } else { - push(@stack, $special); - } - } elsif (($special =~ /^[()]/) && (($stack[$#stack] eq "[") - || (($#stack >= 1) - && ($stack[$#stack-1] eq "[") - && ! $this->g_regex_spec_tok_p($stack[$#stack])))) { - push(@stack, "\\$special"); - } elsif ($special eq ")") { - if (($#stack >= 1) && ($stack[$#stack-1] =~ /^\((\?:)?$/) && ! $this->g_regex_spec_tok_p($stack[$#stack])) { - $alt_expression = pop @stack; - $open_para = pop @stack; - if ($open_para eq "(") { - push(@stack, "(?:$alt_expression)"); - } else { - push(@stack, "$open_para$alt_expression)"); - } - } else { - push(@stack, $special); - } - } else { - push(@stack, $special); - } - while (($#stack >= 1) && (! $this->g_regex_spec_tok_p($stack[$#stack-1])) - && (! $this->g_regex_spec_tok_p($stack[$#stack]))) { - $s1 = pop @stack; - $s2 = pop @stack; - push(@stack, "$s2$s1"); - } - $rest = $post; - } else { - push(@stack, $rest); - $rest = ""; - } - } - # print STDERR "Stack: " . join(";", @stack) . "\n"; - foreach $i ((0 .. $#stack)) { - $stack_elem = $stack[$i]; - if ($stack_elem =~ /^[()\[\]]$/) { - $stack[$i] = "\\" . 
$stack[$i]; - } - } - return join("", @stack); -} - -sub string_guard { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/([\\"])/\\$1/g - if $guarded_string =~ /[\\"]/; - - return $guarded_string; -} - -sub json_string_guard { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/([\\"])/\\$1/g - if $guarded_string =~ /[\\"]/; - $guarded_string =~ s/\r*\n/\\n/g - if $guarded_string =~ /\n/; - - return $guarded_string; -} - -sub json_string_unguard { - local($caller,$string) = @_; - - return "" unless defined($string); - $string =~ s/\\n/\n/g - if $string =~ /\\n/; - return $string; -} - -sub guard_javascript_arg { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/\\/\\\\/g; - $guarded_string =~ s/'/\\'/g; - return $guarded_string; -} - -sub guard_substitution_right_hand_side { - # "$1x" => "$1 . \"x\"" - local($caller,$string) = @_; - - my $result = ""; - ($pre,$var,$post) = ($string =~ /^([^\$]*)(\$\d)(.*)$/); - while (defined($var)) { - $result .= " . " if $result; - $result .= "\"$pre\" . " unless $pre eq ""; - $result .= $var; - $string = $post; - ($pre,$var,$post) = ($string =~ /^([^\$]*)(\$\d)(.*)$/); - } - $result .= " . \"$string\"" if $string; - return $result; -} - -sub string_starts_with_substring { - local($caller,$string,$substring) = @_; - - $guarded_substring = $caller->regex_guard($substring); - return $string =~ /^$guarded_substring/; -} - -sub one_string_starts_with_the_other { - local($caller,$s1,$s2) = @_; - - return ($s1 eq $s2) - || $caller->string_starts_with_substring($s1,$s2) - || $caller->string_starts_with_substring($s2,$s1); -} - -sub string_ends_in_substring { - local($caller,$string,$substring) = @_; - - $guarded_substring = $caller->regex_guard($substring); - return $string =~ /$guarded_substring$/; -} - -sub string_equal_ignore_leading_multiple_or_trailing_blanks { - local($caller,$string1,$string2) = @_; - - return 1 if $string1 eq $string2; - $string1 =~ s/\s+/ /; - $string2 =~ s/\s+/ /; - $string1 =~ s/^\s+//; - $string2 =~ s/^\s+//; - $string1 =~ s/\s+$//; - $string2 =~ s/\s+$//; - - return $string1 eq $string2; -} - -sub strip_substring_from_start_of_string { - local($caller,$string,$substring,$error_code) = @_; - - $error_code = "ERROR" unless defined($error_code); - my $reg_surf = $caller->regex_guard($substring); - if ($string =~ /^$guarded_substring/) { - $string =~ s/^$reg_surf//; - return $string; - } else { - return $error_code; - } -} - -sub strip_substring_from_end_of_string { - local($caller,$string,$substring,$error_code) = @_; - - $error_code = "ERROR" unless defined($error_code); - my $reg_surf = $caller->regex_guard($substring); - if ($string =~ /$reg_surf$/) { - $string =~ s/$reg_surf$//; - return $string; - } else { - return $error_code; - } -} - -# to be deprecated -sub lang_code { - local($caller,$language) = @_; - - $langPM = NLP::Language->new(); - return $langPM->lang_code($language); -} - -sub full_language { - local($caller,$lang_code) = @_; - - return "Arabic" if $lang_code eq "ar"; - return "Chinese" if $lang_code eq "zh"; - return "Czech" if $lang_code eq "cs"; - return "Danish" if $lang_code eq "da"; - return "Dutch" if $lang_code eq "nl"; - return "English" if $lang_code eq "en"; - return "Finnish" if $lang_code eq "fi"; - return "French" if $lang_code eq "fr"; - return "German" if $lang_code eq "de"; - return 
"Greek" if $lang_code eq "el"; - return "Hebrew" if $lang_code eq "he"; - return "Hindi" if $lang_code eq "hi"; - return "Hungarian" if $lang_code eq "hu"; - return "Icelandic" if $lang_code eq "is"; - return "Indonesian" if $lang_code eq "id"; - return "Italian" if $lang_code eq "it"; - return "Japanese" if $lang_code eq "ja"; - return "Kinyarwanda" if $lang_code eq "rw"; - return "Korean" if $lang_code eq "ko"; - return "Latin" if $lang_code eq "la"; - return "Malagasy" if $lang_code eq "mg"; - return "Norwegian" if $lang_code eq "no"; - return "Pashto" if $lang_code eq "ps"; - return "Persian" if $lang_code eq "fa"; - return "Polish" if $lang_code eq "pl"; - return "Portuguese" if $lang_code eq "pt"; - return "Romanian" if $lang_code eq "ro"; - return "Russian" if $lang_code eq "ru"; - return "Spanish" if $lang_code eq "es"; - return "Swedish" if $lang_code eq "sv"; - return "Turkish" if $lang_code eq "tr"; - return "Urdu" if $lang_code eq "ur"; - return ""; -} - -# to be deprecated -sub short_lang_name { - local($caller,$lang_code) = @_; - - $langPM = NLP::Language->new(); - return $langPM->shortname($lang_code); -} - -sub ml_dir { - local($caller,$language,$type) = @_; - - $type = "MSB" unless defined($type); - $lang_code = $langPM->lang_code($language); - return $caller->ml_dir($lang_code, "lex") . "/corpora" if $type eq "corpora"; - return "" unless defined($rc); - $ml_home = $rc->ml_home_dir(); - return File::Spec->catfile($ml_home, "arabic") - if ($lang_code eq "ar-iq") && ! $caller->member(lc $type,"lex","onto","dict"); - $langPM = NLP::Language->new(); - $lexdir = $langPM->lexdir($lang_code); - return $lexdir if defined($lexdir); - return ""; -} - -sub language_lex_filename { - local($caller,$language,$type) = @_; - - $langPM = NLP::Language->new(); - if (($lang_code = $langPM->lang_code($language)) - && ($ml_dir = $caller->ml_dir($lang_code,$type)) - && ($norm_language = $caller->short_lang_name($lang_code))) { - return "$ml_dir/$norm_language-lex" if ($type eq "lex"); - return "$ml_dir/onto" if ($type eq "onto"); - return "$ml_dir/$norm_language-english-dict" if ($type eq "dict") && !($lang_code eq "en"); - return ""; - } else { - return ""; - } -} - -# filename_without_path is obsolete - replace with -# use File::Basename; -# basename($filename) -sub filename_without_path { - local($caller,$filename) = @_; - - $filename =~ s/^.*\/([^\/]+)$/$1/; - return $filename; -} - -sub option_string { - local($caller,$input_name,$default,*values,*labels) = @_; - - my $s = ""; - return $s; -} - -sub pes_subseq_surf { - local($this,$start,$length,$langCode,@pes) = @_; - - my $surf = ""; - if ($start+$length-1 <= $#pes) { - foreach $i ($start .. $start + $length - 1) { - my $pe = $pes[$i]; - $surf .= $pe->get("surf",""); - $surf .= " " if $langCode =~ /^(ar|en|fr)$/; - } - } - $surf =~ s/\s+$//; - return $surf; -} - -sub copyList { - local($this,@list) = @_; - - @copy_list = (); - foreach $elem (@list) { - push(@copy_list,$elem); - } - return @copy_list; -} - -sub list_with_same_elem { - local($this,$size,$elem) = @_; - - @list = (); - foreach $i (0 .. 
$size-1) { - push(@list,$elem); - } - return @list; -} - -sub count_occurrences { - local($this,$s,$substring) = @_; - - $occ = 0; - $new = $s; - $guarded_substring = $this->regex_guard($substring); - $new =~ s/$guarded_substring//; - while ($new ne $s) { - $occ++; - $s = $new; - $new =~ s/$guarded_substring//; - } - return $occ; -} - -sub position_of_nth_occurrence { - local($this,$s,$substring,$occ) = @_; - - return -1 unless $occ > 0; - my $pos = 0; - while (($pos = index($s, $substring, $pos)) >= 0) { - return $pos if $occ == 1; - $occ--; - $pos = $pos + length($substring); - } - return -1; -} - -sub has_diff_elements_p { - local($this,@array) = @_; - - return 0 if $#array < 1; - $elem = $array[0]; - - foreach $a (@array) { - return 1 if $elem ne $a; - } - return 0; -} - -sub init_log { - local($this,$logfile, $control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - system("rm -f $logfile"); - system("date > $logfile; chmod 777 $logfile"); - } -} - -sub time_stamp_log { - local($this,$logfile, $control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - system("date >> $logfile; chmod 777 $logfile"); - } -} - -sub log { - local($this,$message,$logfile,$control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - $this->init_log($logfile, $control) unless -w $logfile; - if ($control =~ /timestamp/i) { - $this->time_stamp_log($logfile, $control); - } - $guarded_message = $message; - $guarded_message =~ s/"/\\"/g; - system("echo \"$guarded_message\" >> $logfile"); - } -} - -sub month_name_to_month_number { - local($this,$month_name) = @_; - - $month_name_init = lc substr($month_name,0,3); - return $this->position($month_name_init, "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") + 1; -} - -my @short_month_names = ("Jan.","Febr.","March","April","May","June","July","Aug.","Sept.","Oct.","Nov.","Dec."); -my @full_month_names = ("January","February","March","April","May","June","July","August","September","October","November","December"); - -sub month_number_to_month_name { - local($this,$month_number, $control) = @_; - - $month_number =~ s/^0//; - if ($month_number =~ /^([1-9]|1[0-2])$/) { - return ($control && ($control =~ /short/i)) - ? $short_month_names[$month_number-1] - : $full_month_names[$month_number-1]; - } else { - return ""; - } -} - -sub leap_year { - local($this,$year) = @_; - - return 0 if $year % 4 != 0; - return 1 if $year % 400 == 0; - return 0 if $year % 100 == 0; - return 1; -} - -sub datetime { - local($this,$format,$time_in_secs, $command) = @_; - - $command = "" unless defined($command); - $time_in_secs = time unless defined($time_in_secs) && $time_in_secs; - @time_vector = ($command =~ /\b(gm|utc)\b/i) ? 
gmtime($time_in_secs) : localtime($time_in_secs); - ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)=@time_vector; - $thisyear = $year + 1900; - $thismon=(Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec)[$mon]; - $thismon2=("Jan.","Febr.","March","April","May","June","July","Aug.","Sept.","Oct.","Nov.","Dec.")[$mon]; - $thismonth = $mon + 1; - $thisday=(Sun,Mon,Tue,Wed,Thu,Fri,Sat)[$wday]; - $milliseconds = int(($time_in_secs - int($time_in_secs)) * 1000); - $date="$thisday $thismon $mday, $thisyear"; - $sdate="$thismon $mday, $thisyear"; - $dashedDate = sprintf("%04d-%02d-%02d",$thisyear,$thismonth,$mday); - $slashedDate = sprintf("%02d/%02d/%04d",$mday,$thismonth,$thisyear); - $time=sprintf("%02d:%02d:%02d",$hour,$min,$sec); - $shorttime=sprintf("%d:%02d",$hour,$min); - $shortdatetime = "$thismon2 $mday, $shorttime"; - - if ($date =~ /undefined/) { - return ""; - } elsif ($format eq "date at time") { - return "$date at $time"; - } elsif ($format eq "date") { - return "$date"; - } elsif ($format eq "sdate") { - return "$sdate"; - } elsif ($format eq "ddate") { - return "$dashedDate"; - } elsif ($format eq "time") { - return "$time"; - } elsif ($format eq "dateTtime+ms") { - return $dashedDate . "T" . $time . "." . $milliseconds; - } elsif ($format eq "dateTtime") { - return $dashedDate . "T" . $time; - } elsif ($format eq "yyyymmdd") { - return sprintf("%04d%02d%02d",$thisyear,$thismonth,$mday); - } elsif ($format eq "short date at time") { - return $shortdatetime; - } else { - return "$date at $time"; - } -} - -sub datetime_of_last_file_modification { - local($this,$format,$filename) = @_; - - return $this->datetime($format,(stat($filename))[9]); -} - -sub add_1sec { - local($this,$datetime) = @_; - - if (($year,$month,$day,$hour,$minute,$second) = ($datetime =~ /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)$/)) { - $second++; - if ($second >= 60) { $second -= 60; $minute++; } - if ($minute >= 60) { $minute -= 60; $hour++; } - if ($hour >= 24) { $hour -= 24; $day++; } - if ($month =~ /^(01|03|05|07|08|10|12)$/) { - if ($day > 31) { $day -= 31; $month++; } - } elsif ($month =~ /^(04|06|09|11)$/) { - if ($day > 30) { $day -= 30; $month++; } - } elsif (($month eq "02") && $this->leap_year($year)) { - if ($day > 29) { $day -= 29; $month++; } - } elsif ($month eq "02") { - if ($day > 28) { $day -= 28; $month++; } - } - if ($month > 12) { $month -= 12; $year++; } - return sprintf("%04d-%02d-%02dT%02d:%02d:%02d", $year,$month,$day,$hour,$minute,$second); - } else { - return ""; - } -} - -sub stopwatch { - local($this, $function, $id, *ht, *OUT) = @_; - # function: start|stop|count|report; start|stop times are absolute (in secs.) 
- - my $current_time = time; - # print OUT "Point S stopwatch $function $id $current_time\n"; - if ($function eq "start") { - if ($ht{STOPWATCH_START}->{$id}) { - $ht{STOPWATCH_N_RESTARTS}->{$id} = ($ht{STOPWATCH_N_RESTARTS}->{$id} || 0) + 1; - } else { - $ht{STOPWATCH_START}->{$id} = $current_time; - } - } elsif ($function eq "end") { - if ($start_time = $ht{STOPWATCH_START}->{$id}) { - $ht{STOPWATCH_TIME}->{$id} = ($ht{STOPWATCH_TIME}->{$id} || 0) + ($current_time - $start_time); - $ht{STOPWATCH_START}->{$id} = ""; - } else { - $ht{STOPWATCH_N_DEAD_ENDS}->{$id} = ($ht{STOPWATCH_N_DEAD_ENDS}->{$id} || 0) + 1; - } - } elsif ($function eq "count") { - $ht{STOPWATCH_COUNT}->{$id} = ($ht{STOPWATCH_COUNT}->{$id} || 0) + 1; - } elsif ($function eq "report") { - my $id2; - foreach $id2 (keys %{$ht{STOPWATCH_START}}) { - if ($start_time = $ht{STOPWATCH_START}->{$id2}) { - $ht{STOPWATCH_TIME}->{$id2} = ($ht{STOPWATCH_TIME}->{$id2} || 0) + ($current_time - $start_time); - $ht{STOPWATCH_START}->{$id2} = $current_time; - } - } - print OUT "Time report:\n"; - foreach $id2 (sort { $ht{STOPWATCH_TIME}->{$b} <=> $ht{STOPWATCH_TIME}->{$a} } - keys %{$ht{STOPWATCH_TIME}}) { - my $stopwatch_time = $ht{STOPWATCH_TIME}->{$id2}; - $stopwatch_time = $this->round_to_n_decimal_places($stopwatch_time, 3); - my $n_restarts = $ht{STOPWATCH_N_RESTARTS}->{$id2}; - my $n_dead_ends = $ht{STOPWATCH_N_DEAD_ENDS}->{$id2}; - my $start_time = $ht{STOPWATCH_START}->{$id2}; - print OUT " $id2: $stopwatch_time seconds"; - print OUT " with $n_restarts restart(s)" if $n_restarts; - print OUT " with $n_dead_ends dead end(s)" if $n_dead_ends; - print OUT " (active)" if $start_time; - print OUT "\n"; - } - foreach $id2 (sort { $ht{STOPWATCH_COUNT}->{$b} <=> $ht{STOPWATCH_COUNT}->{$a} } - keys %{$ht{STOPWATCH_COUNT}}) { - $count = $ht{STOPWATCH_COUNT}->{$id2}; - print OUT " C $id2: $count\n"; - } - } -} - -sub print_html_banner { - local($this,$text,$bgcolor,*OUT,$control) = @_; - - $control = "" unless defined($control); - $bgcolor = "#BBCCFF" unless defined($bgcolor); - print OUT "
          "; - print OUT "  " unless $text =~ /^\s*<(table|nobr)/; - print OUT $text; - print OUT "
          \n"; - print OUT "
          \n" unless $control =~ /nobr/i; -} - -sub print_html_head { - local($this, $title, *OUT, $control, $onload_fc, $add_javascript) = @_; - - $control = "" unless defined($control); - $onload_fc = "" unless defined($onload_fc); - $onload_clause = ($onload_fc) ? " onload=\"$onload_fc\"" : ""; - $add_javascript = "" unless defined($add_javascript); - $max_age_clause = ""; - $max_age_clause = ""; # if $control =~ /\bexp1hour\b/; - $css_clause = ""; - $css_clause = "\n " if $control =~ /css/; - $css_clause .= "\n " if $control =~ /css/; - $css_clause = "\n " if $control =~ /css-handheld/; - $icon_clause = ""; - $icon_clause .= "\n " if $control =~ /\bAMR\b/i; - $icon_clause .= "\n " if $control =~ /\bCRE\b/i; - print OUT "\xEF\xBB\xBF\n" unless $control =~ /\bno-bom\b/; # utf8 marker byte order mark - print OUT< - - - $max_age_clause - $title$css_clause$icon_clause -END_OF_HEADER1 -; - - unless ($control =~ /no javascript/) { - print OUT< - - -END_OF_HEADER2 -; - } - - print OUT< - -END_OF_HEADER3 -; -} - - -sub print_html_foot { - local($this, *OUT) = @_; - - print OUT " \n"; - print OUT "\n"; -} - -sub print_html_page { - local($this, *OUT, $s) = @_; - - print OUT "\xEF\xBB\xBF\n"; - print OUT "\n"; - print OUT " \n"; - print OUT " DEBUG\n"; - print OUT " \n"; - print OUT " \n"; - print OUT " \n"; - print OUT " \n"; - print OUT " $s\n"; - print OUT " \n"; - print OUT "\n"; -} - -sub http_catfile { - local($this, @path) = @_; - - $result = File::Spec->catfile(@path); - $result =~ s/(https?):\/([a-zA-Z])/$1:\/\/$2/; - return $result; -} - -sub underscore_to_space { - local($this, $s) = @_; - - return "" unless defined($s); - - $s =~ s/_+/ /g; - return $s; -} - -sub space_to_underscore { - local($this, $s) = @_; - - return "" unless defined($s); - - $s =~ s/ /_/g; - return $s; -} - -sub remove_spaces { - local($this, $s) = @_; - - $s =~ s/\s//g; - return $s; -} - -sub is_punctuation_string_p { - local($this, $s) = @_; - - return "" unless $s; - $s = $this->normalize_string($s) if $s =~ /[\x80-\xBF]/; - return $s =~ /^[-_,;:.?!\/\@+*"()]+$/; -} - -sub is_rare_punctuation_string_p { - local($this, $s) = @_; - - return 0 unless $s =~ /^[\x21-\x2F\x3A\x40\x5B-\x60\x7B-\x7E]{2,}$/; - return 0 if $s =~ /^(\.{2,3}|-{2,3}|\*{2,3}|::|\@?[-\/:]\@?)$/; - return 1; -} - -sub simplify_punctuation { - local($this, $s) = @_; - - $s =~ s/\xE2\x80\x92/-/g; - $s =~ s/\xE2\x80\x93/-/g; - $s =~ s/\xE2\x80\x94/-/g; - $s =~ s/\xE2\x80\x95/-/g; - $s =~ s/\xE2\x80\x98/`/g; - $s =~ s/\xE2\x80\x99/'/g; - $s =~ s/\xE2\x80\x9A/`/g; - $s =~ s/\xE2\x80\x9C/"/g; - $s =~ s/\xE2\x80\x9D/"/g; - $s =~ s/\xE2\x80\x9E/"/g; - $s =~ s/\xE2\x80\x9F/"/g; - $s =~ s/\xE2\x80\xA2/*/g; - $s =~ s/\xE2\x80\xA4/./g; - $s =~ s/\xE2\x80\xA5/../g; - $s =~ s/\xE2\x80\xA6/.../g; - return $s; -} - -sub latin_plus_p { - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - return $s =~ /^([\x20-\x7E]|\xC2[\xA1-\xBF]|[\xC3-\xCC][\x80-\xBF]|\xCA[\x80-\xAF]|\xE2[\x80-\xAF][\x80-\xBF])+$/; -} - -sub nth_line_in_file { - local($this, $filename, $n) = @_; - - return "" unless $n =~ /^[1-9]\d*$/; - open(IN, $filename) || return ""; - my $line_no = 0; - while () { - $line_no++; - if ($n == $line_no) { - $_ =~ s/\s+$//; - close(IN); - return $_; - } - } - close(IN); - return ""; -} - -sub read_file { - local($this, $filename) = @_; - - my $file_content = ""; - open(IN, $filename) || return ""; - while () { - $file_content .= $_; - } - close(IN); - return $file_content; -} - -sub cap_list { - local($this, @list) = @_; - - 
@cap_list = (); - foreach $l (@list) { - ($premod, $core) = ($l =~ /^(a|an) (\S.*)$/); - if (defined($premod) && defined($core)) { - push(@cap_list, "$premod \u$core"); - } elsif ($this->cap_member($l, "US")) { - push(@cap_list, uc $l); - } else { - push(@cap_list, "\u$l"); - } - } - return @cap_list; -} - -sub integer_list_with_commas_and_ranges { - local($this, @list) = @_; - - my $in_range_p = 0; - my $last_value = 0; - my $result = ""; - while (@list) { - $elem = shift @list; - if ($elem =~ /^\d+$/) { - if ($in_range_p) { - if ($elem == $last_value + 1) { - $last_value = $elem; - } else { - $result .= "-$last_value, $elem"; - if (@list && ($next = $list[0]) && ($elem =~ /^\d+$/) && ($next =~ /^\d+$/) - && ($next == $elem + 1)) { - $last_value = $elem; - $in_range_p = 1; - } else { - $in_range_p = 0; - } - } - } else { - $result .= ", $elem"; - if (@list && ($next = $list[0]) && ($elem =~ /^\d+$/) && ($next =~ /^\d+$/) - && ($next == $elem + 1)) { - $last_value = $elem; - $in_range_p = 1; - } - } - } else { - if ($in_range_p) { - $result .= "-$last_value, $elem"; - $in_range_p = 0; - } else { - $result .= ", $elem"; - } - } - } - if ($in_range_p) { - $result .= "-$last_value"; - } - $result =~ s/^,\s*//; - return $result; -} - -sub comma_append { - local($this, $a, $b) = @_; - - if (defined($a) && ($a =~ /\S/)) { - if (defined($b) && ($b =~ /\S/)) { - return "$a,$b"; - } else { - return $a; - } - } else { - if (defined($b) && ($b =~ /\S/)) { - return $b; - } else { - return ""; - } - } -} - -sub version { - return "3.17"; -} - -sub print_stderr { - local($this, $message, $verbose) = @_; - - $verbose = 1 unless defined($verbose); - print STDERR $message if $verbose; - return 1; -} - -sub print_log { - local($this, $message, *LOG, $verbose) = @_; - - $verbose = 1 unless defined($verbose); - print LOG $message if $verbose; - return 1; -} - -sub compare_alignment { - local($this, $a, $b, $delimiter) = @_; - - $delimiter = "-" unless $delimiter; - my @a_list = split($delimiter, $a); - my @b_list = split($delimiter, $b); - - while (@a_list && @b_list) { - $a_head = shift @a_list; - $b_head = shift @b_list; - next if $a_head eq $b_head; - return $a_head <=> $b_head if ($a_head =~ /^\d+$/) && ($b_head =~ /^\d+$/); - return $a_head cmp $b_head; - } - return -1 if @a_list; - return 1 if @b_list; - return 0; -} - -sub normalize_string { - # normalize punctuation, full-width characters (to ASCII) - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - - $norm_s = $s; - $norm_s =~ tr/A-Z/a-z/; - - $norm_s =~ s/ \@([-:\/])/ $1/g; # non-initial left @ - $norm_s =~ s/^\@([-:\/])/$1/; # initial left @ - $norm_s =~ s/([-:\/])\@ /$1 /g; # non-initial right @ - $norm_s =~ s/([-:\/])\@$/$1/; # initial right @ - $norm_s =~ s/([\(\)"])([,;.?!])/$1 $2/g; - $norm_s =~ s/\bcannot\b/can not/g; - - $norm_s =~ s/\xC2\xAD/-/g; # soft hyphen - - $norm_s =~ s/\xE2\x80\x94/-/g; # em dash - $norm_s =~ s/\xE2\x80\x95/-/g; # horizontal bar - $norm_s =~ s/\xE2\x80\x98/`/g; # grave accent - $norm_s =~ s/\xE2\x80\x99/'/g; # apostrophe - $norm_s =~ s/\xE2\x80\x9C/"/g; # left double quote mark - $norm_s =~ s/\xE2\x80\x9D/"/g; # right double quote mark - $norm_s =~ s/\xE2\x94\x80/-/g; # box drawings light horizontal - $norm_s =~ s/\xE2\x94\x81/-/g; # box drawings heavy horizontal - $norm_s =~ s/\xE3\x80\x81/,/g; # ideographic comma - $norm_s =~ s/\xE3\x80\x82/./g; # ideographic full stop - $norm_s =~ s/\xE3\x80\x88/"/g; # left angle bracket - $norm_s =~ s/\xE3\x80\x89/"/g; # right angle bracket - 
$norm_s =~ s/\xE3\x80\x8A/"/g; # left double angle bracket - $norm_s =~ s/\xE3\x80\x8B/"/g; # right double angle bracket - $norm_s =~ s/\xE3\x80\x8C/"/g; # left corner bracket - $norm_s =~ s/\xE3\x80\x8D/"/g; # right corner bracket - $norm_s =~ s/\xE3\x80\x8E/"/g; # left white corner bracket - $norm_s =~ s/\xE3\x80\x8F/"/g; # right white corner bracket - $norm_s =~ s/\xE3\x83\xBB/\xC2\xB7/g; # katakana middle dot -> middle dot - $norm_s =~ s/\xEF\xBB\xBF//g; # UTF8 marker - - if ($control =~ /\bzh\b/i) { - # de-tokenize Chinese - unless ($control =~ /\bpreserve-tok\b/) { - while ($norm_s =~ /[\xE0-\xEF][\x80-\xBF][\x80-\xBF] [\xE0-\xEF][\x80-\xBF][\x80-\xBF]/) { - $norm_s =~ s/([\xE0-\xEF][\x80-\xBF][\x80-\xBF]) ([\xE0-\xEF][\x80-\xBF][\x80-\xBF])/$1$2/g; - } - $norm_s =~ s/([\xE0-\xEF][\x80-\xBF][\x80-\xBF]) ([\x21-\x7E])/$1$2/g; - $norm_s =~ s/([\x21-\x7E]) ([\xE0-\xEF][\x80-\xBF][\x80-\xBF])/$1$2/g; - } - - # fullwidth characters - while ($norm_s =~ /\xEF\xBC[\x81-\xBF]/) { - ($pre,$fullwidth,$post) = ($norm_s =~ /^(.*)(\xEF\xBC[\x81-\xBF])(.*)$/); - $fullwidth =~ s/^\xEF\xBC//; - $fullwidth =~ tr/[\x81-\xBF]/[\x21-\x5F]/; - $norm_s = "$pre$fullwidth$post"; - } - while ($norm_s =~ /\xEF\xBD[\x80-\x9E]/) { - ($pre,$fullwidth,$post) = ($norm_s =~ /^(.*)(\xEF\xBD[\x80-\x9E])(.*)$/); - $fullwidth =~ s/^\xEF\xBD//; - $fullwidth =~ tr/[\x80-\x9E]/[\x60-\x7E]/; - $norm_s = "$pre$fullwidth$post"; - } - $norm_s =~ tr/A-Z/a-z/ unless $control =~ /\bpreserve-case\b/; - - unless ($control =~ /\bpreserve-tok\b/) { - while ($norm_s =~ /[\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E] [\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]/) { - $norm_s =~ s/([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]) ([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E])/$1$2/g; - } - $norm_s =~ s/([\x21-\x7E]) ([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E])/$1$2/g; - $norm_s =~ s/([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]) ([\x21-\x7E])/$1$2/g; - $norm_s =~ s/ (\xC2\xA9|\xC2\xB7|\xC3\x97) /$1/g; # copyright sign, middle dot, multiplication sign - } - } - - if (($control =~ /\bzh\b/i) && ($control =~ /\bnorm-char\b/)) { - $norm_s =~ s/\xE6\x96\xBC/\xE4\xBA\x8E/g; # feng1 (first char. of Chin. "lie low", line 1308) - $norm_s =~ s/\xE6\xAD\xA7/\xE5\xB2\x90/g; # qi2 (second char. of Chin. "difference", line 1623) - $norm_s =~ s/\xE8\x82\xB2/\xE6\xAF\x93/g; # yu4 (second char. of Chin. "sports", line 440) - $norm_s =~ s/\xE8\x91\x97/\xE7\x9D\x80/g; # zhao (second char. of Chin. "prominent", line 4) - $norm_s =~ s/\xE9\x81\x87/\xE8\xBF\x82/g; # yu4 (second char. of Chin. "good luck", line 959) - } - - if ($control =~ /\bspurious-punct\b/) { - $norm_s =~ s/^\s*[-_\." ]+//; - $norm_s =~ s/[-_\." 
]+\s*$//; - $norm_s =~ s/\(\s+end\s+\)\s*$//i; - $norm_s =~ s/^\s*null\s*$//i; - } - - $norm_s =~ s/^\s+//; - $norm_s =~ s/\s+$//; - $norm_s =~ s/\s+/ /g; - - return $norm_s; -} - -sub normalize_extreme_string { - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - - $norm_s = $s; - $norm_s =~ s/\xE2\xA9\xBE/\xE2\x89\xA5/g; # slanted greater than or equal to - - return $norm_s; -} - -sub increase_ht_count { - local($this, *ht, $incr, @path) = @_; - - if ($#path == 0) { - $ht{($path[0])} = ($ht{($path[0])} || 0) + $incr; - } elsif ($#path == 1) { - $ht{($path[0])}->{($path[1])} - = ($ht{($path[0])}->{($path[1])} || 0) + $incr; - } elsif ($#path == 2) { - $ht{($path[0])}->{($path[1])}->{($path[2])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])} || 0) + $incr; - } elsif ($#path == 3) { - $ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])} || 0) + $incr; - } elsif ($#path == 4) { - $ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])}->{($path[4])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])}->{($path[4])} || 0) + $incr; - } else { - print STDERR "increase_ht_count unsupported for path of length " . ($#path + 1) . "\n"; - } -} - -sub adjust_numbers { - # non-negative integers - local($this, $s, $delta) = @_; - - $result = ""; - while ($s =~ /\d/) { - ($pre,$i,$post) = ($s =~ /^([^0-9]*)(\d+)([^0-9].*|)$/); - $result .= $pre . ($i + $delta); - $s = $post; - } - $result .= $s; - return $result; -} - -sub first_defined { - local($this, @list) = @_; - - foreach $elem (@list) { - return $elem if defined($elem); - } - return ""; -} - -sub first_defined_non_empty { - local($this, @list) = @_; - - foreach $item (@list) { - return $item if defined($item) && ($item ne ""); - } - return ""; -} - -sub elem_after_member_list { - local($this,$elem,@array) = @_; - - my @elem_after_member_list = (); - foreach $i ((0 .. ($#array - 1))) { - push(@elem_after_member_list, $array[$i+1]) if $elem eq $array[$i]; - } - return join(" ", @elem_after_member_list); -} - -sub add_value_to_list { - local($this,$s,$value,$sep) = @_; - - $s = "" unless defined($s); - $sep = "," unless defined($sep); - return ($s =~ /\S/) ? "$s$sep$value" : $value; -} - -sub add_new_value_to_list { - local($this,$s,$value,$sep) = @_; - - $s = "" unless defined($s); - $sep = "," unless defined($sep); - my @values = split(/$sep/, $s); - push(@values, $value) if defined($value) && ! $this->member($value, @values); - - return join($sep, @values); -} - -sub add_new_hash_value_to_list { - local($this,*ht,$key,$value,$sep) = @_; - - $sep = "," unless defined($sep); - my $value_s = $ht{$key}; - if (defined($value_s)) { - my @values = split(/$sep/, $value_s); - push(@values, $value) unless $this->member($value, @values); - $ht{$key} = join($sep, @values); - } else { - $ht{$key} = $value; - } -} - -sub ip_info { - local($this, $ip_address) = @_; - - my %ip_map = (); - $ip_map{"128.9.208.69"} = "Ulf Hermjakob (bach.isi.edu)"; - $ip_map{"128.9.208.169"} = "Ulf Hermjakob (brahms.isi.edu)"; - $ip_map{"128.9.184.148"} = "Ulf Hermjakob (beethoven.isi.edu ?)"; - $ip_map{"128.9.184.162"} = "Ulf Hermjakob (beethoven.isi.edu)"; - $ip_map{"128.9.176.39"} = "Kevin Knight"; - $ip_map{"128.9.184.187"} = "Kevin Knight"; - $ip_map{"128.9.216.56"} = "Kevin Knight"; - $ip_map{"128.9.208.155"} = "cage.isi.edu"; - - return ($ip_name = $ip_map{$ip_address}) ? 
"$ip_address - $ip_name" : $ip_address; -} - -# from standalone de-accent.pl -sub de_accent_string { - local($this, $s) = @_; - - $s =~ tr/A-Z/a-z/; - unless (0) { - # Latin-1 - if ($s =~ /\xC3[\x80-\xBF]/) { - $s =~ s/(À|Á|Â|Ã|Ä|Å)/A/g; - $s =~ s/Æ/Ae/g; - $s =~ s/Ç/C/g; - $s =~ s/Ð/D/g; - $s =~ s/(È|É|Ê|Ë)/E/g; - $s =~ s/(Ì|Í|Î|Ï)/I/g; - $s =~ s/Ñ/N/g; - $s =~ s/(Ò|Ó|Ô|Õ|Ö|Ø)/O/g; - $s =~ s/(Ù|Ú|Û|Ü)/U/g; - $s =~ s/Þ/Th/g; - $s =~ s/Ý/Y/g; - $s =~ s/(à|á|â|ã|ä|å)/a/g; - $s =~ s/æ/ae/g; - $s =~ s/ç/c/g; - $s =~ s/(è|é|ê|ë)/e/g; - $s =~ s/(ì|í|î|ï)/i/g; - $s =~ s/ð/d/g; - $s =~ s/ñ/n/g; - $s =~ s/(ò|ó|ô|õ|ö)/o/g; - $s =~ s/ß/ss/g; - $s =~ s/þ/th/g; - $s =~ s/(ù|ú|û|ü)/u/g; - $s =~ s/(ý|ÿ)/y/g; - } - # Latin Extended-A - if ($s =~ /[\xC4-\xC5][\x80-\xBF]/) { - $s =~ s/(Ā|Ă|Ą)/A/g; - $s =~ s/(ā|ă|ą)/a/g; - $s =~ s/(Ć|Ĉ|Ċ|Č)/C/g; - $s =~ s/(ć|ĉ|ċ|č)/c/g; - $s =~ s/(Ď|Đ)/D/g; - $s =~ s/(ď|đ)/d/g; - $s =~ s/(Ē|Ĕ|Ė|Ę|Ě)/E/g; - $s =~ s/(ē|ĕ|ė|ę|ě)/e/g; - $s =~ s/(Ĝ|Ğ|Ġ|Ģ)/G/g; - $s =~ s/(ĝ|ğ|ġ|ģ)/g/g; - $s =~ s/(Ĥ|Ħ)/H/g; - $s =~ s/(ĥ|ħ)/h/g; - $s =~ s/(Ĩ|Ī|Ĭ|Į|İ)/I/g; - $s =~ s/(ĩ|ī|ĭ|į|ı)/i/g; - $s =~ s/IJ/Ij/g; - $s =~ s/ij/ij/g; - $s =~ s/Ĵ/J/g; - $s =~ s/ĵ/j/g; - $s =~ s/Ķ/K/g; - $s =~ s/(ķ|ĸ)/k/g; - $s =~ s/(Ĺ|Ļ|Ľ|Ŀ|Ł)/L/g; - $s =~ s/(ļ|ľ|ŀ|ł)/l/g; - $s =~ s/(Ń|Ņ|Ň|Ŋ)/N/g; - $s =~ s/(ń|ņ|ň|ʼn|ŋ)/n/g; - $s =~ s/(Ō|Ŏ|Ő)/O/g; - $s =~ s/(ō|ŏ|ő)/o/g; - $s =~ s/Œ/Oe/g; - $s =~ s/œ/oe/g; - $s =~ s/(Ŕ|Ŗ|Ř)/R/g; - $s =~ s/(ŕ|ŗ|ř)/r/g; - $s =~ s/(Ś|Ŝ|Ş|Š)/S/g; - $s =~ s/(ś|ŝ|ş|š|ſ)/s/g; - $s =~ s/(Ţ|Ť|Ŧ)/T/g; - $s =~ s/(ţ|ť|ŧ)/t/g; - $s =~ s/(Ũ|Ū|Ŭ|Ů|Ű|Ų)/U/g; - $s =~ s/(ũ|ū|ŭ|ů|ű|ų)/u/g; - $s =~ s/Ŵ/W/g; - $s =~ s/ŵ/w/g; - $s =~ s/(Ŷ|Ÿ)/Y/g; - $s =~ s/ŷ/y/g; - $s =~ s/(Ź|Ż|Ž)/Z/g; - $s =~ s/(ź|ż|ž)/z/g; - } - # Latin Extended-B - if ($s =~ /[\xC7-\xC7][\x80-\xBF]/) { - $s =~ s/(\xC7\x8D)/A/g; - $s =~ s/(\xC7\x8E)/a/g; - $s =~ s/(\xC7\x8F)/I/g; - $s =~ s/(\xC7\x90)/i/g; - $s =~ s/(\xC7\x91)/O/g; - $s =~ s/(\xC7\x92)/o/g; - $s =~ s/(\xC7\x93)/U/g; - $s =~ s/(\xC7\x94)/u/g; - $s =~ s/(\xC7\x95)/U/g; - $s =~ s/(\xC7\x96)/u/g; - $s =~ s/(\xC7\x97)/U/g; - $s =~ s/(\xC7\x98)/u/g; - $s =~ s/(\xC7\x99)/U/g; - $s =~ s/(\xC7\x9A)/u/g; - $s =~ s/(\xC7\x9B)/U/g; - $s =~ s/(\xC7\x9C)/u/g; - } - # Latin Extended Additional - if ($s =~ /\xE1[\xB8-\xBF][\x80-\xBF]/) { - $s =~ s/(ḁ|ạ|ả|ấ|ầ|ẩ|ẫ|ậ|ắ|ằ|ẳ|ẵ|ặ|ẚ)/a/g; - $s =~ s/(ḃ|ḅ|ḇ)/b/g; - $s =~ s/(ḉ)/c/g; - $s =~ s/(ḋ|ḍ|ḏ|ḑ|ḓ)/d/g; - $s =~ s/(ḕ|ḗ|ḙ|ḛ|ḝ|ẹ|ẻ|ẽ|ế|ề|ể|ễ|ệ)/e/g; - $s =~ s/(ḟ)/f/g; - $s =~ s/(ḡ)/g/g; - $s =~ s/(ḣ|ḥ|ḧ|ḩ|ḫ)/h/g; - $s =~ s/(ḭ|ḯ|ỉ|ị)/i/g; - $s =~ s/(ḱ|ḳ|ḵ)/k/g; - $s =~ s/(ḷ|ḹ|ḻ|ḽ)/l/g; - $s =~ s/(ḿ|ṁ|ṃ)/m/g; - $s =~ s/(ṅ|ṇ|ṉ|ṋ)/m/g; - $s =~ s/(ọ|ỏ|ố|ồ|ổ|ỗ|ộ|ớ|ờ|ở|ỡ|ợ|ṍ|ṏ|ṑ|ṓ)/o/g; - $s =~ s/(ṕ|ṗ)/p/g; - $s =~ s/(ṙ|ṛ|ṝ|ṟ)/r/g; - $s =~ s/(ṡ|ṣ|ṥ|ṧ|ṩ|ẛ)/s/g; - $s =~ s/(ṫ|ṭ|ṯ|ṱ)/t/g; - $s =~ s/(ṳ|ṵ|ṷ|ṹ|ṻ|ụ|ủ|ứ|ừ|ử|ữ|ự)/u/g; - $s =~ s/(ṽ|ṿ)/v/g; - $s =~ s/(ẁ|ẃ|ẅ|ẇ|ẉ|ẘ)/w/g; - $s =~ s/(ẋ|ẍ)/x/g; - $s =~ s/(ẏ|ỳ|ỵ|ỷ|ỹ|ẙ)/y/g; - $s =~ s/(ẑ|ẓ|ẕ)/z/g; - $s =~ s/(Ḁ|Ạ|Ả|Ấ|Ầ|Ẩ|Ẫ|Ậ|Ắ|Ằ|Ẳ|Ẵ|Ặ)/A/g; - $s =~ s/(Ḃ|Ḅ|Ḇ)/B/g; - $s =~ s/(Ḉ)/C/g; - $s =~ s/(Ḋ|Ḍ|Ḏ|Ḑ|Ḓ)/D/g; - $s =~ s/(Ḕ|Ḗ|Ḙ|Ḛ|Ḝ|Ẹ|Ẻ|Ẽ|Ế|Ề|Ể|Ễ|Ệ)/E/g; - $s =~ s/(Ḟ)/F/g; - $s =~ s/(Ḡ)/G/g; - $s =~ s/(Ḣ|Ḥ|Ḧ|Ḩ|Ḫ)/H/g; - $s =~ s/(Ḭ|Ḯ|Ỉ|Ị)/I/g; - $s =~ s/(Ḱ|Ḳ|Ḵ)/K/g; - $s =~ s/(Ḷ|Ḹ|Ḻ|Ḽ)/L/g; - $s =~ s/(Ḿ|Ṁ|Ṃ)/M/g; - $s =~ s/(Ṅ|Ṇ|Ṉ|Ṋ)/N/g; - $s =~ s/(Ṍ|Ṏ|Ṑ|Ṓ|Ọ|Ỏ|Ố|Ồ|Ổ|Ỗ|Ộ|Ớ|Ờ|Ở|Ỡ|Ợ)/O/g; - $s =~ s/(Ṕ|Ṗ)/P/g; - $s =~ s/(Ṙ|Ṛ|Ṝ|Ṟ)/R/g; - $s =~ s/(Ṡ|Ṣ|Ṥ|Ṧ|Ṩ)/S/g; - $s =~ s/(Ṫ|Ṭ|Ṯ|Ṱ)/T/g; - $s =~ s/(Ṳ|Ṵ|Ṷ|Ṹ|Ṻ|Ụ|Ủ|Ứ|Ừ|Ử|Ữ|Ự)/U/g; - $s =~ s/(Ṽ|Ṿ)/V/g; - $s =~ s/(Ẁ|Ẃ|Ẅ|Ẇ|Ẉ)/W/g; - $s =~ s/(Ẍ)/X/g; - $s =~ 
s/(Ẏ|Ỳ|Ỵ|Ỷ|Ỹ)/Y/g; - $s =~ s/(Ẑ|Ẓ|Ẕ)/Z/g; - } - # Greek letters - if ($s =~ /\xCE[\x86-\xAB]/) { - $s =~ s/ά/α/g; - $s =~ s/έ/ε/g; - $s =~ s/ί/ι/g; - $s =~ s/ϊ/ι/g; - $s =~ s/ΐ/ι/g; - $s =~ s/ό/ο/g; - $s =~ s/ύ/υ/g; - $s =~ s/ϋ/υ/g; - $s =~ s/ΰ/υ/g; - $s =~ s/ώ/ω/g; - $s =~ s/Ά/Α/g; - $s =~ s/Έ/Ε/g; - $s =~ s/Ή/Η/g; - $s =~ s/Ί/Ι/g; - $s =~ s/Ϊ/Ι/g; - $s =~ s/Ύ/Υ/g; - $s =~ s/Ϋ/Υ/g; - $s =~ s/Ώ/Ω/g; - } - # Cyrillic letters - if ($s =~ /\xD0[\x80-\xAF]/) { - $s =~ s/Ѐ/Е/g; - $s =~ s/Ё/Е/g; - $s =~ s/Ѓ/Г/g; - $s =~ s/Ќ/К/g; - $s =~ s/Ѝ/И/g; - $s =~ s/Й/И/g; - $s =~ s/ѐ/е/g; - $s =~ s/ё/е/g; - $s =~ s/ѓ/г/g; - $s =~ s/ќ/к/g; - $s =~ s/ѝ/и/g; - $s =~ s/й/и/g; - } - } - return $s; -} - -sub read_de_accent_case_resource { - local($this, $filename, *ht, *LOG, $verbose) = @_; - # e.g. data/char-de-accent-lc.txt - - if (open(IN, $filename)) { - my $mode = "de-accent"; - my $line_number = 0; - my $n_de_accent_targets = 0; - my $n_de_accent_sources = 0; - my $n_case_entries = 0; - while () { - s/^\xEF\xBB\xBF//; - s/\s*$//; - $line_number++; - if ($_ =~ /^#+\s*CASE\b/) { - $mode = "case"; - } elsif ($_ =~ /^#+\s*PUNCTUATION NORMALIZATION\b/) { - $mode = "punctuation-normalization"; - } elsif ($_ =~ /^#/) { - # ignore comment - } elsif ($_ =~ /^\s*$/) { - # ignore empty line - } elsif (($mode eq "de-accent") && (($char_without_accent, @chars_with_accent) = split(/\s+/, $_))) { - if (keys %{$ht{DE_ACCENT_INV}->{$char_without_accent}}) { - print LOG "Ignoring duplicate de-accent line for target $char_without_accent in l.$line_number in $filename\n" unless $char_without_accent eq "--"; - } elsif (@chars_with_accent) { - $n_de_accent_targets++; - foreach $char_with_accent (@chars_with_accent) { - my @prev_target_chars = keys %{$ht{DE_ACCENT}->{$char_with_accent}}; - print LOG "Accent character $char_with_accent has duplicate target $char_without_accent (besides @prev_target_chars) in l.$line_number in $filename\n" if @prev_target_chars && (! ($char_without_accent =~ /^[aou]e$/i)); - $char_without_accent = "" if $char_without_accent eq "--"; - $ht{DE_ACCENT}->{$char_with_accent}->{$char_without_accent} = 1; - $ht{DE_ACCENT1}->{$char_with_accent} = $char_without_accent - if (! 
defined($ht{DE_ACCENT1}->{$char_with_accent})) - && ($char_without_accent =~ /^.[\x80-\xBF]*$/); - $ht{DE_ACCENT_INV}->{$char_without_accent}->{$char_with_accent} = 1; - $ht{UPPER_CASE_OR_ACCENTED}->{$char_with_accent} = 1; - $n_de_accent_sources++; - } - } else { - print LOG "Empty de-accent list for $char_without_accent in l.$line_number in $filename\n"; - } - } elsif (($mode eq "punctuation-normalization") && (($norm_punct, @unnorm_puncts) = split(/\s+/, $_))) { - if (keys %{$ht{NORM_PUNCT_INV}->{$norm_punct}}) { - print LOG "Ignoring duplicate punctuation-normalization line for target $norm_punct in l.$line_number in $filename\n"; - } elsif (@unnorm_puncts) { - foreach $unnorm_punct (@unnorm_puncts) { - my $prev_norm_punct = $ht{NORM_PUNCT}->{$unnorm_punct}; - if ($prev_norm_punct) { - print LOG "Ignoring duplicate punctuation normalization $unnorm_punct -> $norm_punct (besides $prev_norm_punct) in l.$line_number in $filename\n"; - } - $ht{NORM_PUNCT}->{$unnorm_punct} = $norm_punct; - $ht{NORM_PUNCT_INV}->{$norm_punct}->{$unnorm_punct} = 1; - $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$unnorm_punct} = $norm_punct; - } - } - } elsif (($mode eq "case") && (($uc_char, $lc_char) = ($_ =~ /^(\S+)\s+(\S+)\s*$/))) { - $ht{UPPER_TO_LOWER_CASE}->{$uc_char} = $lc_char; - $ht{LOWER_TO_UPPER_CASE}->{$lc_char} = $uc_char; - $ht{UPPER_CASE_P}->{$uc_char} = 1; - $ht{LOWER_CASE_P}->{$lc_char} = 1; - $ht{UPPER_CASE_OR_ACCENTED}->{$uc_char} = 1; - $n_case_entries++; - } else { - print LOG "Unrecognized l.$line_number in $filename\n"; - } - } - foreach $char (keys %{$ht{UPPER_CASE_OR_ACCENTED}}) { - my $lc_char = $ht{UPPER_TO_LOWER_CASE}->{$char}; - $lc_char = $char unless defined($lc_char); - my @de_accend_char_results = sort keys %{$ht{DE_ACCENT}->{$lc_char}}; - my $new_char = (@de_accend_char_results) ? $de_accend_char_results[0] : $lc_char; - $ht{LC_DE_ACCENT_CHAR}->{$char} = $new_char; - $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$char} = $new_char; - } - close(IN); - print LOG "Found $n_case_entries case entries, $n_de_accent_sources/$n_de_accent_targets source/target entries in $line_number lines in file $filename\n" if $verbose; - } else { - print LOG "Can't open $filename\n"; - } -} - -sub de_accent_char { - local($this, $char, *ht, $default) = @_; - - @de_accend_char_results = sort keys %{$ht{DE_ACCENT}->{$char}}; - return (@de_accend_char_results) ? @de_accend_char_results : ($default); -} - -sub lower_case_char { - local($this, $char, *ht, $default) = @_; - - return (defined($lc = $ht{UPPER_TO_LOWER_CASE}->{$char})) ? $lc : $default; -} - -sub lower_case_and_de_accent_char { - local($this, $char, *ht) = @_; - - my $lc_char = $this->lower_case_char($char, *ht, $char); - return $this->de_accent_char($lc_char, *ht, $lc_char); -} - -sub lower_case_and_de_accent_string { - local($this, $string, *ht, $control) = @_; - - # $this->stopwatch("start", "lower_case_and_de_accent_string", *ht, *LOG); - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - my @chars = $this->split_into_utf8_characters($string); - my $result = ""; - foreach $char (@chars) { - my @lc_de_accented_chars = $this->lower_case_and_de_accent_char($char, *ht); - if ($norm_punct_p - && (! @lc_de_accented_chars)) { - my $norm_punct = $ht{NORM_PUNCT}->{$char}; - @lc_de_accented_chars = ($norm_punct) if $norm_punct; - } - $result .= ((@lc_de_accented_chars) ? 
$lc_de_accented_chars[0] : $char); - } - # $this->stopwatch("end", "lower_case_and_de_accent_string", *ht, *LOG); - return $result; -} - -sub lower_case_and_de_accent_norm_punct { - local($this, $char, *ht) = @_; - - my $new_char = $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$char}; - return (defined($new_char)) ? $new_char : $char; -} - -sub lower_case_and_de_accent_string2 { - local($this, $string, *ht, $control) = @_; - - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - # $this->stopwatch("start", "lower_case_and_de_accent_string2", *ht, *LOG); - my $s = $string; - my $result = ""; - while (($char, $rest) = ($s =~ /^(.[\x80-\xBF]*)(.*)$/)) { - my $new_char = $ht{LC_DE_ACCENT_CHAR}->{$char}; - if (defined($new_char)) { - $result .= $new_char; - } elsif ($norm_punct_p && defined($new_char = $ht{NORM_PUNCT}->{$char})) { - $result .= $new_char; - } else { - $result .= $char; - } - $s = $rest; - } - # $this->stopwatch("end", "lower_case_and_de_accent_string2", *ht, *LOG); - return $result; -} - -sub lower_case_string { - local($this, $string, *ht, $control) = @_; - - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - my $s = $string; - my $result = ""; - while (($char, $rest) = ($s =~ /^(.[\x80-\xBF]*)(.*)$/)) { - my $lc_char = $ht{UPPER_TO_LOWER_CASE}->{$char}; - if (defined($lc_char)) { - $result .= $lc_char; - } elsif ($norm_punct_p && defined($new_char = $ht{NORM_PUNCT}->{$char})) { - $result .= $new_char; - } else { - $result .= $char; - } - $s = $rest; - } - return $result; -} - -sub round_to_n_decimal_places { - local($this, $x, $n, $fill_decimals_p) = @_; - - $fill_decimals_p = 0 unless defined($fill_decimals_p); - unless (defined($x)) { - return $x; - } - if (($x =~ /^-?\d+$/) && (! $fill_decimals_p)) { - return $x; - } - $factor = 1; - foreach $i ((1 .. $n)) { - $factor *= 10; - } - my $rounded_number; - if ($x > 0) { - $rounded_number = (int(($factor * $x) + 0.5) / $factor); - } else { - $rounded_number = (int(($factor * $x) - 0.5) / $factor); - } - if ($fill_decimals_p) { - ($period, $decimals) = ($rounded_number =~ /^-?\d+(\.?)(\d*)$/); - $rounded_number .= "." unless $period || ($n == 0); - foreach ((1 .. ($n - length($decimals)))) { - $rounded_number .= 0; - } - } - return $rounded_number; -} - -sub commify { - local($caller,$number) = @_; - - my $text = reverse $number; - $text =~ s/(\d\d\d)(?=\d)(?!\d*\.)/$1,/g; - return scalar reverse $text; -} - -sub add_javascript_functions { - local($caller,@function_names) = @_; - - $add_javascript_function_s = ""; - foreach $function_name (@function_names) { - - if ($function_name eq "highlight_elems") { - $add_javascript_function_s .= " - function highlight_elems(group_id, value) { - if (group_id != '') { - i = 1; - id = group_id + '-' + i; - while ((s = document.getElementById(id)) != null) { - if (! 
s.origColor) { - if (s.style.color) { - s.origColor = s.style.color; - } else { - s.origColor = '#000000'; - } - } - if (value == '1') { - s.style.color = '#0000FF'; - if (s.innerHTML == '-') { - s.style.innerHtml = s.innerHTML; - s.innerHTML = '-   ← here'; - s.style.fontWeight = 900; - } else { - s.style.fontWeight = 'bold'; - } - } else { - s.style.fontWeight = 'normal'; - s.style.color = s.origColor; - if (s.style.innerHtml != null) { - s.innerHTML = s.style.innerHtml; - } - } - i = i + 1; - id = group_id + '-' + i; - } - } - } -"; - } elsif ($function_name eq "set_style_for_ids") { - $add_javascript_function_s .= " - function set_style_for_ids(style,id_list) { - var ids = id_list.split(/\\s+/); - var len = ids.length; - var s; - for (var i=0; i>$filename")) { - print OUT $s; - close(OUT); - $result = "Appended"; - } else { - $result = "Can't append"; - } - } else { - if (open(OUT, ">$filename")) { - print OUT $s; - close(OUT); - $result = "Wrote"; - } else { - $result = "Can't write"; - } - } - chmod($mod, $filename) if defined($mod) && -e $filename; - return $result; -} - -sub square { - local($caller, $x) = @_; - - return $x * $x; -} - -sub mutual_info { - local($caller, $ab_count, $a_count, $b_count, $total_count, $smoothing) = @_; - - $smoothing = 1 unless defined($smoothing); - $ab_count = 0 unless defined($ab_count); - return 0 unless $a_count && $b_count && $total_count; - - my $p_ab = $ab_count / $total_count; - my $p_a = $a_count / $total_count; - my $p_b = $b_count / $total_count; - my $expected_ab = $p_a * $p_b * $total_count; - - return -99 unless $expected_ab || $smoothing; - - return CORE::log(($ab_count + $smoothing) / ($expected_ab + $smoothing)); -} - -sub mutual_info_multi { - local($caller, $multi_count, $total_count, $smoothing, @counts) = @_; - - return 0 unless $total_count; - my $p_indivuals = 1; - foreach $count (@counts) { - return 0 unless $count; - $p_indivuals *= ($count / $total_count); - } - my $expected_multi_count = $p_indivuals * $total_count; - # print STDERR "actual vs. expected multi_count($multi_count, $total_count, $smoothing, @counts) = $multi_count vs. $expected_multi_count\n"; - - return -99 unless $expected_multi_count || $smoothing; - - return CORE::log(($multi_count + $smoothing) / ($expected_multi_count + $smoothing)); -} - -sub precision_recall_fmeasure { - local($caller, $n_gold, $n_test, $n_shared, $pretty_print_p) = @_; - - unless (($n_gold =~ /^[1-9]\d*$/) && ($n_test =~ /^[1-9]\d*$/)) { - $zero = ($pretty_print_p) ? "0%" : 0; - if ($n_gold =~ /^[1-9]\d*$/) { - return ("n/a", $zero, $zero); - } elsif ($n_test =~ /^[1-9]\d*$/) { - return ($zero, "n/a", $zero); - } else { - return ("n/a", "n/a", "n/a"); - } - } - my $precision = $n_shared / $n_test; - my $recall = $n_shared / $n_gold; - my $f_measure = ($precision * $recall * 2) / ($precision + $recall); - - return ($precision, $recall, $f_measure) unless $pretty_print_p; - - my $pretty_precision = $caller->round_to_n_decimal_places(100*$precision, 1) . "%"; - my $pretty_recall = $caller->round_to_n_decimal_places(100*$recall, 1) . "%"; - my $pretty_f_measure = $caller->round_to_n_decimal_places(100*$f_measure, 1) . 
"%"; - - return ($pretty_precision, $pretty_recall, $pretty_f_measure); -} - -sub recapitalize_named_entity { - local($caller, $s) = @_; - - my @comps = (); - foreach $comp (split(/\s+/, $s)) { - if ($comp =~ /^(and|da|for|of|on|the|van|von)$/) { - push(@comps, $comp); - } elsif ($comp =~ /^[a-z]/) { - push(@comps, ucfirst $comp); - } else { - push(@comps, $comp); - } - } - return join(" ", @comps); -} - -sub slot_value_in_double_colon_del_list { - local($this, $s, $slot, $default) = @_; - - $default = "" unless defined($default); - if (($value) = ($s =~ /::$slot\s+(\S.*\S|\S)\s*$/)) { - $value =~ s/\s*::\S.*\s*$//; - return $value; - } else { - return $default; - } -} - -sub synt_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::synt\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub form_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::form\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub lex_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::lex\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub multi_slot_value_in_double_colon_del_list { - # e.g. when there are multiple slot/value pairs in a line, e.g. ::eng ... :eng ... - local($this, $s, $slot) = @_; - - @values = (); - while (($value, $rest) = ($s =~ /::$slot\s+(\S|\S.*?\S)(\s+::\S.*|\s*)$/)) { - push(@values, $value); - $s = $rest; - } - return @values; -} - -sub remove_slot_in_double_colon_del_list { - local($this, $s, $slot) = @_; - - $s =~ s/::$slot(?:|\s+\S|\s+\S.*?\S)(\s+::\S.*|\s*)$/$1/; - $s =~ s/^\s*//; - return $s; -} - -sub extract_split_info_from_split_dir { - local($this, $dir, *ht) = @_; - - my $n_files = 0; - my $n_snt_ids = 0; - if (opendir(DIR, $dir)) { - my @filenames = sort readdir(DIR); - closedir(DIR); - foreach $filename (@filenames) { - next unless $filename =~ /\.txt$/; - my $split_class; - if (($split_class) = ($filename =~ /-(dev|training|test)-/)) { - my $full_filename = "$dir/$filename"; - if (open(IN, $full_filename)) { - my $old_n_snt_ids = $n_snt_ids; - while () { - if (($snt_id) = ($_ =~ /^#\s*::id\s+(\S+)/)) { - if ($old_split_class = $ht{SPLIT_CLASS}->{$snt_id}) { - unless ($old_split_class eq $split_class) { - print STDERR "Conflicting split class for $snt_id: $old_split_class $split_class\n"; - } - } else { - $ht{SPLIT_CLASS}->{$snt_id} = $split_class; - $ht{SPLIT_CLASS_COUNT}->{$split_class} = ($ht{SPLIT_CLASS_COUNT}->{$split_class} || 0) + 1; - $n_snt_ids++; - } - } - } - $n_files++ unless $n_snt_ids == $old_n_snt_ids; - close(IN); - } else { - print STDERR "Can't open file $full_filename"; - } - } else { - print STDERR "Skipping file $filename when extracting split info from $dir\n"; - } - } - print STDERR "Extracted $n_snt_ids split classes from $n_files files.\n"; - } else { - print STDERR "Can't open directory $dir to extract split info.\n"; - } -} - -sub extract_toks_for_split_class_from_dir { - local($this, $dir, *ht, $split_class, $control) = @_; - - $control = "" unless defined($control); - $print_snt_id_p = ($control =~ /\bwith-snt-id\b/); - my $n_files = 0; - my $n_snts = 0; - if (opendir(DIR, $dir)) { - my @filenames = sort readdir(DIR); - closedir(DIR); - foreach $filename (@filenames) { - next unless $filename =~ /^alignment-release-.*\.txt$/; - my $full_filename = "$dir/$filename"; - if (open(IN, $full_filename)) { - my $old_n_snts = $n_snts; - my $snt_id = ""; - while () { - if (($s_value) = ($_ =~ 
/^#\s*::id\s+(\S+)/)) { - $snt_id = $s_value; - $proper_split_class_p - = ($this_split_class = $ht{SPLIT_CLASS}->{$snt_id}) - && ($this_split_class eq $split_class); - } elsif (($tok) = ($_ =~ /^#\s*::tok\s+(\S|\S.*\S)\s*$/)) { - if ($proper_split_class_p) { - print "$snt_id " if $print_snt_id_p; - print "$tok\n"; - $n_snts++; - } - } - } - $n_files++ unless $n_snts == $old_n_snts; - close(IN); - } else { - print STDERR "Can't open file $full_filename"; - } - } - print STDERR "Extracted $n_snts tokenized sentences ($split_class) from $n_files files.\n"; - } else { - print STDERR "Can't open directory $dir to extract tokens.\n"; - } -} - -sub load_relevant_tok_ngram_corpus { - local($this, $filename, *ht, $max_lex_rule_span, $ngram_count_min, $optional_ngram_output_filename) = @_; - - $ngram_count_min = 1 unless $ngram_count_min; - $max_lex_rule_span = 10 unless $max_lex_rule_span; - my $n_ngram_instances = 0; - my $n_ngram_types = 0; - if (open(IN, $filename)) { - while () { - s/\s*$//; - @tokens = split(/\s+/, $_); - foreach $from_token_index ((0 .. $#tokens)) { - foreach $to_token_index (($from_token_index .. ($from_token_index + $max_lex_rule_span -1))) { - last if $to_token_index > $#tokens; - my $ngram = join(" ", @tokens[$from_token_index .. $to_token_index]); - $ht{RELEVANT_NGRAM}->{$ngram} = ($ht{RELEVANT_NGRAM}->{$ngram} || 0) + 1; - } - } - } - close(IN); - if ($optional_ngram_output_filename && open(OUT, ">$optional_ngram_output_filename")) { - foreach $ngram (sort keys %{$ht{RELEVANT_NGRAM}}) { - $count = $ht{RELEVANT_NGRAM}->{$ngram}; - next unless $count >= $ngram_count_min; - print OUT "($count) $ngram\n"; - $n_ngram_types++; - $n_ngram_instances += $count; - } - close(OUT); - print STDERR "Extracted $n_ngram_types ngram types, $n_ngram_instances ngram instances.\n"; - print STDERR "Wrote ngram stats to $optional_ngram_output_filename\n"; - } - } else { - print STDERR "Can't open relevant tok ngram corpus $filename\n"; - } -} - -sub load_relevant_tok_ngrams { - local($this, $filename, *ht) = @_; - - my $n_entries = 0; - if (open(IN, $filename)) { - while () { - s/\s*$//; - if (($count, $ngram) = ($_ =~ /^\((\d+)\)\s+(\S|\S.*\S)\s*$/)) { - $lc_ngram = lc $ngram; - $ht{RELEVANT_NGRAM}->{$lc_ngram} = ($ht{RELEVANT_NGRAM}->{$lc_ngram} || 0) + $count; - $ht{RELEVANT_LC_NGRAM}->{$lc_ngram} = ($ht{RELEVANT_LC_NGRAM}->{$lc_ngram} || 0) + $count; - $n_entries++; - } - } - close(IN); - print STDERR "Read in $n_entries entries from $filename\n"; - } else { - print STDERR "Can't open relevant tok ngrams from $filename\n"; - } -} - -sub snt_id_sort_function { - local($this, $a, $b) = @_; - - if ((($core_a, $index_a) = ($a =~ /^(\S+)\.(\d+)$/)) - && (($core_b, $index_b) = ($b =~ /^(\S+)\.(\d+)$/))) { - return ($core_a cmp $core_b) || ($index_a <=> $index_b); - } else { - return $a cmp $b; - } -} - -sub count_value_sort_function { - local($this, $a_count, $b_count, $a_value, $b_value, $control) = @_; - - # normalize fractions such as "1/2" - if ($a_count > $b_count) { - return ($control eq "decreasing") ? -1 : 1; - } elsif ($b_count > $a_count) { - return ($control eq "decreasing") ? 
1 : -1; - } - $a_value = $num / $den if ($num, $den) = ($a_value =~ /^([1-9]\d*)\/([1-9]\d*)$/); - $b_value = $num / $den if ($num, $den) = ($b_value =~ /^([1-9]\d*)\/([1-9]\d*)$/); - $a_value =~ s/:/\./ if $a_value =~ /^\d+:\d+$/; - $b_value =~ s/:/\./ if $b_value =~ /^\d+:\d+$/; - if (($a_value =~ /^-?\d+(\.\d+)?$/) - && ($b_value =~ /^-?\d+(\.\d+)?$/)) { - return $a_value <=> $b_value; - } elsif ($a_value =~ /^-?\d+(\.\d+)?$/) { - return 1; - } elsif ($b_value =~ /^-?\d+(\.\d+)?$/) { - return -1; - } else { - return $a_value cmp $b_value; - } -} - -sub undef_to_blank { - local($this, $x) = @_; - - return (defined($x)) ? $x : ""; -} - -sub en_lex_amr_list { - local($this, $s) = @_; - - $bpe = qr{ \( (?: (?> [^()]+ ) | (??{ $bpe }))* \) }x; # see Perl Cookbook 2nd ed. p. 218 - @en_lex_amr_list = (); - my $amr_s; - my $lex; - my $test; - while ($s =~ /\S/) { - $s =~ s/^\s*//; - if (($s =~ /^\([a-z]\d* .*\)/) - && (($amr_s, $rest) = ($s =~ /^($bpe)(\s.*|)$/))) { - push(@en_lex_amr_list, $amr_s); - $s = $rest; - } elsif (($lex, $rest) = ($s =~ /^\s*(\S+)(\s.*|)$/)) { - push(@en_lex_amr_list, $lex); - $s = $rest; - } else { - print STDERR "en_lex_amr_list can't process: $s\n"; - $s = ""; - } - } - return @en_lex_amr_list; -} - -sub make_sure_dir_exists { - local($this, $dir, $umask) = @_; - - mkdir($dir, $umask) unless -d $dir; - chmod($umask, $dir); -} - -sub pretty_percentage { - local($this, $numerator, $denominator) = @_; - - return ($denominator == 0) ? "n/a" : ($this->round_to_n_decimal_places(100*$numerator/$denominator, 2) . "%"); -} - -sub html_color_nth_line { - local($this, $s, $n, $color, $delimiter) = @_; - - $delimiter = "
          " unless defined($delimiter); - @lines = split($delimiter, $s); - $lines[$n] = "" . $lines[$n] . "" if ($n =~ /^\d+$/) && ($n <= $#lines); - return join($delimiter, @lines); -} - -sub likely_valid_url_format { - local($this, $url) = @_; - - $url = lc $url; - return 0 if $url =~ /\s/; - return 0 if $url =~ /[@]/; - return 1 if $url =~ /^https?:\/\/.+\.[a-z]+(\?.+)?$/; - return 1 if $url =~ /[a-z].+\.(com|edu|gov|net|org)$/; - return 0; -} - -# see also EnglMorph->special_token_type -$common_file_suffixes = "aspx?|bmp|cgi|docx?|gif|html?|jpeg|jpg|mp3|mp4|pdf|php|png|pptx?|stm|svg|txt|xml"; -$common_top_domain_suffixes = "museum|info|cat|com|edu|gov|int|mil|net|org|ar|at|au|be|bg|bi|br|ca|ch|cn|co|cz|de|dk|es|eu|fi|fr|gr|hk|hu|id|ie|il|in|ir|is|it|jp|ke|kr|lu|mg|mx|my|nl|no|nz|ph|pl|pt|ro|rs|ru|rw|se|sg|sk|so|tr|tv|tw|tz|ua|ug|uk|us|za"; - -sub token_is_url_p { - local($this, $token) = @_; - - return 1 if $token =~ /^www(\.[a-z0-9]([-a-z0-9_]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+)+\.([a-z]{2,2}|$common_top_domain_suffixes)(\/(\.{1,3}|[a-z0-9]([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+))*(\/[a-z0-9_][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 1 if $token =~ /^https?:\/\/([a-z]\.)?([a-z0-9]([-a-z0-9_]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+\.)+[a-z]{2,}(\/(\.{1,3}|([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+))*(\/[a-z_][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 1 if $token =~ /^[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\.($common_top_domain_suffixes)(\/[a-z0-9]([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+)*(\/[a-z][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 0; -} - -sub token_is_email_p { - local($this, $token) = @_; - - return ($token =~ /^[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\@[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\.($common_top_domain_suffixes)$/i); -} - -sub token_is_filename_p { - local($this, $token) = @_; - - return 1 if $token =~ /\.($common_file_suffixes)$/; - return 0; -} - -sub token_is_xml_token_p { - local($this, $token) = @_; - - return ($token =~ /^&(amp|apos|gt|lt|nbsp|quot|&#\d+|&#x[0-9A-F]+);$/i); -} - -sub token_is_handle_p { - local($this, $token) = @_; - - return ($token =~ /^\@[a-z][_a-z0-9]*[a-z0-9]$/i); -} - -sub min { - local($this, @list) = @_; - - my $min = ""; - foreach $item (@list) { - $min = $item if ($item =~ /^-?\d+(?:\.\d*)?$/) && (($min eq "") || ($item < $min)); - } - return $min; -} - -sub max { - local($this, @list) = @_; - - my $max = ""; - foreach $item (@list) { - $max = $item if defined($item) && ($item =~ /^-?\d+(?:\.\d*)?(e[-+]\d+)?$/) && (($max eq "") || ($item > $max)); - } - return $max; -} - -sub split_tok_s_into_tokens { - local($this, $tok_s) = @_; - - @token_list = (); - while (($pre, $link_token, $post) = ($tok_s =~ /^(.*?)\s*(\@?<[^<>]+>\@?)\s*(.*)$/)) { - # generate dummy token for leading blank(s) - if (($tok_s =~ /^\s/) && ($pre eq "") && ($#token_list < 0)) { - push(@token_list, ""); - } else { - push(@token_list, split(/\s+/, $pre)); - } - push(@token_list, $link_token); - $tok_s = $post; - } - push(@token_list, split(/\s+/, $tok_s)); - return @token_list; -} - -sub shuffle { - local($this, @list) = @_; - - @shuffle_list = (); - while (@list) { - $len = $#list + 1; - $rand_position = int(rand($len)); - push(@shuffle_list, $list[$rand_position]); - splice(@list, $rand_position, 1); - } - $s = join(" ", @shuffle_list); - return @shuffle_list; -} - -sub timestamp_to_seconds { - local($this, $timestamp) = @_; - - my $epochtime; - if (($year, $month, $day, $hour, $minute, $second) = ($timestamp =~ 
/^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)$/)) { - $epochtime = timelocal($second, $minute, $hour, $day, $month-1, $year); - } elsif (($year, $month, $day) = ($timestamp =~ /^(\d\d\d\d)-(\d\d)-(\d\d)$/)) { - $epochtime = timelocal(0, 0, 0, $day, $month-1, $year); - } elsif (($year, $month, $day, $hour, $minute, $second, $second_fraction) = ($timestamp =~ /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)\.(\d+)$/)) { - $epochtime = timelocal($second, $minute, $hour, $day, $month-1, $year) + ($second_fraction / (10 ** length($second_fraction))); - } else { - $epochtime = 0; - } - return $epochtime; -} - -sub timestamp_diff_in_seconds { - local($this, $timestamp1, $timestamp2) = @_; - - my $epochtime1 = $this->timestamp_to_seconds($timestamp1); - my $epochtime2 = $this->timestamp_to_seconds($timestamp2); - return $epochtime2 - $epochtime1; -} - -sub dirhash { - # maps string to hash of length 4 with characters [a-z2-8] (shorter acc. to $len) - local($this, $s, $len) = @_; - - $hash = 9999; - $mega = 2 ** 20; - $mega1 = $mega - 1; - $giga = 2 ** 26; - foreach $c (split //, $s) { - $hash = $hash*33 + ord($c); - $hash = ($hash >> 20) ^ ($hash & $mega1) if $hash >= $giga; - } - while ($hash >= $mega) { - $hash = ($hash >> 20) ^ ($hash & $mega1); - } - $result = ""; - while ($hash) { - $c = $hash & 31; - $result .= CORE::chr($c + (($c >= 26) ? 24 : 97)); - $hash = $hash >> 5; - } - while (length($result) < 4) { - $result .= "8"; - } - return substr($result, 0, $len) if $len; - return $result; -} - -sub full_path_python { - - foreach $bin_path (split(":", "/usr/sbin:/usr/bin:/bin:/usr/local/bin")) { - return $python if -x ($python = "$bin_path/python"); - } - return "python"; -} - -sub string_contains_unbalanced_paras { - local($this, $s) = @_; - - return 0 unless $s =~ /[(){}\[\]]/; - $rest = $s; - while (($pre,$left,$right,$post) = ($rest =~ /^(.*)([({\[]).*?([\]})])(.*)$/)) { - return 1 unless (($left eq "(") && ($right eq ")")) - || (($left eq "[") && ($right eq "]")) - || (($left eq "{") && ($right eq "}")); - $rest = "$pre$post"; - } - return 1 if $rest =~ /[(){}\[\]]/; - return 0; -} - -sub dequote_string { - local($this, $s) = @_; - - if ($s =~ /^".*"$/) { - $s = substr($s, 1, -1); - $s =~ s/\\"/"/g; - return $s; - } elsif ($s =~ /^'.*'$/) { - $s = substr($s, 1, -1); - $s =~ s/\\'/'/g; - return $s; - } else { - return $s; - } -} - -sub defined_non_space { - local($this, $s) = @_; - - return (defined($s) && ($s =~ /\S/)); -} - -sub default_if_undefined { - local($this, $s, $default) = @_; - - return (defined($s) ? $s : $default); -} - -sub remove_empties { - local($this, @list) = @_; - - @filtered_list = (); - foreach $elem (@list) { - push(@filtered_list, $elem) if defined($elem) && (! ($elem =~ /^\s*$/)) && (! $this->member($elem, @filtered_list)); - } - - return @filtered_list; -} - -# copied from AMRexp.pm -sub new_var_for_surf_amr { - local($this, $amr_s, $s) = @_; - - my $letter = ($s =~ /^[a-z]/i) ? 
lc substr($s, 0, 1) : "x"; - return $letter unless ($amr_s =~ /:\S+\s+\($letter\s+\//) - || ($amr_s =~ /\s\($letter\s+\//) - || ($amr_s =~ /^\s*\($letter\s+\//); # ))) - my $i = 2; - while (($amr_s =~ /:\S+\s+\($letter$i\s+\//) - || ($amr_s =~ /\s+\($letter$i\s+\//) - || ($amr_s =~ /^\s*\($letter$i\s+\//)) { # ))) - $i++; - } - return "$letter$i"; -} - -# copied from AMRexp.pm -sub new_vars_for_surf_amr { - local($this, $amr_s, $ref_amr_s) = @_; - - my $new_amr_s = ""; - my %new_var_ht = (); - my $remaining_amr_s = $amr_s; - my $pre; my $var; my $concept; my $post; - while (($pre, $var, $concept, $post) = ($remaining_amr_s =~ /^(.*?\()([a-z]\d*)\s+\/\s+([^ ()\s]+)(.*)$/s)) { - $new_var = $this->new_var_for_surf_amr("$ref_amr_s $new_amr_s", $concept); - $new_var_ht{$var} = $new_var; - $new_amr_s .= "$pre$new_var / $concept"; - $remaining_amr_s = $post; - } - $new_amr_s .= $remaining_amr_s; - - # also update any reentrancy variables - $remaining_amr_s = $new_amr_s; - $new_amr_s2 = ""; - while (($pre, $var, $post) = ($remaining_amr_s =~ /^(.*?:\S+\s+)([a-z]\d*)([ ()\s].*)$/s)) { - $new_var = $new_var_ht{$var} || $var; - $new_amr_s2 .= "$pre$new_var"; - $remaining_amr_s = $post; - } - $new_amr_s2 .= $remaining_amr_s; - - return $new_amr_s2; -} - -sub update_inner_span_for_id { - local($this, $html_line, $slot, $new_value) = @_; - # e.g. slot: workset-language-name value: Uyghur - - if (defined($new_value) - && (($pre, $old_value, $post) = ($html_line =~ /^(.*]* id="$slot"[^<>]*>)([^<>]*)(<\/span\b[^<>]*>.*)$/i)) - && ($old_value ne $new_value)) { - # print STDERR "Inserting new $slot $old_value -> $new_value\n"; - return $pre . $new_value . $post . "\n"; - } else { - # no change - return $html_line; - } -} - -sub levenshtein_distance { - local($this, $s1, $s2) = @_; - - my $i; - my $j; - my @distance; - my @s1_chars = $utf8->split_into_utf8_characters($s1, "return only chars", *empty_ht); - my $s1_length = $#s1_chars + 1; - my @s2_chars = $utf8->split_into_utf8_characters($s2, "return only chars", *empty_ht); - my $s2_length = $#s2_chars + 1; - for ($i = 0; $i <= $s1_length; $i++) { - $distance[$i][0] = $i; - } - for ($j = 1; $j <= $s2_length; $j++) { - $distance[0][$j] = $j; - } - for ($j = 1; $j <= $s2_length; $j++) { - for ($i = 1; $i <= $s1_length; $i++) { - my $substitution_cost = ($s1_chars[$i-1] eq $s2_chars[$j-1]) ? 
0 : 1; - $distance[$i][$j] = $this->min($distance[$i-1][$j] + 1, - $distance[$i][$j-1] + 1, - $distance[$i-1][$j-1] + $substitution_cost); - # print STDERR "SC($i,$j) = $substitution_cost\n"; - # $d = $distance[$i][$j]; - # print STDERR "D($i,$j) = $d\n"; - } - } - return $distance[$s1_length][$s2_length]; -} - -sub markup_parts_of_string_in_common_with_ref { - local($this, $s, $ref, $start_markup, $end_markup, $deletion_markup, $verbose) = @_; - - # \x01 temporary start-markup - # \x02 temporary end-markup - # \x03 temporary deletion-markup - $s =~ s/[\x01-\x03]//g; - $ref =~ s/[\x01-\x03]//g; - my $i; - my $j; - my @distance; - my @s_chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - my $s_length = $#s_chars + 1; - my @ref_chars = $utf8->split_into_utf8_characters($ref, "return only chars", *empty_ht); - my $ref_length = $#ref_chars + 1; - $distance[0][0] = 0; - $del_ins_subst_op[0][0] = "-"; - for ($i = 1; $i <= $s_length; $i++) { - $distance[$i][0] = $i; - $del_ins_subst_op[$i][0] = 0; - } - for ($j = 1; $j <= $ref_length; $j++) { - $distance[0][$j] = $j; - $del_ins_subst_op[0][$j] = 1; - } - for ($j = 1; $j <= $ref_length; $j++) { - for ($i = 1; $i <= $s_length; $i++) { - my $substitution_cost = (($s_chars[$i-1] eq $ref_chars[$j-1])) ? 0 : 1; - my @del_ins_subst_list = ($distance[$i-1][$j] + 1, - $distance[$i][$j-1] + 1, - $distance[$i-1][$j-1] + $substitution_cost); - my $min = $this->min(@del_ins_subst_list); - my $del_ins_subst_position = $this->position($min, @del_ins_subst_list); - $distance[$i][$j] = $min; - $del_ins_subst_op[$i][$j] = $del_ins_subst_position; - } - } - $d = $distance[$s_length][$ref_length]; - print STDERR "markup_parts_of_string_in_common_with_ref LD($s,$ref) = $d\n" if $verbose; - for ($j = 0; $j <= $ref_length; $j++) { - for ($i = 0; $i <= $s_length; $i++) { - $d = $distance[$i][$j]; - $op = $del_ins_subst_op[$i][$j]; - print STDERR "$d($op) " if $verbose; - } - print STDERR "\n" if $verbose; - } - my $result = ""; - my $i_end = $s_length; - my $j_end = $ref_length; - my $cost = $distance[$i_end][$j_end]; - $i = $i_end; - $j = $j_end; - while (1) { - $result2 = $result; - $result2 =~ s/\x01/$start_markup/g; - $result2 =~ s/\x02/$end_markup/g; - $result2 =~ s/\x03/$deletion_markup/g; - print STDERR "i:$i i-end:$i_end j:$j j-end:$j_end r: $result2\n" if $verbose; - # matching characters - if ($i && $j && ($del_ins_subst_op[$i][$j] == 2) && ($distance[$i-1][$j-1] == $distance[$i][$j])) { - $i--; - $j--; - } else { - # previously matching characters - if (($i < $i_end) && ($j < $j_end)) { - my $sub_s = join("", @s_chars[$i .. $i_end-1]); - $result = "\x01" . $sub_s . "\x02" . $result; - } - # character substitution - if ($i && $j && ($del_ins_subst_op[$i][$j] == 2)) { - $i--; - $j--; - $result = $s_chars[$i] . $result; - } elsif ($i && ($del_ins_subst_op[$i][$j] == 0)) { - $i--; - $result = $s_chars[$i] . $result; - } elsif ($j && ($del_ins_subst_op[$i][$j] == 1)) { - $j--; - $result = "\x03" . 
$result; - } else { - last; - } - $i_end = $i; - $j_end = $j; - } - } - $result2 = $result; - $result2 =~ s/\x01/$start_markup/g; - $result2 =~ s/\x02/$end_markup/g; - $result2 =~ s/\x03/$deletion_markup/g; - print STDERR "i:$i i-end:$i_end j:$j j-end:$j_end r: $result2 *\n" if $verbose; - $result =~ s/(\x02)\x03+(\x01)/$1$deletion_markup$2/g; - $result =~ s/(\x02)\x03+$/$1$deletion_markup/g; - $result =~ s/^\x03+(\x01)/$deletion_markup$1/g; - $result =~ s/\x03//g; - $result =~ s/\x01/$start_markup/g; - $result =~ s/\x02/$end_markup/g; - return $result; -} - -sub env_https { - my $https = $ENV{'HTTPS'}; - return 1 if $https && ($https eq "on"); - - my $http_via = $ENV{'HTTP_VIA'}; - return 1 if $http_via && ($http_via =~ /\bHTTPS\b.* \d+(?:\.\d+){3,}:443\b/); # tmp for beta.isi.edu - - return 0; -} - -sub env_http_host { - return $ENV{'HTTP_HOST'} || ""; -} - -sub env_script_filename { - return $ENV{'SCRIPT_FILENAME'} || ""; -} - -sub cgi_mt_app_root_dir { - local($this, $target) = @_; - my $s; - if ($target =~ /filename/i) { - $s = $ENV{'SCRIPT_FILENAME'} || ""; - } else { - $s = $ENV{'SCRIPT_NAME'} || ""; - } - return "" unless $s; - return $d if ($d) = ($s =~ /^(.*?\/(?:amr-editor|chinese-room-editor|utools|romanizer\/version\/[-.a-z0-9]+|romanizer))\//); - return $d if ($d) = ($s =~ /^(.*)\/(?:bin|src|scripts?)\/[^\/]*$/); - return $d if ($d) = ($s =~ /^(.*)\/[^\/]*$/); - return ""; -} - -sub parent_dir { - local($this, $dir) = @_; - - $dir =~ s/\/[^\/]+\/?$//; - return $dir || "/"; -} - -sub span_start { - local($this, $span, $default) = @_; - - $default = "" unless defined($default); - return (($start) = ($span =~ /^(\d+)-\d+$/)) ? $start : $default; -} - -sub span_end { - local($this, $span, $default) = @_; - - $default = "" unless defined($default); - return (($end) = ($span =~ /^\d+-(\d+)$/)) ? $end : $default; -} - -sub oct_mode { - local($this, $filename) = @_; - - @stat = stat($filename); - return "" unless @stat; - $mode = $stat[2]; - $oct_mode = sprintf("%04o", $mode & 07777); - return $oct_mode; -} - -sub csv_to_list { - local($this, $s, $control_string) = @_; - # Allow quoted string such as "Wait\, what?" as element with escaped comma inside. 
- - $control_string = "" unless defined($control_string); - $strip_p = ($control_string =~ /\bstrip\b/); - $allow_simple_commas_in_quote = ($control_string =~ /\bsimple-comma-ok\b/); - $ignore_empty_elem_p = ($control_string =~ /\bno-empty\b/); - @cvs_list = (); - while ($s ne "") { - if ((($elem, $rest) = ($s =~ /^"((?:\\[,\"]|[^,\"][\x80-\xBF]*)*)"(,.*|)$/)) - || ($allow_simple_commas_in_quote - && (($elem, $rest) = ($s =~ /^"((?:\\[,\"]|[^\"][\x80-\xBF]*)*)"(,.*|)$/))) - || (($elem, $rest) = ($s =~ /^([^,]*)(,.*|\s*)$/)) - || (($elem, $rest) = ($s =~ /^(.*)()$/))) { - if ($strip_p) { - $elem =~ s/^\s*//; - $elem =~ s/\s*$//; - } - push(@cvs_list, $elem) unless $ignore_empty_elem_p && ($elem eq ""); - $rest =~ s/^,//; - $s = $rest; - } else { - print STDERR "Error in csv_to_list processing $s\n"; - last; - } - } - return @cvs_list; -} - -sub kl_divergence { - local($this, $distribution_id, $gold_distribution_id, *ht, $smoothing) = @_; - - my $total_count = $ht{DISTRIBUTION_TOTAL_COUNT}->{$distribution_id}; - my $total_gold_count = $ht{DISTRIBUTION_TOTAL_COUNT}->{$gold_distribution_id}; - return unless $total_count && $total_gold_count; - - my @values = keys %{$ht{DISTRIBUTION_VALUE_COUNT}->{$gold_distribution_id}}; - my $n_values = $#values + 1; - - my $min_total_count = $this->min($total_count, $total_gold_count); - $smoothing = 1 - (10000/((100+$min_total_count)**2)) unless defined($smoothing); - return unless $smoothing; - my $smoothed_n_values = $smoothing * $n_values; - my $divergence = 0; - foreach $value (@values) { - my $count = $ht{DISTRIBUTION_VALUE_COUNT}->{$distribution_id}->{$value} || 0; - my $gold_count = $ht{DISTRIBUTION_VALUE_COUNT}->{$gold_distribution_id}->{$value}; - my $p = ($count + $smoothing) / ($total_count + $smoothed_n_values); - my $q = ($gold_count + $smoothing) / ($total_gold_count + $smoothed_n_values); - if ($p == 0) { - # no impact on divergence - } elsif ($q) { - my $incr = $p * CORE::log($p/$q); - $divergence += $incr; - my $incr2 = $this->round_to_n_decimal_places($incr, 5); - my $p2 = $this->round_to_n_decimal_places($p, 5); - my $q2 = $this->round_to_n_decimal_places($q, 5); - $incr2 = "+" . $incr2 if $incr > 0; - $log = " value: $value count: $count gold_count: $gold_count p: $p2 q: $q2 $incr2\n"; - $ht{KL_DIVERGENCE_LOG}->{$distribution_id}->{$gold_distribution_id}->{$value} = $log; - $ht{KL_DIVERGENCE_INCR}->{$distribution_id}->{$gold_distribution_id}->{$value} = $incr; - } else { - $divergence += 999; - } - } - return $divergence; -} - -sub read_ISO_8859_named_entities { - local($this, *ht, $filename, $verbose) = @_; - # e.g. from /nfs/isd/ulf/arabic/data/ISO-8859-1-HTML-named-entities.txt - # - # - # - # - # - # - - my $n = 0; - if (open(IN, $filename)) { - while () { - s/^\xEF\xBB\xBF//; - if (($name, $dec_unicode) = ($_ =~ /^{$name} = $dec_unicode; - $ht{HTML_ENTITY_DECUNICODE_TO_NAME}->{$dec_unicode} = $name; - $ht{HTML_ENTITY_NAME_TO_UTF8}->{$name} = $utf8->unicode2string($dec_unicode); - $n++; - # print STDERR "read_ISO_8859_named_entities $name $dec_unicode .\n" if $name =~ /dash/; - } - } - close(IN); - print STDERR "Loaded $n entries from $filename\n" if $verbose; - } else { - print STDERR "Could not open $filename\n" if $verbose; - } -} - -sub neg { - local($this, $x) = @_; - - # robust - return (defined($x) && ($x =~ /^-?\d+(?:\.\d+)?$/)) ? (- $x) : $x; -} - -sub read_ttable_gloss_data { - local($this, $filename, $lang_code, *ht, $direction) = @_; - # e.g. 
/nfs/isd/ulf/croom/oov-lanpairs/som-eng/som-eng-ttable-glosses.txt - - $direction = "f to e" unless defined($direction); - if (open(IN, $filename)) { - while () { - if (($headword, $gloss) = ($_ =~ /^(.*?)\t(.*?)\s*$/)) { - if ($direction eq "e to f") { - $ht{TTABLE_E_GLOSS}->{$lang_code}->{$headword} = $gloss; - } else { - $ht{TTABLE_F_GLOSS}->{$lang_code}->{$headword} = $gloss; - } - } - } - close(IN); - } -} - -sub format_gloss_for_tooltop { - local($this, $gloss) = @_; - - $gloss =~ s/^\s*/\t/; - $gloss =~ s/\s*$//; - $gloss =~ s/ / /g; - $gloss =~ s/\t/ /g; - return $gloss; -} - -sub obsolete_tooltip { - local($this, $s, $lang_code, *ht) = @_; - - return $gloss if defined($gloss = $ht{TTABLE_F_GLOSS}->{$lang_code}->{$s}); - @e_s = sort { $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$b} - <=> $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$a} } - keys %{$ht{T_TABLE_F_E_C}->{$lang_code}->{$s}}; - if (@e_s) { - $e = shift @e_s; - $count = $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$e}; - $min_count = $this->max($count * 0.01, 1.0); - $count =~ s/(\.\d\d)\d*$/$1/; - $result = "$s: $e ($count)"; - $n = 1; - while (@e_s) { - $e = shift @e_s; - $count = $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$e}; - last if $count < $min_count; - $count =~ s/(\.\d\d)\d*$/$1/; - $result .= " $e ($count)"; - $n++; - last if $n >= 10; - } - $ht{TTABLE_F_GLOSS}->{$lang_code}->{$s} = $result; - return $result; - } else { - return ""; - } -} - -sub markup_html_line_init { - local($this, $s, *ht, $id) = @_; - - my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - $ht{S}->{$id} = $s; -} - -sub markup_html_line_regex { - local($this, $id, *ht, $regex, $m_slot, $m_value, *LOG) = @_; - - unless ($regex eq "") { - my $s = $ht{S}->{$id}; - my $current_pos = 0; - while (($pre, $match_s, $post) = ($s =~ /^(.*?)($regex)(.*)$/)) { - $current_pos += $utf8->length_in_utf8_chars($pre); - my $match_len = $utf8->length_in_utf8_chars($match_s); - $ht{START}->{$id}->{$current_pos}->{$m_slot}->{$m_value} = 1; - $ht{STOP}->{$id}->{($current_pos+$match_len)}->{$m_slot}->{$m_value} = 1; - $current_pos += $match_len; - $s = $post; - } - } -} - -sub html_markup_line { - local($this, $id, *ht, *LOG) = @_; - - my @titles = (); - my @colors = (); - my @text_decorations = (); - - my $s = $ht{S}->{$id}; - # print LOG "html_markup_line $id: $s\n"; - my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - my $markedup_s = ""; - - my $new_title = ""; - my $new_color = ""; - my $new_text_decoration = ""; - my $n_spans = 0; - my $i; - foreach $i ((0 .. 
($#chars+1))) { - my $stop_span_p = 0; - foreach $m_slot (keys %{$ht{STOP}->{$id}->{$i}}) { - foreach $m_value (keys %{$ht{STOP}->{$id}->{$i}->{$m_slot}}) { - if ($m_slot eq "title") { - my $last_positition = $this->last_position($m_value, @titles); - splice(@titles, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } elsif ($m_slot eq "color") { - my $last_positition = $this->last_position($m_value, @colors); - splice(@colors, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } elsif ($m_slot eq "text-decoration") { - my $last_positition = $this->last_position($m_value, @text_decorations); - splice(@text_decorations, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } - } - } - if ($stop_span_p) { - $markedup_s .= ""; - $n_spans--; - } - my $start_span_p = 0; - foreach $m_slot (keys %{$ht{START}->{$id}->{$i}}) { - foreach $m_value (keys %{$ht{START}->{$id}->{$i}->{$m_slot}}) { - if ($m_slot eq "title") { - push(@titles, $m_value); - $start_span_p = 1; - } elsif ($m_slot eq "color") { - push(@colors, $m_value); - $start_span_p = 1; - } elsif ($m_slot eq "text-decoration") { - push(@text_decorations, $m_value); - $start_span_p = 1; - } - } - } - if ($stop_span_p || $start_span_p) { - my $new_title = (@titles) ? $titles[$#titles] : ""; - my $new_color = (@colors) ? $colors[$#colors] : ""; - my $new_text_decoration = (@text_decorations) ? $text_decorations[$#text_decorations] : ""; - if ($new_title || $new_color || $new_text_decoration) { - my $args = ""; - if ($new_title) { - $g_title = $this->guard_html_quote($new_title); - $args .= " title=\"$g_title\""; - } - if ($new_color || $new_text_decoration) { - $g_color = $this->guard_html_quote($new_color); - $g_text_decoration = $this->guard_html_quote($new_text_decoration); - $color_clause = ($new_color) ? "color:$g_color;" : ""; - $text_decoration_clause = ($new_text_decoration) ? "text-decoration:$g_text_decoration;" : ""; - $text_decoration_clause =~ s/text-decoration:(border-bottom:)/$1/g; - $args .= " style=\"$color_clause$text_decoration_clause\""; - } - if ($n_spans) { - $markedup_s .= ""; - $n_spans--; - } - $markedup_s .= ""; - $n_spans++; - } - } - $markedup_s .= $chars[$i] if $i <= $#chars; - } - print LOG "Error in html_markup_line $id final no. of open spans: $n_spans\n" if $n_spans && $tokenization_log_verbose; - return $markedup_s; -} - -sub offset_adjustment { - local($this, $g, $s, $offset, $snt_id, *ht, *LOG, $control) = @_; - # s(tring) e.g. "can't" - # g(old string) e.g. "can not" - # Typically when s is a slight variation of g (e.g. with additional tokenization spaces in s) - # returns mapping 0->0, 1->1, 2->2, 3->3, 6->4, 7->5 - - $control = "" unless defined($control); - my $verbose = ($control =~ /\bverbose\b/); - my $s_offset = 0; - my $g_offset = 0; - my @s_chars = $utf8->split_into_utf8_characters($s, "return only chars", *ht); - my @g_chars = $utf8->split_into_utf8_characters($g, "return only chars", *ht); - my $s_len = $#s_chars + 1; - my $g_len = $#g_chars + 1; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{($s_offset+$s_len)} = $g_offset+$g_len; - - while (($s_offset < $s_len) && ($g_offset < $g_len)) { - if ($s_chars[$s_offset] eq $g_chars[$g_offset]) { - $s_offset++; - $g_offset++; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - } else { - my $best_gm = 0; - my $best_sm = 0; - my $best_match_len = 0; - foreach $max_m ((1 .. 4)) { - foreach $sm ((0 .. 
$max_m)) { - $max_match_len = 0; - while ((($s_index = $s_offset+$sm+$max_match_len) < $s_len) - && (($g_index = $g_offset+$max_m+$max_match_len) < $g_len)) { - if ($s_chars[$s_index] eq $g_chars[$g_index]) { - $max_match_len++; - } else { - last; - } - } - if ($max_match_len > $best_match_len) { - $best_match_len = $max_match_len; - $best_sm = $sm; - $best_gm = $max_m; - } - } - foreach $gm ((0 .. $max_m)) { - $max_match_len = 0; - while ((($s_index = $s_offset+$max_m+$max_match_len) < $s_len) - && (($g_index = $g_offset+$gm+$max_match_len) < $g_len)) { - if ($s_chars[$s_index] eq $g_chars[$g_index]) { - $max_match_len++; - } else { - last; - } - } - if ($max_match_len > $best_match_len) { - $best_match_len = $max_match_len; - $best_sm = $max_m; - $best_gm = $gm; - } - } - } - if ($best_match_len) { - $s_offset += $best_sm; - $g_offset += $best_gm; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - } else { - last; - } - } - } - if ($verbose) { - foreach $s_offset (sort { $a <=> $b } - keys %{$ht{OFFSET_MAP}->{$snt_id}->{$offset}}) { - my $g_offset = $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset}; - print LOG " OFFSET_MAP $snt_id.$offset $s/$g $s_offset -> $g_offset\n" if $tokenization_log_verbose; - } - } -} - -sub length_in_utf8_chars { - local($this, $s) = @_; - - $s =~ s/[\x80-\xBF]//g; - $s =~ s/[\x00-\x7F\xC0-\xFF]/c/g; - return length($s); -} - -sub split_into_utf8_characters { - local($this, $text) = @_; - # "return only chars; return trailing whitespaces" - - @characters = (); - while (($char, $rest) = ($text =~ /^(.[\x80-\xBF]*)(.*)$/)) { - push(@characters, $char); - $text = $rest; - } - return @characters; -} - -sub first_char_of_string { - local($this, $s) = @_; - - $s =~ s/^(.[\x80-\xBF]*).*$/$1/; - return $s; -} - -sub last_char_of_string { - local($this, $s) = @_; - - $s =~ s/^.*([^\x80-\xBF][\x80-\xBF]*)$/$1/; - return $s; -} - -sub first_n_chars_of_string { - local($this, $s, $n) = @_; - - $s =~ s/^((?:.[\x80-\xBF]*){$n,$n}).*$/$1/; - return $s; -} - -sub last_n_chars_of_string { - local($this, $s, $n) = @_; - - $s =~ s/^.*((?:[^\x80-\xBF][\x80-\xBF]*){$n,$n})$/$1/; - return $s; -} - - -1; diff --git a/spaces/kanden/vits-uma-genshin-honkai/app.py b/spaces/kanden/vits-uma-genshin-honkai/app.py deleted file mode 100644 index e716f57cc6ec2dda804e40d0590595154108f3cb..0000000000000000000000000000000000000000 --- a/spaces/kanden/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding=utf-8 -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', 
'').replace(" ", "") - if len(text) > 300: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
          VITS语音在线合成demo\n" - "
          主要有赛马娘,原神中文,原神日语,崩坏3的音色
          " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() diff --git a/spaces/keras-dreambooth/ignatius/app.py b/spaces/keras-dreambooth/ignatius/app.py deleted file mode 100644 index ced5ac2a9169ea8e55e07c10fc0b7d3ee4f836c1..0000000000000000000000000000000000000000 --- a/spaces/keras-dreambooth/ignatius/app.py +++ /dev/null @@ -1,58 +0,0 @@ -from huggingface_hub import from_pretrained_keras -import keras_cv -import gradio as gr -from tensorflow import keras - -keras.mixed_precision.set_global_policy("mixed_float16") -# load keras model -resolution = 512 -dreambooth_model = keras_cv.models.StableDiffusion( - img_width=resolution, img_height=resolution, jit_compile=True, - ) -loaded_diffusion_model = from_pretrained_keras("keras-dreambooth/ignatius") -dreambooth_model._diffusion_model = loaded_diffusion_model - -# generate images -def generate_images(prompt, negative_prompt, num_imgs_to_gen, num_steps, guidance_scale): - """ - This function is used to generate images using our fine-tuned keras dreambooth stable diffusion model. - Args: - prompt (str): The text input given by the user based on which images will be generated. - negative_prompt (srt): The text to eliminate from the generation some concepts. - num_imgs_to_gen (int): The number of images to be generated using given prompt. - num_steps (int): The number of denoising steps - guidance_scale (double): Increasing guidance makes generation follow more closely to the prompt. - Returns: - generated_img (List): List of images that were generated using the model - """ - generated_images = dreambooth_model.text_to_image( - prompt, - negative_prompt=negative_prompt, - batch_size=num_imgs_to_gen, - num_steps=num_steps, - unconditional_guidance_scale=guidance_scale - ) - return generated_images - -with gr.Blocks() as demo: - gr.HTML("

          Ignatius Farray - The cavern of the muffled scream

          ") - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(lines=1, value="ignatius in a standup comedy spectacle", label="Base Prompt") - negative_prompt = gr.Textbox(lines=1, value="bad anatomy, blurry, ugly, deformed", label="Negative Prompt") - samples = gr.Slider(minimum=1, maximum=10, default=1, step=1, label="Number of Image") - num_steps = gr.Slider(label="Inference Steps", value=50, maximum=450) - guidance_scale = gr.Number(label="Guidance scale", value=7.5) - run = gr.Button(value="Run") - with gr.Column(): - gallery = gr.Gallery(label="Outputs").style(grid=(1,2)) - - run.click(generate_images, inputs=[prompt, negative_prompt, samples, num_steps, guidance_scale], outputs=gallery) - - gr.Examples([["ignatius on the moon","bad anatomy, blurry, ugly", 2, 150, 15], - ["A photo of ignatius person inside a box","bad anatomy, blurry, ugly", 2, 150, 15], - ["A closeup portrait of ignatius, highly detailed, high qulity","bad anatomy, blurry, ugly", 2, 150, 15]], - [prompt, negative_prompt, samples, num_steps, guidance_scale], gallery, generate_images) - gr.Markdown('\n Demo created by: Eduardo Matallanas') - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/keras-io/timeseries-classification-from-scratch/app.py b/spaces/keras-io/timeseries-classification-from-scratch/app.py deleted file mode 100644 index a531fce4a7f836e9b28097387009e6507ab9909a..0000000000000000000000000000000000000000 --- a/spaces/keras-io/timeseries-classification-from-scratch/app.py +++ /dev/null @@ -1,99 +0,0 @@ -from functools import partial -from typing import Dict - -import gradio as gr -import numpy as np -import plotly.graph_objects as go -from huggingface_hub import from_pretrained_keras - -ROOT_DATA_URL = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA" -TRAIN_DATA_URL = f"{ROOT_DATA_URL}/FordA_TRAIN.tsv" -TEST_DATA_URL = f"{ROOT_DATA_URL}/FordA_TEST.tsv" -TIMESERIES_LEN = 500 -CLASSES = {"Symptom does NOT exist", "Symptom exists"} - -model = from_pretrained_keras("keras-io/timeseries-classification-from-scratch") - -# Read data -def read_data(file_url: str): - data = np.loadtxt(file_url, delimiter="\t") - y = data[:, 0] - x = data[:, 1:] - return x, y.astype(int) - - -x_train, y_train = read_data(file_url=TRAIN_DATA_URL) -x_test, y_test = read_data(file_url=TEST_DATA_URL) - -# Helper functions -def get_prediction(row_index: int, data: np.ndarray) -> Dict[str, float]: - x = data[row_index].reshape((1, TIMESERIES_LEN, 1)) - predictions = model.predict(x).flatten() - return {k: float(v) for k, v in zip(CLASSES, predictions)} - - -def create_plot(row_index: int, dataset_name: str) -> go.Figure: - x = x_train - row = x[row_index] - scatter = go.Scatter( - x=list(range(TIMESERIES_LEN)), - y=row.flatten(), - mode="lines+markers", - ) - fig = go.Figure(data=scatter) - fig.update_layout(title=f"Timeseries in row {row_index} of {dataset_name} set ") - return fig - - -def show_tab_section(data: np.ndarray, dataset_name: str): - num_indexes = data.shape[0] - index = gr.Slider( - maximum=num_indexes - 1, - label="Select the index of the row you want to classify:", - ) - button = gr.Button("Predict") - plot = gr.Plot() - create_plot_data = partial(create_plot, dataset_name=dataset_name) - button.click(create_plot_data, inputs=[index], outputs=[plot]) - get_prediction_data = partial(get_prediction, data=data) - label = gr.Label() - button.click(get_prediction_data, inputs=[index], outputs=[label]) - - -# Gradio Demo -title = "# Timeseries classification from scratch" 
-description = """ -Select a time series in the Training or Test dataset and ask the model to classify it! -
          -
-The model was trained on the FordA dataset. Each row is a diagnostic session run on an automotive subsystem; in each session, 500 samples were collected. Given a time series, the model was trained to identify whether a specific symptom exists or not. -
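A minimal sketch of the same prediction path outside the UI (assuming network access to the checkpoint and the FordA TSV above; it mirrors the reshape and model.predict call used by get_prediction in this file):

    import numpy as np
    from huggingface_hub import from_pretrained_keras

    model = from_pretrained_keras("keras-io/timeseries-classification-from-scratch")
    data = np.loadtxt(
        "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/FordA_TEST.tsv",
        delimiter="\t",
    )
    x, y = data[:, 1:], data[:, 0].astype(int)   # column 0 holds the label, the rest is the 500-step series
    series = x[0].reshape((1, 500, 1))           # (batch, timesteps, channels), as the model expects
    probs = model.predict(series).flatten()      # two class probabilities
    print(probs, "true label:", y[0])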
          -
          -

          - Model: https://huggingface.co/keras-io/timeseries-classification-from-scratch -
          - Keras Example: https://keras.io/examples/timeseries/timeseries_classification_from_scratch/ -

          -
          -""" -article = """ -
          - Space by Edoardo Abati -
          - Keras example by hfawaz -
          -""" - -demo = gr.Blocks() - -with demo: - gr.Markdown(title) - gr.Markdown(description) - with gr.Tabs(): - with gr.TabItem("Training set"): - show_tab_section(data=x_train, dataset_name="Training") - with gr.TabItem("Test set"): - show_tab_section(data=x_test, dataset_name="Test") - gr.Markdown(article) - -demo.launch(enable_queue=True) diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/data/image_folder.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/data/image_folder.py deleted file mode 100644 index efadc2ecbe2fb4b53b78230aba25ec505eff0e55..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/data/image_folder.py +++ /dev/null @@ -1,66 +0,0 @@ -"""A modified image folder class - -We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) -so that this class can load images from both current directory and its subdirectories. -""" -import numpy as np -import torch.utils.data as data - -from PIL import Image -import os -import os.path - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', - '.tif', '.TIF', '.tiff', '.TIFF', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir, max_dataset_size=float("inf")): - images = [] - assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir - - for root, _, fnames in sorted(os.walk(dir, followlinks=True)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images[:min(max_dataset_size, len(images))] - - -def default_loader(path): - return Image.open(path).convert('RGB') - - -class ImageFolder(data.Dataset): - - def __init__(self, root, transform=None, return_paths=False, - loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise(RuntimeError("Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/partial_fc.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/partial_fc.py deleted file mode 100644 index 17e2d25715d10ba446c957e1d2528b0687ed71d5..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/models/arcface_torch/partial_fc.py +++ /dev/null @@ -1,222 +0,0 @@ -import logging -import os - -import torch -import torch.distributed as dist -from torch.nn import Module -from torch.nn.functional import normalize, linear -from torch.nn.parameter import Parameter - - -class PartialFC(Module): - """ - Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint, - Partial FC: Training 10 Million Identities on a Single Machine - See the original paper: - https://arxiv.org/abs/2010.05222 - """ - - @torch.no_grad() - def __init__(self, rank, local_rank, world_size, batch_size, resume, - margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"): - """ - rank: int - Unique 
process(GPU) ID from 0 to world_size - 1. - local_rank: int - Unique process(GPU) ID within the server from 0 to 7. - world_size: int - Number of GPU. - batch_size: int - Batch size on current rank(GPU). - resume: bool - Select whether to restore the weight of softmax. - margin_softmax: callable - A function of margin softmax, eg: cosface, arcface. - num_classes: int - The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size, - required. - sample_rate: float - The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling - can greatly speed up training, and reduce a lot of GPU memory, default is 1.0. - embedding_size: int - The feature dimension, default is 512. - prefix: str - Path for save checkpoint, default is './'. - """ - super(PartialFC, self).__init__() - # - self.num_classes: int = num_classes - self.rank: int = rank - self.local_rank: int = local_rank - self.device: torch.device = torch.device("cuda:{}".format(self.local_rank)) - self.world_size: int = world_size - self.batch_size: int = batch_size - self.margin_softmax: callable = margin_softmax - self.sample_rate: float = sample_rate - self.embedding_size: int = embedding_size - self.prefix: str = prefix - self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size) - self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size) - self.num_sample: int = int(self.sample_rate * self.num_local) - - self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank)) - self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank)) - - if resume: - try: - self.weight: torch.Tensor = torch.load(self.weight_name) - self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name) - if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local: - raise IndexError - logging.info("softmax weight resume successfully!") - logging.info("softmax weight mom resume successfully!") - except (FileNotFoundError, KeyError, IndexError): - self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device) - self.weight_mom: torch.Tensor = torch.zeros_like(self.weight) - logging.info("softmax weight init!") - logging.info("softmax weight mom init!") - else: - self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device) - self.weight_mom: torch.Tensor = torch.zeros_like(self.weight) - logging.info("softmax weight init successfully!") - logging.info("softmax weight mom init successfully!") - self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank) - - self.index = None - if int(self.sample_rate) == 1: - self.update = lambda: 0 - self.sub_weight = Parameter(self.weight) - self.sub_weight_mom = self.weight_mom - else: - self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank)) - - def save_params(self): - """ Save softmax weight for each rank on prefix - """ - torch.save(self.weight.data, self.weight_name) - torch.save(self.weight_mom, self.weight_mom_name) - - @torch.no_grad() - def sample(self, total_label): - """ - Sample all positive class centers in each rank, and random select neg class centers to filling a fixed - `num_sample`. - - total_label: tensor - Label after all gather, which cross all GPUs. 
- """ - index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local) - total_label[~index_positive] = -1 - total_label[index_positive] -= self.class_start - if int(self.sample_rate) != 1: - positive = torch.unique(total_label[index_positive], sorted=True) - if self.num_sample - positive.size(0) >= 0: - perm = torch.rand(size=[self.num_local], device=self.device) - perm[positive] = 2.0 - index = torch.topk(perm, k=self.num_sample)[1] - index = index.sort()[0] - else: - index = positive - self.index = index - total_label[index_positive] = torch.searchsorted(index, total_label[index_positive]) - self.sub_weight = Parameter(self.weight[index]) - self.sub_weight_mom = self.weight_mom[index] - - def forward(self, total_features, norm_weight): - """ Partial fc forward, `logits = X * sample(W)` - """ - torch.cuda.current_stream().wait_stream(self.stream) - logits = linear(total_features, norm_weight) - return logits - - @torch.no_grad() - def update(self): - """ Set updated weight and weight_mom to memory bank. - """ - self.weight_mom[self.index] = self.sub_weight_mom - self.weight[self.index] = self.sub_weight - - def prepare(self, label, optimizer): - """ - get sampled class centers for cal softmax. - - label: tensor - Label tensor on each rank. - optimizer: opt - Optimizer for partial fc, which need to get weight mom. - """ - with torch.cuda.stream(self.stream): - total_label = torch.zeros( - size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long) - dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label) - self.sample(total_label) - optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None) - optimizer.param_groups[-1]['params'][0] = self.sub_weight - optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom - norm_weight = normalize(self.sub_weight) - return total_label, norm_weight - - def forward_backward(self, label, features, optimizer): - """ - Partial fc forward and backward with model parallel - - label: tensor - Label tensor on each rank(GPU) - features: tensor - Features tensor on each rank(GPU) - optimizer: optimizer - Optimizer for partial fc - - Returns: - -------- - x_grad: tensor - The gradient of features. - loss_v: tensor - Loss value for cross entropy. 
- """ - total_label, norm_weight = self.prepare(label, optimizer) - total_features = torch.zeros( - size=[self.batch_size * self.world_size, self.embedding_size], device=self.device) - dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data) - total_features.requires_grad = True - - logits = self.forward(total_features, norm_weight) - logits = self.margin_softmax(logits, total_label) - - with torch.no_grad(): - max_fc = torch.max(logits, dim=1, keepdim=True)[0] - dist.all_reduce(max_fc, dist.ReduceOp.MAX) - - # calculate exp(logits) and all-reduce - logits_exp = torch.exp(logits - max_fc) - logits_sum_exp = logits_exp.sum(dim=1, keepdims=True) - dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM) - - # calculate prob - logits_exp.div_(logits_sum_exp) - - # get one-hot - grad = logits_exp - index = torch.where(total_label != -1)[0] - one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device) - one_hot.scatter_(1, total_label[index, None], 1) - - # calculate loss - loss = torch.zeros(grad.size()[0], 1, device=grad.device) - loss[index] = grad[index].gather(1, total_label[index, None]) - dist.all_reduce(loss, dist.ReduceOp.SUM) - loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1) - - # calculate grad - grad[index] -= one_hot - grad.div_(self.batch_size * self.world_size) - - logits.backward(grad) - if total_features.grad is not None: - total_features.grad.detach_() - x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True) - # feature gradient all-reduce - dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0))) - x_grad = x_grad * self.world_size - # backward backbone - return x_grad, loss_v diff --git a/spaces/kokofixcomputers/chat-ui/src/lib/stores/pendingMessageIdToRetry.ts b/spaces/kokofixcomputers/chat-ui/src/lib/stores/pendingMessageIdToRetry.ts deleted file mode 100644 index 47eec8770ae561b2c4881c5d001a3d46ee699b3b..0000000000000000000000000000000000000000 --- a/spaces/kokofixcomputers/chat-ui/src/lib/stores/pendingMessageIdToRetry.ts +++ /dev/null @@ -1,4 +0,0 @@ -import type { Message } from "$lib/types/Message"; -import { writable } from "svelte/store"; - -export const pendingMessageIdToRetry = writable(null); diff --git a/spaces/kwinten/attrition/README.md b/spaces/kwinten/attrition/README.md deleted file mode 100644 index adfc803bf3a638ce7947e5345975aa150a0b7f7b..0000000000000000000000000000000000000000 --- a/spaces/kwinten/attrition/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Attrition -emoji: 🔥 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-fd383441.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-fd383441.js deleted file mode 100644 index d4452297e23da0d1edcb62880b67bc84ef55f57d..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-fd383441.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as p,i as c,s as h,B as a,C as e,g as u,E as i,F as n,q as g}from"./index-8c3da1d9.js";function v(l){let t,s;return{c(){t=a("svg"),s=a("polyline"),e(s,"points","20 6 9 17 4 
12"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 24 24"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","3"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round")},m(o,r){u(o,t,r),i(t,s)},p:n,i:n,o:n,d(o){o&&g(t)}}}class m extends p{constructor(t){super(),c(this,t,null,v,h,{})}}function w(l){let t,s,o;return{c(){t=a("svg"),s=a("path"),o=a("path"),e(s,"fill","currentColor"),e(s,"d","M28 10v18H10V10h18m0-2H10a2 2 0 0 0-2 2v18a2 2 0 0 0 2 2h18a2 2 0 0 0 2-2V10a2 2 0 0 0-2-2Z"),e(o,"fill","currentColor"),e(o,"d","M4 18H2V4a2 2 0 0 1 2-2h14v2H4Z"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 32 32")},m(r,d){u(r,t,d),i(t,s),i(t,o)},p:n,i:n,o:n,d(r){r&&g(t)}}}class x extends p{constructor(t){super(),c(this,t,null,w,h,{})}}export{x as C,m as a}; -//# sourceMappingURL=Copy-fd383441.js.map diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03db9d2f.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03db9d2f.js deleted file mode 100644 index e55e1403291de798257ac4302cb35d5d4b39aa26..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03db9d2f.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as F,i as L,s as N,e as j,H as q,G as S,C as w,m as E,g as B,z as Q,ao as V,p as M,t as T,n as z,q as R,r as W,a8 as X,I as O,K as P,ap as Y,M as C,E as y,J as H,a0 as Z,x as p,$ as x,b as I,a as J,h as $,j as ee,k as K,y as G}from"./index-8c3da1d9.js";/* empty css */import{g as le,B as te}from"./Button-62634b34.js";/* empty css */import{B as ae}from"./BlockTitle-338c46c0.js";import"./Info-b95ed9db.js";function U(a,e,t){const l=a.slice();return l[15]=e[t],l[17]=t,l}function ie(a){let e;return{c(){e=O(a[3])},m(t,l){B(t,e,l)},p(t,l){l&8&&P(e,t[3])},d(t){t&&R(e)}}}function A(a,e){let t,l,s,o,m=!1,h,b,i=e[15]+"",_,c,n,f,v;function r(){return e[13](e[15],e[17])}return n=Y(e[12][0]),{key:a,first:null,c(){t=S("label"),l=S("input"),h=q(),b=S("span"),_=O(i),c=q(),l.disabled=e[2],w(l,"type","radio"),w(l,"name",s="radio-"+e[6]),l.__value=o=e[15],l.value=l.__value,w(l,"class","svelte-1p9xokt"),w(b,"class","ml-2 svelte-1p9xokt"),w(t,"style",e[7]),w(t,"class","svelte-1p9xokt"),C(t,"disabled",e[2]),C(t,"selected",e[0]===e[15]),n.p(l),this.first=t},m(k,g){B(k,t,g),y(t,l),l.checked=l.__value===e[0],y(t,h),y(t,b),y(b,_),y(t,c),f||(v=[H(l,"change",e[11]),H(l,"input",r)],f=!0)},p(k,g){e=k,g&4&&(l.disabled=e[2]),g&64&&s!==(s="radio-"+e[6])&&w(l,"name",s),g&2&&o!==(o=e[15])&&(l.__value=o,l.value=l.__value,m=!0),(m||g&3)&&(l.checked=l.__value===e[0]),g&2&&i!==(i=e[15]+"")&&P(_,i),g&128&&w(t,"style",e[7]),g&4&&C(t,"disabled",e[2]),g&3&&C(t,"selected",e[0]===e[15])},d(k){k&&R(t),n.r(),f=!1,Z(v)}}}function ne(a){let e,t,l,s=[],o=new Map,m;e=new ae({props:{show_label:a[5],info:a[4],$$slots:{default:[ie]},$$scope:{ctx:a}}});let h=a[1];const b=i=>i[17];for(let i=0;i{t(9,o=!1)});const r=[[]];function k(){s=this.__value,t(0,s)}const g=(d,D)=>f("select",{value:d,index:D});return a.$$set=d=>{"value"in d&&t(0,s=d.value),"value_is_output"in d&&t(9,o=d.value_is_output),"style"in d&&t(10,m=d.style),"choices"in d&&t(1,h=d.choices),"disabled"in d&&t(2,b=d.disabled),"label"in d&&t(3,i=d.label),"info"in d&&t(4,_=d.info),"show_label"in d&&t(5,c=d.show_label),"elem_id"in 
d&&t(6,n=d.elem_id)},a.$$.update=()=>{a.$$.dirty&1&&v(),a.$$.dirty&1024&&t(7,{item_container:l}=le(m,["item_container"]),l)},[s,h,b,i,_,c,n,l,f,o,m,k,r,g]}class ue extends F{constructor(e){super(),L(this,e,se,ne,N,{value:0,value_is_output:9,style:10,choices:1,disabled:2,label:3,info:4,show_label:5,elem_id:6})}}function _e(a){let e,t,l,s,o,m;const h=[a[11]];let b={};for(let n=0;nJ(l,"value",i)),I.push(()=>J(l,"value_is_output",_)),l.$on("change",a[14]),l.$on("input",a[15]),l.$on("select",a[16]),{c(){j(e.$$.fragment),t=q(),j(l.$$.fragment)},m(n,f){E(e,n,f),B(n,t,f),E(l,n,f),m=!0},p(n,f){const v=f&2048?$(h,[ee(n[11])]):{};e.$set(v);const r={};f&4&&(r.label=n[2]),f&8&&(r.info=n[3]),f&16&&(r.elem_id=n[4]),f&512&&(r.show_label=n[9]),f&128&&(r.choices=n[7]),f&1024&&(r.style=n[10]),f&256&&(r.disabled=n[8]==="static"),!s&&f&1&&(s=!0,r.value=n[0],K(()=>s=!1)),!o&&f&2&&(o=!0,r.value_is_output=n[1],K(()=>o=!1)),l.$set(r)},i(n){m||(M(e.$$.fragment,n),M(l.$$.fragment,n),m=!0)},o(n){T(e.$$.fragment,n),T(l.$$.fragment,n),m=!1},d(n){z(e,n),n&&R(t),z(l,n)}}}function oe(a){let e,t;return e=new te({props:{visible:a[6],type:"fieldset",elem_id:a[4],elem_classes:a[5],disable:typeof a[10].container=="boolean"&&!a[10].container,$$slots:{default:[_e]},$$scope:{ctx:a}}}),{c(){j(e.$$.fragment)},m(l,s){E(e,l,s),t=!0},p(l,[s]){const o={};s&64&&(o.visible=l[6]),s&16&&(o.elem_id=l[4]),s&32&&(o.elem_classes=l[5]),s&1024&&(o.disable=typeof l[10].container=="boolean"&&!l[10].container),s&135071&&(o.$$scope={dirty:s,ctx:l}),e.$set(o)},i(l){t||(M(e.$$.fragment,l),t=!0)},o(l){T(e.$$.fragment,l),t=!1},d(l){z(e,l)}}}function fe(a,e,t){let{label:l="Radio"}=e,{info:s=void 0}=e,{elem_id:o=""}=e,{elem_classes:m=[]}=e,{visible:h=!0}=e,{value:b=null}=e,{value_is_output:i=!1}=e,{choices:_=[]}=e,{mode:c}=e,{show_label:n}=e,{style:f={}}=e,{loading_status:v}=e;function r(u){b=u,t(0,b)}function k(u){i=u,t(1,i)}function g(u){G.call(this,a,u)}function d(u){G.call(this,a,u)}function D(u){G.call(this,a,u)}return a.$$set=u=>{"label"in u&&t(2,l=u.label),"info"in u&&t(3,s=u.info),"elem_id"in u&&t(4,o=u.elem_id),"elem_classes"in u&&t(5,m=u.elem_classes),"visible"in u&&t(6,h=u.visible),"value"in u&&t(0,b=u.value),"value_is_output"in u&&t(1,i=u.value_is_output),"choices"in u&&t(7,_=u.choices),"mode"in u&&t(8,c=u.mode),"show_label"in u&&t(9,n=u.show_label),"style"in u&&t(10,f=u.style),"loading_status"in u&&t(11,v=u.loading_status)},[b,i,l,s,o,m,h,_,c,n,f,v,r,k,g,d,D]}class ce extends F{constructor(e){super(),L(this,e,fe,oe,N,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,choices:7,mode:8,show_label:9,style:10,loading_status:11})}}const ve=ce,ke=["static","dynamic"],we=a=>({type:{payload:"string"},description:{payload:"selected choice"},example_data:a.choices.length>1?a.choices[0]:""});export{ve as Component,we as document,ke as modes}; -//# sourceMappingURL=index-03db9d2f.js.map diff --git a/spaces/langdonholmes/piilo/piilo/models/__init__.py b/spaces/langdonholmes/piilo/piilo/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/leilevy/bingo/src/components/chat-image.tsx b/spaces/leilevy/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/leilevy/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - 
MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? '' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
          -
          panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
          -
          -
          -
          -

          添加图像

          -
          -
          - paste -
          - e.stopPropagation()} - /> -
          -
          -
          - - -
          -
          - {panel === 'camera-mode' &&
          -
          -
          -
          -
          -
          -
          -
          } -
          -
          - ) -} diff --git a/spaces/librarian-bots/collection_papers_extractor/app.py b/spaces/librarian-bots/collection_papers_extractor/app.py deleted file mode 100644 index 6aef56d2cb5fda803bf0c4c0e56e190e3fbe95ef..0000000000000000000000000000000000000000 --- a/spaces/librarian-bots/collection_papers_extractor/app.py +++ /dev/null @@ -1,172 +0,0 @@ -import os -import platform -import re -from collections import defaultdict - -import gradio as gr -from cachetools import TTLCache, cached -from cytoolz import groupby -from huggingface_hub import CollectionItem, get_collection, list_datasets, list_models -from tqdm.auto import tqdm -from apscheduler.schedulers.background import BackgroundScheduler -from apscheduler.triggers.cron import CronTrigger - -os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -is_macos = platform.system() == "Darwin" -local = platform.system() == "Darwin" -LIMIT = 1000 if is_macos else None # limit for local dev because slooow internet -CACHE_TIME = 60 * 15 # 15 minutes - - -@cached(cache=TTLCache(maxsize=100, ttl=CACHE_TIME)) -def get_models(): - print("getting models...") - return list(tqdm(iter(list_models(full=True, limit=LIMIT)))) - - -@cached(cache=TTLCache(maxsize=100, ttl=CACHE_TIME)) -def get_datasets(): - print("getting datasets...") - return list(tqdm(iter(list_datasets(full=True, limit=LIMIT)))) - - - -get_models() # warm up the cache -get_datasets() # warm up the cache - - -def check_for_arxiv_id(model): - return [tag for tag in model.tags if "arxiv" in tag] if model.tags else False - - -def extract_arxiv_id(input_string: str) -> str: - pattern = re.compile(r"\barxiv:(\d+\.\d+)\b") - match = pattern.search(input_string) - return match[1] if match else None - - -@cached(cache=TTLCache(maxsize=100, ttl=CACHE_TIME)) -def create_model_to_arxiv_id_dict(): - models = get_models() - model_to_arxiv_id = {} - for model in models: - if arxiv_papers := check_for_arxiv_id(model): - clean_arxiv_ids = [] - for paper in arxiv_papers: - if arxiv_id := extract_arxiv_id(paper): - clean_arxiv_ids.append(arxiv_id) - model_to_arxiv_id[model.modelId] = clean_arxiv_ids - return model_to_arxiv_id - - -@cached(cache=TTLCache(maxsize=100, ttl=CACHE_TIME)) -def create_dataset_to_arxiv_id_dict(): - datasets = get_datasets() - dataset_to_arxiv_id = {} - for dataset in datasets: - if arxiv_papers := check_for_arxiv_id(dataset): - clean_arxiv_ids = [] - for paper in arxiv_papers: - if arxiv_id := extract_arxiv_id(paper): - clean_arxiv_ids.append(arxiv_id) - dataset_to_arxiv_id[dataset.id] = clean_arxiv_ids - return dataset_to_arxiv_id - - -def get_collection_type(collection_item: CollectionItem): - try: - return f"{collection_item.item_type}s" - except AttributeError: - return None - - -def group_collection_items(collection_slug: str): - collection = get_collection(collection_slug) - items = collection.items - return groupby(get_collection_type, items) - - -@cached(cache=TTLCache(maxsize=500, ttl=CACHE_TIME)) -def get_papers_for_collection(collection_slug: str): - dataset_to_arxiv_id = create_dataset_to_arxiv_id_dict() - models_to_arxiv_id = create_model_to_arxiv_id_dict() - collection = group_collection_items(collection_slug) - collection_datasets = collection.get("datasets", None) - collection_models = collection.get("models", None) - papers = collection.get("papers", None) - dataset_papers = defaultdict(dict) - model_papers = defaultdict(dict) - collection_papers = defaultdict(dict) - if collection_datasets is not None: - for dataset in collection_datasets: - if arxiv_ids := 
dataset_to_arxiv_id.get(dataset.item_id, None): - data = { - "arxiv_ids": arxiv_ids, - "hub_paper_links": [ - f"https://huggingface.co/papers/{arxiv_id}" - for arxiv_id in arxiv_ids - ], - } - dataset_papers[dataset.item_id] = data - if collection_models is not None: - for model in collection.get("models", []): - if arxiv_ids := models_to_arxiv_id.get(model.item_id, None): - data = { - "arxiv_ids": arxiv_ids, - "hub_paper_links": [ - f"https://huggingface.co/papers/{arxiv_id}" - for arxiv_id in arxiv_ids - ], - } - model_papers[model.item_id] = data - if papers is not None: - for paper in papers: - data = { - "arxiv_ids": [paper.item_id], - "hub_paper_links": [f"https://huggingface.co/papers/{paper.item_id}"], - } - collection_papers[paper.item_id] = data - if not dataset_papers: - dataset_papers = None - if not model_papers: - model_papers = None - if not collection_papers: - collection_papers = None - return { - "dataset papers": dataset_papers, - "model papers": model_papers, - "papers": collection_papers, - } - - -scheduler = BackgroundScheduler() -scheduler.add_job(get_datasets, "interval", minutes=15) -scheduler.add_job(get_models, "interval", minutes=15) -scheduler.start() - -placeholder_url = "HF-IA-archiving/models-to-archive-65006a7fdadb8c628f33aac9" -slug_input = gr.Textbox( - placeholder=placeholder_url, interactive=True, label="Collection slug", max_lines=1 -) -description = ( - "Enter a Collection slug to get the arXiv IDs and Hugging Face Paper links for" - " papers associated with models and datasets in the collection. If the collection" - " includes papers the arXiv IDs and Hugging Face Paper links will be returned for" - " those papers as well." -) - -examples = [ - placeholder_url, - "davanstrien/historic-language-modeling-64f99e243188ade79d7ad74b", -] - - -gr.Interface( - get_papers_for_collection, - slug_input, - "json", - title="📄🔗: Extract linked papers from a Hugging Face Collection", - description=description, - examples=examples, - cache_examples=True, -).queue(concurrency_count=4).launch() diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Convert Jar To Vxp [REPACK].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Convert Jar To Vxp [REPACK].md deleted file mode 100644 index a41aebffe2607ac5725619597e04b63cc8d55470..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Convert Jar To Vxp [REPACK].md +++ /dev/null @@ -1,6 +0,0 @@ -

          Convert Jar To Vxp


DOWNLOAD >>> https://bytlly.com/2uGwO9



          -
          -You can compress large files in a jar.gif file and png can be compressed by applying multiple colors or to clean up unnecessary places. In this case, you can use the compression functions that the compressed file has in its set. To create a file that will be compressed, you must add the "compress" option before the filename in step 4, or add "jar" to the filenames you print. jar files will be compressed without data loss. If you add "jar" (jar files will be compressed) as an option, you will get a jar.jpeg file and the jpeg will be compressed. The "png" option (png files will be compressed) will give you a png.gif and the gif will be compressed. "gif" option 8a78ff9644
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Il Cricco Di Teodoro Itinerario Nellarte Versione Verde.pdf.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Il Cricco Di Teodoro Itinerario Nellarte Versione Verde.pdf.md deleted file mode 100644 index 9ae780421662ed332f0f0b93ec262c77b1508f16..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Il Cricco Di Teodoro Itinerario Nellarte Versione Verde.pdf.md +++ /dev/null @@ -1,13 +0,0 @@ -

          Il Cricco Di Teodoro Itinerario Nell'arte Versione Verde.pdf


          DOWNLOAD ———>>> https://bytlly.com/2uGyyg



          - -Il Cricco Di Teodoro Itinerario Nell'arte Green Version.pdf - Chaar Sahibzaade - Rise of Banda Singh Bahadur 2 hindi movie download. Download. -Download "Bola Khojiya" MP3 Song Download "Chaar Sahibzaade - Rise of Banda Singh Bahadur" MP3 Song Download "Chaar Sahibzaade - Rise of Banda Singh Bahadur" Video. -How to Download Rewind Season 8 Episode 1 With Video? -Major changes to Rewind Season 8 Episode 1 with video! -This article, The Rewind Season 8 Episode. -Download: Attachment. -Author: Sahibzaade, Date added:, Download: attachment. -Author: Sahibzaade, Date of addition:, 8a78ff9644
          -
          -
          -

          diff --git a/spaces/linzjian666/vvvtss/Dockerfile b/spaces/linzjian666/vvvtss/Dockerfile deleted file mode 100644 index ab8c972030e7f7a4a9a2b1f860e9cda9ad220841..0000000000000000000000000000000000000000 --- a/spaces/linzjian666/vvvtss/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM mjjonone/mjj:amd64 -ENV SERVER_PORT=7860 -RUN chmod 777 /app \ No newline at end of file diff --git a/spaces/llmonitor/benchmarks/components/SelectModels.js b/spaces/llmonitor/benchmarks/components/SelectModels.js deleted file mode 100644 index 376d205c31e0eb25f548f2ac2f1294e2140a6cbb..0000000000000000000000000000000000000000 --- a/spaces/llmonitor/benchmarks/components/SelectModels.js +++ /dev/null @@ -1,43 +0,0 @@ -"use client" - -import { useEffect, useState } from "react" -import { useRouter } from "next/navigation" - -export default function SelectModels({ models }) { - const router = useRouter() - const [model1, setModel1] = useState("") - const [model2, setModel2] = useState("") - - useEffect(() => { - if (model1 && model2) { - router.push(`/compare/${model1}-vs-${model2}`) - } - }, [model1, model2]) - - return ( -
          - - -
          - ) -} diff --git a/spaces/luisoala/raw2logit/dataset.py b/spaces/luisoala/raw2logit/dataset.py deleted file mode 100644 index 4720df2b1d0b6426fb79f473841b62945398cc5b..0000000000000000000000000000000000000000 --- a/spaces/luisoala/raw2logit/dataset.py +++ /dev/null @@ -1,573 +0,0 @@ -import os -import shutil -import rawpy -import random -from PIL import Image -import tifffile as tiff -import zipfile - -import numpy as np -import pandas as pd - -from torch.utils.data import Dataset, DataLoader, TensorDataset -from sklearn.model_selection import StratifiedShuffleSplit - -if not os.path.exists('README.md'): # set pwd to root - os.chdir('..') - -from utils.dataset_utils import split_img, list_images_in_dir, load_image -from utils.base import np2torch, torch2np, b2_download_folder - -IMAGE_FILE_TYPES = ['dng', 'png', 'tif', 'tiff'] - - -def get_dataset(name, I_ratio=1.0): - # DroneDataset - if name in ('DC', 'Drone', 'DroneClassification', 'DroneDatasetClassificationTiled'): - return DroneDatasetClassificationTiled(I_ratio=I_ratio) - if name in ('DS', 'DroneSegmentation', 'DroneDatasetSegmentationTiled'): - return DroneDatasetSegmentationTiled(I_ratio=I_ratio) - - # MicroscopyDataset - if name in ('M', 'Microscopy', 'MicroscopyDataset'): - return MicroscopyDataset(I_ratio=I_ratio) - - # for testing - if name in ('DSF', 'DroneDatasetSegmentationFull'): - return DroneDatasetSegmentationFull(I_ratio=I_ratio) - if name in ('MRGB', 'MicroscopyRGB', 'MicroscopyDatasetRGB'): - return MicroscopyDatasetRGB(I_ratio=I_ratio) - - raise ValueError(name) - - -class ImageFolderDataset(Dataset): - """Creates a dataset of images in img_dir and corresponding masks in mask_dir. - Corresponding mask files need to contain the filename of the image. - Files are expected to be of the same filetype. - - Args: - img_dir (str): path to image folder - mask_dir (str): path to mask folder - transform (callable, optional): transformation to apply to image and mask - bits (int, optional): normalize image by dividing by 2^bits - 1 - """ - - task = 'classification' - - def __init__(self, img_dir, labels, transform=None, bits=1): - - self.img_dir = img_dir - self.labels = labels - - self.images = list_images_in_dir(img_dir) - - assert len(self.images) == len(self.labels) - - self.transform = transform - self.bits = bits - - def __repr__(self): - rep = f"{type(self).__name__}: ImageFolderDataset[{len(self.images)}]" - for n, (img, label) in enumerate(zip(self.images, self.labels)): - rep += f'\nimage: {img}\tlabel: {label}' - if n > 10: - rep += '\n...' - break - return rep - - def __len__(self): - return len(self.images) - - def __getitem__(self, idx): - - label = self.labels[idx] - - img = load_image(self.images[idx]) - img = img / (2**self.bits - 1) - if self.transform is not None: - img = self.transform(img) - - if len(img.shape) == 2: - assert img.shape == (256, 256), f"Invalid size for {self.images[idx]}" - else: - assert img.shape == (3, 256, 256), f"Invalid size for {self.images[idx]}" - - return img, label - - -class ImageFolderDatasetSegmentation(Dataset): - """Creates a dataset of images in `img_dir` and corresponding masks in `mask_dir`. - Corresponding mask files need to contain the filename of the image. - Files are expected to be of the same filetype. 
- - Args: - img_dir (str): path to image folder - mask_dir (str): path to mask folder - transform (callable, optional): transformation to apply to image and mask - bits (int, optional): normalize image by dividing by 2^bits - 1 - """ - - task = 'segmentation' - - def __init__(self, img_dir, mask_dir, transform=None, bits=1): - - self.img_dir = img_dir - self.mask_dir = mask_dir - - self.images = list_images_in_dir(img_dir) - self.masks = list_images_in_dir(mask_dir) - - check_image_folder_consistency(self.images, self.masks) - - self.transform = transform - self.bits = bits - - def __repr__(self): - rep = f"{type(self).__name__}: ImageFolderDatasetSegmentation[{len(self.images)}]" - for n, (img, mask) in enumerate(zip(self.images, self.masks)): - rep += f'\nimage: {img}\tmask: {mask}' - if n > 10: - rep += '\n...' - break - return rep - - def __len__(self): - return len(self.images) - - def __getitem__(self, idx): - - img = load_image(self.images[idx]) - mask = load_image(self.masks[idx]) - - img = img / (2**self.bits - 1) - mask = (mask > 0).astype(np.float32) - - if self.transform is not None: - img = self.transform(img) - - return img, mask - - -class MultiIntensity(Dataset): - """Wrap datasets with different intesities - - Args: - datasets (list): list of datasets to wrap - """ - - def __init__(self, datasets): - self.dataset = datasets[0] - - for d in range(1, len(datasets)): - self.dataset.images = self.dataset.images + datasets[d].images - self.dataset.labels = self.dataset.labels + datasets[d].labels - - def __len__(self): - return len(self.dataset) - - def __repr__(self): - return f"Subset [{len(self.dataset)}] of " + repr(self.dataset) - - def __getitem__(self, idx): - x, y = self.dataset[idx] - if self.transform is not None: - x = self.transform(x) - return x, y - - -class Subset(Dataset): - """Define a subset of a dataset by only selecting given indices. - - Args: - dataset (Dataset): full dataset - indices (list): subset indices - """ - - def __init__(self, dataset, indices=None, transform=None): - self.dataset = dataset - self.indices = indices if indices is not None else range(len(dataset)) - self.transform = transform - - def __len__(self): - return len(self.indices) - - def __repr__(self): - return f"Subset [{len(self)}] of " + repr(self.dataset) - - def __getitem__(self, idx): - x, y = self.dataset[self.indices[idx]] - if self.transform is not None: - x = self.transform(x) - return x, y - - -class DroneDatasetSegmentationFull(ImageFolderDatasetSegmentation): - """Dataset consisting of full-sized numpy images and masks. Images are normalized to range [0, 1]. - """ - - black_level = [0.0625, 0.0626, 0.0625, 0.0626] - white_balance = [2.86653646, 1., 1.73079425] - colour_matrix = [1.50768983, -0.33571374, -0.17197604, -0.23048614, - 1.70698738, -0.47650126, -0.03119153, -0.32803956, 1.35923111] - camera_parameters = black_level, white_balance, colour_matrix - - def __init__(self, I_ratio=1.0, transform=None, force_download=False, bits=16): - - assert I_ratio in [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0] - - img_dir = f'data/drone/images_full/raw_scale{int(I_ratio*100):03d}' - mask_dir = 'data/drone/masks_full' - - download_drone_dataset(force_download) # XXX: zip files and add checksum? date? - - super().__init__(img_dir=img_dir, mask_dir=mask_dir, transform=transform, bits=bits) - - -class DroneDatasetSegmentationTiled(ImageFolderDatasetSegmentation): - """Dataset consisting of tiled numpy images and masks. 
Images are in range [0, 1] - Args: - tile_size (int, optional): size of the tiled images. Defaults to 256. - """ - - camera_parameters = DroneDatasetSegmentationFull.camera_parameters - - def __init__(self, I_ratio=1.0, transform=None): - - tile_size = 256 - - img_dir = f'data/drone/images_tiles_{tile_size}/raw_scale{int(I_ratio*100):03d}' - mask_dir = f'data/drone/masks_tiles_{tile_size}' - - if not os.path.exists(img_dir) or not os.path.exists(mask_dir): - dataset_full = DroneDatasetSegmentationFull(I_ratio=I_ratio, bits=1) - print("tiling dataset..") - create_tiles_dataset(dataset_full, img_dir, mask_dir, tile_size=tile_size) - - super().__init__(img_dir=img_dir, mask_dir=mask_dir, transform=transform, bits=16) - - -class DroneDatasetClassificationTiled(ImageFolderDataset): - - camera_parameters = DroneDatasetSegmentationFull.camera_parameters - - def __init__(self, I_ratio=1.0, transform=None): - - random_state = 72 - tile_size = 256 - thr = 0.01 - - img_dir = f'data/drone/classification/images_tiles_{tile_size}/raw_scale{int(I_ratio*100):03d}_thr_{thr}' - mask_dir = f'data/drone/classification/masks_tiles_{tile_size}_thr_{thr}' - df_path = f'data/drone/classification/dataset_tiles_{tile_size}_{random_state}_{thr}.csv' - - if not os.path.exists(img_dir) or not os.path.exists(mask_dir): - dataset_full = DroneDatasetSegmentationFull(I_ratio=I_ratio, bits=1) - print("tiling dataset..") - create_tiles_dataset_binary(dataset_full, img_dir, mask_dir, random_state, thr, tile_size=tile_size) - - self.classes = ['car', 'no car'] - self.df = pd.read_csv(df_path) - labels = self.df['label'].to_list() - - super().__init__(img_dir=img_dir, labels=labels, transform=transform, bits=16) - - images, class_labels = read_label_csv(self.df) - self.images = [os.path.join(self.img_dir, image) for image in images] - self.labels = class_labels - - -class MicroscopyDataset(ImageFolderDataset): - """MicroscopyDataset raw images - - Args: - I_ratio (float): Original image rescaled by this factor, possible values [0.01,0.05,0.1,0.25,0.5,0.75,1.0] - raw (bool): Select rgb dataset or raw dataset - transform (callable, optional): transformation to apply to image and mask - bits (int, optional): normalize image by dividing by 2^bits - 1 - """ - - black_level = [9.834368023181512e-06, 9.834368023181512e-06, 9.834368023181512e-06, 9.834368023181512e-06] - white_balance = [-0.6567, 1.9673, 3.5304] - colour_matrix = [-2.0338, 0.0933, 0.4157, -0.0286, 2.6464, -0.0574, -0.5516, -0.0947, 2.9308] - - camera_parameters = black_level, white_balance, colour_matrix - - dataset_mean = [0.91, 0.84, 0.94] - dataset_std = [0.08, 0.12, 0.05] - - def __init__(self, I_ratio=1.0, transform=None, bits=16, force_download=False): - - assert I_ratio in [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0] - - download_microscopy_dataset(force_download=force_download) - - self.img_dir = f'data/microscopy/images/raw_scale{int(I_ratio*100):03d}' - self.transform = transform - self.bits = bits - - self.label_file = 'data/microscopy/labels/Ma190c_annotations.dat' - - self.valid_classes = ['BAS', 'EBO', 'EOS', 'KSC', 'LYA', 'LYT', 'MMZ', 'MOB', - 'MON', 'MYB', 'MYO', 'NGB', 'NGS', 'PMB', 'PMO', 'UNC'] - - self.invalid_files = ['Ma190c_lame3_zone13_composite_Mcropped_2.tiff', ] - - images, class_labels = read_label_file(self.label_file) - - # filter classes with low appearance - self.valid_classes = [class_label for class_label in self.valid_classes - if class_labels.count(class_label) > 4] - - # remove invalid classes and invalid files from (images, 
class_labels) - images, class_labels = list(zip(*[ - (image, class_label) - for image, class_label in zip(images, class_labels) - if class_label in self.valid_classes and image not in self.invalid_files - ])) - - self.classes = list(sorted({*class_labels})) - - # store full path - self.images = [os.path.join(self.img_dir, image) for image in images] - - # reindex labels - self.labels = [self.classes.index(class_label) for class_label in class_labels] - - -class MicroscopyDatasetRGB(MicroscopyDataset): - """MicroscopyDataset RGB images - - Args: - I_ratio (float): Original image rescaled by this factor, possible values [0.01,0.05,0.1,0.25,0.5,0.75,1.0] - raw (bool): Select rgb dataset or raw dataset - transform (callable, optional): transformation to apply to image and mask - bits (int, optional): normalize image by dividing by 2^bits - 1 - """ - camera_parameters = None - - dataset_mean = None - dataset_std = None - - def __init__(self, I_ratio=1.0, transform=None, bits=16, force_download=False): - super().__init__(I_ratio=I_ratio, transform=transform, bits=bits, force_download=force_download) - self.images = [image.replace('raw', 'rgb') for image in self.images] # XXX: hack - - -def read_label_file(label_file_path): - - images = [] - class_labels = [] - - with open(label_file_path, "rb") as data: - for line in data: - file_name, class_label = line.decode("utf-8").split() - image = file_name + '.tiff' - images.append(image) - class_labels.append(class_label) - - return images, class_labels - - -def read_label_csv(df): - - images = [] - class_labels = [] - - for file_name, label in zip(df['file name'], df['label']): - image = file_name + '.tif' - images.append(image) - class_labels.append(int(label)) - return images, class_labels - - -def download_drone_dataset(force_download): - b2_download_folder('drone/images', 'data/drone/images_full', force_download=force_download) - b2_download_folder('drone/masks', 'data/drone/masks_full', force_download=force_download) - unzip_drone_images() - - -def download_microscopy_dataset(force_download): - b2_download_folder('Data histopathology/WhiteCellsImages', - 'data/microscopy/images', force_download=force_download) - b2_download_folder('Data histopathology/WhiteCellsLabels', - 'data/microscopy/labels', force_download=force_download) - unzip_microscopy_images() - - -def unzip_microscopy_images(): - - if os.path.isfile('data/microscopy/labels/.bzEmpty'): - os.remove('data/microscopy/labels/.bzEmpty') - - for file in os.listdir('data/microscopy/images'): - if file.endswith(".zip"): - zip = zipfile.ZipFile(os.path.join('data/microscopy/images', file)) - zip.extractall('data/microscopy/images') - os.remove(os.path.join('data/microscopy/images', file)) - - -def unzip_drone_images(): - - if os.path.isfile('data/drone/masks_full/.bzEmpty'): - os.remove('data/drone/masks_full/.bzEmpty') - - for file in os.listdir('data/drone/images_full'): - if file.endswith(".zip"): - zip = zipfile.ZipFile(os.path.join('data/drone/images_full', file)) - zip.extractall('data/drone/images_full') - os.remove(os.path.join('data/drone/images_full', file)) - - -def create_tiles_dataset(dataset, img_dir, mask_dir, tile_size=256): - for folder in [img_dir, mask_dir]: - if not os.path.exists(folder): - os.makedirs(folder) - for n, (img, mask) in enumerate(dataset): - tiled_img = split_img(img, ROIs=(tile_size, tile_size), step=(tile_size, tile_size)) - tiled_mask = split_img(mask, ROIs=(tile_size, tile_size), step=(tile_size, tile_size)) - tiled_img, tiled_mask = 
class_detection(tiled_img, tiled_mask) # Remove images without cars in it - for i, (sub_img, sub_mask) in enumerate(zip(tiled_img, tiled_mask)): - tile_id = f"{n:02d}_{i:05d}" - Image.fromarray(sub_img).save(os.path.join(img_dir, tile_id + '.tif')) - Image.fromarray(sub_mask > 0).save(os.path.join(mask_dir, tile_id + '.png')) - - -def create_tiles_dataset_binary(dataset, img_dir, mask_dir, random_state, thr, tile_size=256): - - for folder in [img_dir, mask_dir]: - if not os.path.exists(folder): - os.makedirs(folder) - - ids = [] - labels = [] - - for n, (img, mask) in enumerate(dataset): - tiled_img = split_img(img, ROIs=(tile_size, tile_size), step=(tile_size, tile_size)) - tiled_mask = split_img(mask, ROIs=(tile_size, tile_size), step=(tile_size, tile_size)) - - X_with, X_without, Y_with, Y_without = binary_class_detection( - tiled_img, tiled_mask, random_state, thr) # creates balanced arrays with class and without class - - for i, (sub_X_with, sub_Y_with) in enumerate(zip(X_with, Y_with)): - tile_id = f"{n:02d}_{i:05d}" - ids.append(tile_id) - labels.append(0) - Image.fromarray(sub_X_with).save(os.path.join(img_dir, tile_id + '.tif')) - Image.fromarray(sub_Y_with > 0).save(os.path.join(mask_dir, tile_id + '.png')) - for j, (sub_X_without, sub_Y_without) in enumerate(zip(X_without, Y_without)): - tile_id = f"{n:02d}_{i+1+j:05d}" - ids.append(tile_id) - labels.append(1) - Image.fromarray(sub_X_without).save(os.path.join(img_dir, tile_id + '.tif')) - Image.fromarray(sub_Y_without > 0).save(os.path.join(mask_dir, tile_id + '.png')) - # Image.fromarray(sub_mask).save(os.path.join(mask_dir, tile_id + '.png')) - - df = pd.DataFrame({'file name': ids, 'label': labels}) - - df_loc = f'data/drone/classification/dataset_tiles_{tile_size}_{random_state}_{thr}.csv' - df.to_csv(df_loc) - - return - - -def class_detection(X, Y): - """Split dataset in images which has the class in the target - - Args: - X (ndarray): input image - Y (ndarray): target with segmentation map (images with {0,1} values where it is 1 when there is the class) - Returns: - X_with_class (ndarray): input regions with the selected class - Y_with_class (ndarray): target regions with the selected class - X_without_class (ndarray): input regions without the selected class - Y_without_class (ndarray): target regions without the selected class - """ - - with_class = [] - without_class = [] - for i, img in enumerate(Y): - if img.mean() == 0: - without_class.append(i) - else: - with_class.append(i) - - X_with_class = np.delete(X, without_class, 0) - Y_with_class = np.delete(Y, without_class, 0) - - return X_with_class, Y_with_class - - -def binary_class_detection(X, Y, random_seed, thr): - """Splits subimages in subimages with the selected class and without the selected class by calculating the mean of the submasks; subimages with 0 < submask.mean()<=thr are disregared - - - - Args: - X (ndarray): input image - Y (ndarray): target with segmentation map (images with {0,1} values where it is 1 when there is the class) - thr (flaot): sub images are not considered if 0 < sub_target.mean() <= thr - balanced (bool): number of returned sub images is equal for both classes if true - random_seed (None or int): selection of sub images in class with more elements according to random_seed if balanced - Returns: - X_with_class (ndarray): input regions with the selected class - Y_with_class (ndarray): target regions with the selected class - X_without_class (ndarray): input regions without the selected class - Y_without_class (ndarray): target regions 
without the selected class - """ - - with_class = [] - without_class = [] - no_class = [] - - for i, img in enumerate(Y): - m = img.mean() - if m == 0: - without_class.append(i) - else: - if m > thr: - with_class.append(i) - else: - no_class.append(i) - - N = len(with_class) - M = len(without_class) - random.seed(random_seed) - if N <= M: - random.shuffle(without_class) - with_class.extend(without_class[:M - N]) - else: - random.shuffle(with_class) - without_class.extend(with_class[:N - M]) - - X_with_class = np.delete(X, without_class + no_class, 0) - X_without_class = np.delete(X, with_class + no_class, 0) - Y_with_class = np.delete(Y, without_class + no_class, 0) - Y_without_class = np.delete(Y, with_class + no_class, 0) - - return X_with_class, X_without_class, Y_with_class, Y_without_class - - -def make_dataloader(dataset, batch_size, shuffle=True): - - X, Y = dataset - - X, Y = np2torch(X), np2torch(Y) - - dataset = TensorDataset(X, Y) - dataset = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle) - - return dataset - - -def check_image_folder_consistency(images, masks): - file_type_images = images[0].split('.')[-1].lower() - file_type_masks = masks[0].split('.')[-1].lower() - assert len(images) == len(masks), "images / masks length mismatch" - for img_file, mask_file in zip(images, masks): - img_name = img_file.split('/')[-1].split('.')[0] - assert img_name in mask_file, f"image {img_file} corresponds to {mask_file}?" - assert img_file.split('.')[-1].lower() == file_type_images, \ - f"image file {img_file} file type mismatch. Shoule be: {file_type_images}" - assert mask_file.split('.')[-1].lower() == file_type_masks, \ - f"image file {mask_file} file type mismatch. Should be: {file_type_masks}" diff --git a/spaces/luxuedong/lxd/src/app/page.tsx b/spaces/luxuedong/lxd/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/luxuedong/lxd/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
          - - - ) -} diff --git a/spaces/luxuedong/lxd/src/lib/bots/bing/utils.ts b/spaces/luxuedong/lxd/src/lib/bots/bing/utils.ts deleted file mode 100644 index 6bbbc5e463ad55bc1219b63cf78013f5360fc908..0000000000000000000000000000000000000000 --- a/spaces/luxuedong/lxd/src/lib/bots/bing/utils.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { ChatResponseMessage, BingChatResponse } from './types' - -export function convertMessageToMarkdown(message: ChatResponseMessage): string { - if (message.messageType === 'InternalSearchQuery') { - return message.text - } - for (const card of message.adaptiveCards??[]) { - for (const block of card.body) { - if (block.type === 'TextBlock') { - return block.text - } - } - } - return '' -} - -const RecordSeparator = String.fromCharCode(30) - -export const websocketUtils = { - packMessage(data: any) { - return `${JSON.stringify(data)}${RecordSeparator}` - }, - unpackMessage(data: string | ArrayBuffer | Blob) { - if (!data) return {} - return data - .toString() - .split(RecordSeparator) - .filter(Boolean) - .map((s) => { - try { - return JSON.parse(s) - } catch (e) { - return {} - } - }) - }, -} - -export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise { - const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`, - { - method: 'HEAD', - headers, - redirect: 'manual' - }, - ); - - if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) { - throw new Error('请求异常,请检查身份信息是否有效') - } - - const resultId = RegExp.$1; - let count = 0 - const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`; - - do { - await sleep(3000); - const content = await fetch(imageThumbUrl, { headers, method: 'GET' }) - - // @ts-ignore - if (content.headers.get('content-length') > 1) { - const text = await content.text() - return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&')) - .map(img => `![${prompt}](${img})`).join(' ') - } - } while(count ++ < 10); -} - - -export async function* streamAsyncIterable(stream: ReadableStream) { - const reader = stream.getReader() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - return - } - yield value - } - } finally { - reader.releaseLock() - } -} - -export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) - diff --git a/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/yolo.py b/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/yolo.py deleted file mode 100644 index 70845d972f0bcfd3632fcbac096b23e1b4d4d779..0000000000000000000000000000000000000000 --- a/spaces/lwchen/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/yolo.py +++ /dev/null @@ -1,235 +0,0 @@ -import math -from copy import deepcopy -from pathlib import Path - -import torch -import yaml # for torch hub -from torch import nn - -from facelib.detection.yolov5face.models.common import ( - C3, - NMS, - SPP, - AutoShape, - Bottleneck, - BottleneckCSP, - Concat, - Conv, - DWConv, - Focus, - ShuffleV2Block, - StemBlock, -) -from facelib.detection.yolov5face.models.experimental import CrossConv, MixConv2d -from facelib.detection.yolov5face.utils.autoanchor import check_anchor_order -from facelib.detection.yolov5face.utils.general import make_divisible -from 
facelib.detection.yolov5face.utils.torch_utils import copy_attr, fuse_conv_and_bn - - -class Detect(nn.Module): - stride = None # strides computed during build - export = False # onnx export - - def __init__(self, nc=80, anchors=(), ch=()): # detection layer - super().__init__() - self.nc = nc # number of classes - self.no = nc + 5 + 10 # number of outputs per anchor - - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer("anchors", a) # shape(nl,na,2) - self.register_buffer("anchor_grid", a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - - def forward(self, x): - z = [] # inference output - if self.export: - for i in range(self.nl): - x[i] = self.m[i](x[i]) - return x - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) - - y = torch.full_like(x[i], 0) - y[..., [0, 1, 2, 3, 4, 15]] = x[i][..., [0, 1, 2, 3, 4, 15]].sigmoid() - y[..., 5:15] = x[i][..., 5:15] - - y[..., 0:2] = (y[..., 0:2] * 2.0 - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - - y[..., 5:7] = ( - y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] - ) # landmark x1 y1 - y[..., 7:9] = ( - y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] - ) # landmark x2 y2 - y[..., 9:11] = ( - y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] - ) # landmark x3 y3 - y[..., 11:13] = ( - y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] - ) # landmark x4 y4 - y[..., 13:15] = ( - y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i] - ) # landmark x5 y5 - - z.append(y.view(bs, -1, self.no)) - - return x if self.training else (torch.cat(z, 1), x) - - @staticmethod - def _make_grid(nx=20, ny=20): - # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing="ij") # for pytorch>=1.10 - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - - -class Model(nn.Module): - def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None): # model, input channels, number of classes - super().__init__() - self.yaml_file = Path(cfg).name - with Path(cfg).open(encoding="utf8") as f: - self.yaml = yaml.safe_load(f) # model dict - - # Define model - ch = self.yaml["ch"] = self.yaml.get("ch", ch) # input channels - if nc and nc != self.yaml["nc"]: - self.yaml["nc"] = nc # override yaml value - - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml["nc"])] # default names - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, Detect): - s = 128 # 2x min stride - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward - m.anchors /= m.stride.view(-1, 1, 1) - check_anchor_order(m) - self.stride = m.stride - 
self._initialize_biases() # only run once - - def forward(self, x): - return self.forward_once(x) # single-scale inference, train - - def forward_once(self, x): - y = [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - - x = m(x) # run - y.append(x if m.i in self.save else None) # save output - - return x - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(("%6g Conv2d.bias:" + "%10.3g" * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print("Fusing layers... ") - for m in self.model.modules(): - if isinstance(m, Conv) and hasattr(m, "bn"): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, "bn") # remove batchnorm - m.forward = m.fuseforward # update forward - elif type(m) is nn.Upsample: - m.recompute_scale_factor = None # torch 1.11.0 compatibility - return self - - def nms(self, mode=True): # add or remove NMS module - present = isinstance(self.model[-1], NMS) # last layer is NMS - if mode and not present: - print("Adding NMS... ") - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name=str(m.i), module=m) # add - self.eval() - elif not mode and present: - print("Removing NMS... ") - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add autoShape module - print("Adding autoShape... 
") - m = AutoShape(self) # wrap model - copy_attr(m, self, include=("yaml", "nc", "hyp", "names", "stride"), exclude=()) # copy attributes - return m - - -def parse_model(d, ch): # model_dict, input_channels(3) - anchors, nc, gd, gw = d["anchors"], d["nc"], d["depth_multiple"], d["width_multiple"] - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [ - Conv, - Bottleneck, - SPP, - DWConv, - MixConv2d, - Focus, - CrossConv, - BottleneckCSP, - C3, - ShuffleV2Block, - StemBlock, - ]: - c1, c2 = ch[f], args[0] - - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 - - args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3]: - args.insert(2, n) - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m is Detect: - args.append([ch[x + 1] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - else: - c2 = ch[f] - - m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace("__main__.", "") # module type - np = sum(x.numel() for x in m_.parameters()) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - ch.append(c2) - return nn.Sequential(*layers), sorted(save) diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/set_operations.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/set_operations.h deleted file mode 100644 index bbde20114c2c8348ceff8dfb226f7e5ed71cc026..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/set_operations.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits the set operations -#include - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/transform.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/transform.h deleted file mode 100644 index 1aa2f4993fead2b6de01cc2faa29f2a49d950fd3..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/generic/transform.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace generic -{ - -template -__host__ __device__ - OutputIterator transform(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - UnaryFunction op); - -template -__host__ __device__ - OutputIterator transform(thrust::execution_policy &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - BinaryFunction op); - -template -__host__ __device__ - ForwardIterator transform_if(thrust::execution_policy &exec, - InputIterator first, - InputIterator last, - ForwardIterator result, - UnaryFunction unary_op, - Predicate pred); - -template -__host__ __device__ - ForwardIterator transform_if(thrust::execution_policy &exec, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - ForwardIterator result, - UnaryFunction unary_op, - Predicate pred); - -template -__host__ __device__ - ForwardIterator transform_if(thrust::execution_policy &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - InputIterator3 stencil, - ForwardIterator result, - BinaryFunction binary_op, - Predicate pred); - -} // end namespace generic -} // end namespace detail -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/trivial_copy.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/trivial_copy.h deleted file mode 100644 index 8fbd0a987a294a7a33375b74a4c127922f0d2c0b..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/trivial_copy.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file trivial_copy.h - * \brief Sequential copy algorithms for plain-old-data. 
- */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace sequential -{ - - -template -__host__ __device__ - T *trivial_copy_n(const T *first, - std::ptrdiff_t n, - T *result) -{ - T* return_value = NULL; - if (THRUST_IS_HOST_CODE) { - #if THRUST_INCLUDE_HOST_CODE - std::memmove(result, first, n * sizeof(T)); - return_value = result + n; - #endif - } else { - #if THRUST_INCLUDE_DEVICE_CODE - return_value = thrust::system::detail::sequential::general_copy_n(first, n, result); - #endif - } - return return_value; -} // end trivial_copy_n() - - -} // end namespace sequential -} // end namespace detail -} // end namespace system -} // end namespace thrust - diff --git a/spaces/marioboy/neil-breen/encoder/params_data.py b/spaces/marioboy/neil-breen/encoder/params_data.py deleted file mode 100644 index bdb1716ed45617f2b127a7fb8885afe6cc74fb71..0000000000000000000000000000000000000000 --- a/spaces/marioboy/neil-breen/encoder/params_data.py +++ /dev/null @@ -1,29 +0,0 @@ - -## Mel-filterbank -mel_window_length = 25 # In milliseconds -mel_window_step = 10 # In milliseconds -mel_n_channels = 40 - - -## Audio -sampling_rate = 16000 -# Number of spectrogram frames in a partial utterance -partials_n_frames = 160 # 1600 ms -# Number of spectrogram frames at inference -inference_n_frames = 80 # 800 ms - - -## Voice Activation Detection -# Window size of the VAD. Must be either 10, 20 or 30 milliseconds. -# This sets the granularity of the VAD. Should not need to be changed. -vad_window_length = 30 # In milliseconds -# Number of frames to average together when performing the moving average smoothing. -# The larger this value, the larger the VAD variations must be to not get smoothed out. -vad_moving_average_width = 8 -# Maximum number of consecutive silent frames a segment can have. -vad_max_silence_length = 6 - - -## Audio volume normalization -audio_norm_target_dBFS = -30 - diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/approaches/train_audio2landmark.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/approaches/train_audio2landmark.py deleted file mode 100644 index e9385099b80c32204f1661db30f39ff79fe5b63d..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/approaches/train_audio2landmark.py +++ /dev/null @@ -1,295 +0,0 @@ -""" - # Copyright 2020 Adobe - # All Rights Reserved. - - # NOTICE: Adobe permits you to use, modify, and distribute this file in - # accordance with the terms of the Adobe license agreement accompanying - # it. 
- -""" - -import os -import torch.nn.parallel -import torch.utils.data -from src.dataset.audio2landmark.audio2landmark_dataset import Audio2landmark_Dataset -from src.models.model_audio2landmark import * -from util.utils import get_n_params -import numpy as np -import pickle - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -class Audio2landmark_model(): - - def __init__(self, opt_parser, jpg_shape=None): - ''' - Init model with opt_parser - ''' - print('Run on device:', device) - - # Step 1 : load opt_parser - self.opt_parser = opt_parser - self.std_face_id = np.loadtxt('MakeItTalk/src/dataset/utils/STD_FACE_LANDMARKS.txt') - if(jpg_shape is not None): - self.std_face_id = jpg_shape - self.std_face_id = self.std_face_id.reshape(1, 204) - self.std_face_id = torch.tensor(self.std_face_id, requires_grad=False, dtype=torch.float).to(device) - - self.eval_data = Audio2landmark_Dataset(dump_dir='MakeItTalk/examples/dump', - dump_name='random', - status='val', - num_window_frames=18, - num_window_step=1) - self.eval_dataloader = torch.utils.data.DataLoader(self.eval_data, batch_size=1, - shuffle=False, num_workers=0, - collate_fn=self.eval_data.my_collate_in_segments) - print('EVAL num videos: {}'.format(len(self.eval_data))) - - # Step 3: Load model - self.G = Audio2landmark_pos(drop_out=0.5, - spk_emb_enc_size=128, - c_enc_hidden_size=256, - transformer_d_model=32, N=2, heads=2, - z_size=128, audio_dim=256) - print('G: Running on {}, total num params = {:.2f}M'.format(device, get_n_params(self.G)/1.0e6)) - - model_dict = self.G.state_dict() - ckpt = torch.load(opt_parser.load_a2l_G_name, map_location=torch.device('cuda')) - pretrained_dict = {k: v for k, v in ckpt['G'].items() if k.split('.')[0] not in ['comb_mlp']} - model_dict.update(pretrained_dict) - self.G.load_state_dict(model_dict) - - print('======== LOAD PRETRAINED FACE ID MODEL {} ========='.format(opt_parser.load_a2l_G_name)) - self.G.to(device) - - ''' baseline model ''' - self.C = Audio2landmark_content(num_window_frames=18, - in_size=80, use_prior_net=True, - bidirectional=False, drop_out=0.5) - - ckpt = torch.load(opt_parser.load_a2l_C_name, map_location=torch.device('cuda')) - self.C.load_state_dict(ckpt['model_g_face_id']) - # self.C.load_state_dict(ckpt['C']) - print('======== LOAD PRETRAINED FACE ID MODEL {} ========='.format(opt_parser.load_a2l_C_name)) - self.C.to(device) - - self.t_shape_idx = (27, 28, 29, 30, 33, 36, 39, 42, 45) - self.anchor_t_shape = np.loadtxt('MakeItTalk/src/dataset/utils/STD_FACE_LANDMARKS.txt') - self.anchor_t_shape = self.anchor_t_shape[self.t_shape_idx, :] - - with open(os.path.join('MakeItTalk/examples', 'dump', 'emb.pickle'), 'rb') as fp: - self.test_embs = pickle.load(fp) - - print('====================================') - for key in self.test_embs.keys(): - print(key) - print('====================================') - - def __train_face_and_pos__(self, fls, aus, embs, face_id, smooth_win=31, close_mouth_ratio=.99): - - fls_without_traj = fls[:, 0, :].detach().clone().requires_grad_(False) - - if (face_id.shape[0] == 1): - face_id = face_id.repeat(aus.shape[0], 1) - face_id = face_id.requires_grad_(False) - baseline_face_id = face_id.detach() - - z = torch.tensor(torch.zeros(aus.shape[0], 128), requires_grad=False, dtype=torch.float).to(device) - fl_dis_pred, _, spk_encode = self.G(aus, embs * 3.0, face_id, fls_without_traj, z, add_z_spk=False) - - # ADD CONTENT - from scipy.signal import savgol_filter - smooth_length = int(min(fl_dis_pred.shape[0]-1, smooth_win) // 2 
* 2 + 1) - fl_dis_pred = savgol_filter(fl_dis_pred.cpu().numpy(), smooth_length, 3, axis=0) - # - ''' ================ close pose-branch mouth ================== ''' - fl_dis_pred = fl_dis_pred.reshape((-1, 68, 3)) - index1 = list(range(60-1, 55-1, -1)) - index2 = list(range(68-1, 65-1, -1)) - mean_out = 0.5 * fl_dis_pred[:, 49:54] + 0.5 * fl_dis_pred[:, index1] - fl_dis_pred[:, 49:54] = mean_out * close_mouth_ratio + fl_dis_pred[:, 49:54] * (1 - close_mouth_ratio) - fl_dis_pred[:, index1] = mean_out * close_mouth_ratio + fl_dis_pred[:, index1] * (1 - close_mouth_ratio) - mean_in = 0.5 * (fl_dis_pred[:, 61:64] + fl_dis_pred[:, index2]) - fl_dis_pred[:, 61:64] = mean_in * close_mouth_ratio + fl_dis_pred[:, 61:64] * (1 - close_mouth_ratio) - fl_dis_pred[:, index2] = mean_in * close_mouth_ratio + fl_dis_pred[:, index2] * (1 - close_mouth_ratio) - fl_dis_pred = fl_dis_pred.reshape(-1, 204) - ''' ============================================================= ''' - - fl_dis_pred = torch.tensor(fl_dis_pred).to(device) * self.opt_parser.amp_pos - - residual_face_id = baseline_face_id - - # ''' CALIBRATION ''' - baseline_pred_fls, _ = self.C(aus[:, 0:18, :], residual_face_id) - baseline_pred_fls = self.__calib_baseline_pred_fls__(baseline_pred_fls) - fl_dis_pred += baseline_pred_fls - - return fl_dis_pred, face_id[0:1, :] - - def __calib_baseline_pred_fls_old_(self, baseline_pred_fls, residual_face_id, aus): - mean_face_id = torch.mean(baseline_pred_fls.detach(), dim=0, keepdim=True) - residual_face_id -= mean_face_id.view(1, 204) * 1. - baseline_pred_fls, _ = self.C(aus, residual_face_id) - baseline_pred_fls[:, 48 * 3::3] *= self.opt_parser.amp_lip_x # mouth x - baseline_pred_fls[:, 48 * 3 + 1::3] *= self.opt_parser.amp_lip_y # mouth y - return baseline_pred_fls - - def __calib_baseline_pred_fls__(self, baseline_pred_fls, ratio=0.5): - np_fl_dis_pred = baseline_pred_fls.detach().cpu().numpy() - K = int(np_fl_dis_pred.shape[0] * ratio) - for calib_i in range(204): - min_k_idx = np.argpartition(np_fl_dis_pred[:, calib_i], K) - m = np.mean(np_fl_dis_pred[min_k_idx[:K], calib_i]) - np_fl_dis_pred[:, calib_i] = np_fl_dis_pred[:, calib_i] - m - baseline_pred_fls = torch.tensor(np_fl_dis_pred, requires_grad=False).to(device) - baseline_pred_fls[:, 48 * 3::3] *= self.opt_parser.amp_lip_x # mouth x - baseline_pred_fls[:, 48 * 3 + 1::3] *= self.opt_parser.amp_lip_y # mouth y - return baseline_pred_fls - - def __train_pass__(self, au_emb=None, centerize_face=False, no_y_rotation=False, vis_fls=False): - - # Step 1: init setup - self.G.eval() - self.C.eval() - data = self.eval_data - dataloader = self.eval_dataloader - - # Step 2: train for each batch - for i, batch in enumerate(dataloader): - - global_id, video_name = data[i][0][1][0], data[i][0][1][1][:-4] - - # Step 2.1: load batch data from dataloader (in segments) - inputs_fl, inputs_au, inputs_emb = batch - - keys = self.opt_parser.reuse_train_emb_list - if(len(keys) == 0): - keys = ['audio_embed'] - for key in keys: # ['45hn7-LXDX8']: #['sxCbrYjBsGA']:# - # load saved emb - if(au_emb is None): - emb_val = self.test_embs[key] - else: - emb_val = au_emb[i] - - inputs_emb = np.tile(emb_val, (inputs_emb.shape[0], 1)) - inputs_emb = torch.tensor(inputs_emb, dtype=torch.float, requires_grad=False) - inputs_fl, inputs_au, inputs_emb = inputs_fl.to(device), inputs_au.to(device), inputs_emb.to(device) - - std_fls_list, fls_pred_face_id_list, fls_pred_pos_list = [], [], [] - seg_bs = 512 - - for j in range(0, inputs_fl.shape[0], seg_bs): - - # Step 3.1: load 
segments - inputs_fl_segments = inputs_fl[j: j + seg_bs] - inputs_au_segments = inputs_au[j: j + seg_bs] - inputs_emb_segments = inputs_emb[j: j + seg_bs] - - if(inputs_fl_segments.shape[0] < 10): - continue - - input_face_id = self.std_face_id - - fl_dis_pred_pos, input_face_id = \ - self.__train_face_and_pos__(inputs_fl_segments, inputs_au_segments, inputs_emb_segments, - input_face_id) - - fl_dis_pred_pos = (fl_dis_pred_pos + input_face_id).data.cpu().numpy() - ''' solve inverse lip ''' - fl_dis_pred_pos = self.__solve_inverse_lip2__(fl_dis_pred_pos) - fls_pred_pos_list += [fl_dis_pred_pos] - - fake_fls_np = np.concatenate(fls_pred_pos_list) - - # revise nose top point - fake_fls_np[:, 27 * 3:28 * 3] = fake_fls_np[:, 28 * 3:29 * 3] * 2 - fake_fls_np[:, 29 * 3:30 * 3] - - # fake_fls_np[:, 48*3+1::3] += 0.1 - - # smooth - from scipy.signal import savgol_filter - fake_fls_np = savgol_filter(fake_fls_np, 5, 3, axis=0) - - if(centerize_face): - std_m = np.mean(self.std_face_id.detach().cpu().numpy().reshape((1, 68, 3)), - axis=1, keepdims=True) - fake_fls_np = fake_fls_np.reshape((-1, 68, 3)) - fake_fls_np = fake_fls_np - np.mean(fake_fls_np, axis=1, keepdims=True) + std_m - fake_fls_np = fake_fls_np.reshape((-1, 68 * 3)) - - if(no_y_rotation): - std = self.std_face_id.detach().cpu().numpy().reshape(68, 3) - std_t_shape = std[self.t_shape_idx, :] - fake_fls_np = fake_fls_np.reshape((fake_fls_np.shape[0], 68, 3)) - frame_t_shape = fake_fls_np[:, self.t_shape_idx, :] - from util.icp import icp - from scipy.spatial.transform import Rotation as R - for i in range(frame_t_shape.shape[0]): - T, distance, itr = icp(frame_t_shape[i], std_t_shape) - landmarks = np.hstack((frame_t_shape[i], np.ones((9, 1)))) - rot_mat = T[:3, :3] - r = R.from_dcm(rot_mat).as_euler('xyz') - r = [0., r[1], r[2]] - r = R.from_euler('xyz', r).as_dcm() - # print(frame_t_shape[i, 0], r) - landmarks = np.hstack((fake_fls_np[i] - T[:3, 3:4].T, np.ones((68, 1)))) - T2 = np.hstack((r, T[:3, 3:4])) - fake_fls_np[i] = np.dot(T2, landmarks.T).T - # print(frame_t_shape[i, 0]) - fake_fls_np = fake_fls_np.reshape((-1, 68 * 3)) - - filename = 'pred_fls_{}_{}.txt'.format(video_name.split('\\')[-1].split('/')[-1], key) - np.savetxt(os.path.join(self.opt_parser.output_folder, filename), fake_fls_np, fmt='%.6f') - - # ''' Visualize result in landmarks ''' - if(vis_fls): - from util.vis import Vis - Vis(fls=fake_fls_np, filename=video_name.split('\\')[-1].split('/')[-1], fps=62.5, - audio_filenam=os.path.join('MakeItTalk/examples', video_name.split('\\')[-1].split('/')[-1]+'.wav')) - - - def __close_face_lip__(self, fl): - facelandmark = fl.reshape(-1, 68, 3) - from util.geo_math import area_of_polygon - min_area_lip, idx = 999, 0 - for i, fls in enumerate(facelandmark): - area_of_mouth = area_of_polygon(fls[list(range(60, 68)), 0:2]) - if (area_of_mouth < min_area_lip): - min_area_lip = area_of_mouth - idx = i - return idx - - def test(self, au_emb=None): - with torch.no_grad(): - self.__train_pass__(au_emb, vis_fls=True) - - def __solve_inverse_lip2__(self, fl_dis_pred_pos_numpy): - for j in range(fl_dis_pred_pos_numpy.shape[0]): - init_face = self.std_face_id.detach().cpu().numpy() - from util.geo_math import area_of_signed_polygon - fls = fl_dis_pred_pos_numpy[j].reshape(68, 3) - area_of_mouth = area_of_signed_polygon(fls[list(range(60, 68)), 0:2]) - if (area_of_mouth < 0): - fl_dis_pred_pos_numpy[j, 65 * 3:66 * 3] = 0.5 *(fl_dis_pred_pos_numpy[j, 63 * 3:64 * 3] + fl_dis_pred_pos_numpy[j, 65 * 3:66 * 3]) - fl_dis_pred_pos_numpy[j, 63 * 
3:64 * 3] = fl_dis_pred_pos_numpy[j, 65 * 3:66 * 3] - fl_dis_pred_pos_numpy[j, 66 * 3:67 * 3] = 0.5 *(fl_dis_pred_pos_numpy[j, 62 * 3:63 * 3] + fl_dis_pred_pos_numpy[j, 66 * 3:67 * 3]) - fl_dis_pred_pos_numpy[j, 62 * 3:63 * 3] = fl_dis_pred_pos_numpy[j, 66 * 3:67 * 3] - fl_dis_pred_pos_numpy[j, 67 * 3:68 * 3] = 0.5 *(fl_dis_pred_pos_numpy[j, 61 * 3:62 * 3] + fl_dis_pred_pos_numpy[j, 67 * 3:68 * 3]) - fl_dis_pred_pos_numpy[j, 61 * 3:62 * 3] = fl_dis_pred_pos_numpy[j, 67 * 3:68 * 3] - p = max([j-1, 0]) - fl_dis_pred_pos_numpy[j, 55 * 3+1:59 * 3+1:3] = fl_dis_pred_pos_numpy[j, 64 * 3+1:68 * 3+1:3] \ - + fl_dis_pred_pos_numpy[p, 55 * 3+1:59 * 3+1:3] \ - - fl_dis_pred_pos_numpy[p, 64 * 3+1:68 * 3+1:3] - fl_dis_pred_pos_numpy[j, 59 * 3+1:60 * 3+1:3] = fl_dis_pred_pos_numpy[j, 60 * 3+1:61 * 3+1:3] \ - + fl_dis_pred_pos_numpy[p, 59 * 3+1:60 * 3+1:3] \ - - fl_dis_pred_pos_numpy[p, 60 * 3+1:61 * 3+1:3] - fl_dis_pred_pos_numpy[j, 49 * 3+1:54 * 3+1:3] = fl_dis_pred_pos_numpy[j, 60 * 3+1:65 * 3+1:3] \ - + fl_dis_pred_pos_numpy[p, 49 * 3+1:54 * 3+1:3] \ - - fl_dis_pred_pos_numpy[p, 60 * 3+1:65 * 3+1:3] - return fl_dis_pred_pos_numpy - - - - diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/models/__init__.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/models/__init__.py deleted file mode 100644 index 7f3999734455352473532ef25cddf059eb5baee3..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/src/models/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - # Copyright 2020 Adobe - # All Rights Reserved. - - # NOTICE: Adobe permits you to use, modify, and distribute this file in - # accordance with the terms of the Adobe license agreement accompanying - # it. - -""" - diff --git a/spaces/mattiaspaul/chasingclouds/dist/readme.md b/spaces/mattiaspaul/chasingclouds/dist/readme.md deleted file mode 100644 index c0a95140edf7e172e2ae4e567435aa75e8c85d4d..0000000000000000000000000000000000000000 --- a/spaces/mattiaspaul/chasingclouds/dist/readme.md +++ /dev/null @@ -1 +0,0 @@ -#binaries for Adam DiVRoC \ No newline at end of file diff --git a/spaces/maxmax20160403/sovits5.0/vits_decoder/alias/__init__.py b/spaces/maxmax20160403/sovits5.0/vits_decoder/alias/__init__.py deleted file mode 100644 index a2318b63198250856809c0cb46210a4147b829bc..0000000000000000000000000000000000000000 --- a/spaces/maxmax20160403/sovits5.0/vits_decoder/alias/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 -# LICENSE is in incl_licenses directory. 
- -from .filter import * -from .resample import * -from .act import * \ No newline at end of file diff --git a/spaces/merle/PROTEIN_GENERATOR/model/utils/calc_dssp.py b/spaces/merle/PROTEIN_GENERATOR/model/utils/calc_dssp.py deleted file mode 100644 index fe2b975316f2de89d021d4dff442182192d5b7f8..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/model/utils/calc_dssp.py +++ /dev/null @@ -1,234 +0,0 @@ -#@title get secondary structure (SSE) from given PDB file -#@markdown So far it seems the best solution is to steal code from biotite -#@markdown which calculates the SSE of a peptide chain based on the P-SEA algorithm (Labesse 1997) -# CODE FROM BIOKITE -# From Krypton -import numpy as np -import random -import torch - -def vector_dot(v1,v2): - return (v1*v2).sum(axis=-1) - -def norm_vector(v): - factor = np.linalg.norm(v, axis=-1) - if isinstance(factor, np.ndarray): - v /= factor[..., np.newaxis] - else: - v /= factor - return v - -def coord(x): - return np.asarray(x) -def displacement(atoms1, atoms2): - v1 = coord(atoms1) - v2 = coord(atoms2) - if len(v1.shape) <= len(v2.shape): - diff = v2 - v1 - else: - diff = -(v1 - v2) - return diff -def distance(atoms1, atoms2): - diff = displacement(atoms1, atoms2) - return np.sqrt(vector_dot(diff, diff)) - -def angle(atoms1, atoms2, atoms3): - v1 = displacement(atoms1, atoms2) - v2 = displacement(atoms3, atoms2) - norm_vector(v1) - norm_vector(v2) - return np.arccos(vector_dot(v1,v2)) - -def dihedral(atoms1, atoms2, atoms3, atoms4): - v1 = displacement(atoms1, atoms2) - v2 = displacement(atoms2, atoms3) - v3 = displacement(atoms3, atoms4) - norm_vector(v1) - norm_vector(v2) - norm_vector(v3) - - n1 = np.cross(v1, v2) - n2 = np.cross(v2, v3) - - # Calculation using atan2, to ensure the correct sign of the angle - x = vector_dot(n1,n2) - y = vector_dot(np.cross(n1,n2), v2) - return np.arctan2(y,x) - -def replace_letters(arr): - # Create a dictionary that maps the letters 'a', 'b', and 'c' to the corresponding numbers - letter_to_number = {'a': 0, 'b': 1, 'c': 2} - - # Create a new array that will hold the numbers - nums = [] - - # Loop through the input array and replace the letters with the corresponding numbers - for letter in arr: - if letter in letter_to_number: - nums.append(letter_to_number[letter]) - else: - nums.append(letter) - - return np.array(nums) - -def replace_with_mask(arr, percentage, replace_loops=False): - # Make sure the percentage is between 0 and 100 - percentage = min(max(percentage, 0), 100) - - # Calculate the number of values to replace - num_to_replace = int(len(arr) * percentage / 100) - - # Choose a random subset of the array to replace - replace_indices = random.sample(range(len(arr)), num_to_replace) - - # Replace the values at the chosen indices with the number 3 - for i in replace_indices: - arr[i] = 3 - - if replace_loops: - for i in arr: - if arr[i] == 2: - arr[i] = 3 - - return arr - -def annotate_sse(ca_coord, percentage_mask=0, replace_loops=False): - _radians_to_angle = 2*np.pi/360 - - _r_helix = ((89-12)*_radians_to_angle, (89+12)*_radians_to_angle) - _a_helix = ((50-20)*_radians_to_angle, (50+20)*_radians_to_angle) - _d2_helix = ((5.5-0.5), (5.5+0.5)) - _d3_helix = ((5.3-0.5), (5.3+0.5)) - _d4_helix = ((6.4-0.6), (6.4+0.6)) - - _r_strand = ((124-14)*_radians_to_angle, (124+14)*_radians_to_angle) - _a_strand = ((-180)*_radians_to_angle, (-125)*_radians_to_angle, - (145)*_radians_to_angle, (180)*_radians_to_angle) - _d2_strand = ((6.7-0.6), (6.7+0.6)) - _d3_strand = ((9.9-0.9), 
(9.9+0.9)) - _d4_strand = ((12.4-1.1), (12.4+1.1)) - - # Filter all CA atoms in the relevant chain. - - d2i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan) - d3i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan) - d4i_coord = np.full(( len(ca_coord), 2, 3 ), np.nan) - ri_coord = np.full(( len(ca_coord), 3, 3 ), np.nan) - ai_coord = np.full(( len(ca_coord), 4, 3 ), np.nan) - - # The distances and angles are not defined for the entire interval, - # therefore the indices do not have the full range - # Values that are not defined are NaN - for i in range(1, len(ca_coord)-1): - d2i_coord[i] = (ca_coord[i-1], ca_coord[i+1]) - for i in range(1, len(ca_coord)-2): - d3i_coord[i] = (ca_coord[i-1], ca_coord[i+2]) - for i in range(1, len(ca_coord)-3): - d4i_coord[i] = (ca_coord[i-1], ca_coord[i+3]) - for i in range(1, len(ca_coord)-1): - ri_coord[i] = (ca_coord[i-1], ca_coord[i], ca_coord[i+1]) - for i in range(1, len(ca_coord)-2): - ai_coord[i] = (ca_coord[i-1], ca_coord[i], - ca_coord[i+1], ca_coord[i+2]) - - d2i = distance(d2i_coord[:,0], d2i_coord[:,1]) - d3i = distance(d3i_coord[:,0], d3i_coord[:,1]) - d4i = distance(d4i_coord[:,0], d4i_coord[:,1]) - ri = angle(ri_coord[:,0], ri_coord[:,1], ri_coord[:,2]) - ai = dihedral(ai_coord[:,0], ai_coord[:,1], - ai_coord[:,2], ai_coord[:,3]) - - sse = np.full(len(ca_coord), "c", dtype="U1") - - # Annotate helices - # Find CA that meet criteria for potential helices - is_pot_helix = np.zeros(len(sse), dtype=bool) - for i in range(len(sse)): - if ( - d3i[i] >= _d3_helix[0] and d3i[i] <= _d3_helix[1] - and d4i[i] >= _d4_helix[0] and d4i[i] <= _d4_helix[1] - ) or ( - ri[i] >= _r_helix[0] and ri[i] <= _r_helix[1] - and ai[i] >= _a_helix[0] and ai[i] <= _a_helix[1] - ): - is_pot_helix[i] = True - # Real helices are 5 consecutive helix elements - is_helix = np.zeros(len(sse), dtype=bool) - counter = 0 - for i in range(len(sse)): - if is_pot_helix[i]: - counter += 1 - else: - if counter >= 5: - is_helix[i-counter : i] = True - counter = 0 - # Extend the helices by one at each end if CA meets extension criteria - i = 0 - while i < len(sse): - if is_helix[i]: - sse[i] = "a" - if ( - d3i[i-1] >= _d3_helix[0] and d3i[i-1] <= _d3_helix[1] - ) or ( - ri[i-1] >= _r_helix[0] and ri[i-1] <= _r_helix[1] - ): - sse[i-1] = "a" - sse[i] = "a" - if ( - d3i[i+1] >= _d3_helix[0] and d3i[i+1] <= _d3_helix[1] - ) or ( - ri[i+1] >= _r_helix[0] and ri[i+1] <= _r_helix[1] - ): - sse[i+1] = "a" - i += 1 - - # Annotate sheets - # Find CA that meet criteria for potential strands - is_pot_strand = np.zeros(len(sse), dtype=bool) - for i in range(len(sse)): - if ( d2i[i] >= _d2_strand[0] and d2i[i] <= _d2_strand[1] - and d3i[i] >= _d3_strand[0] and d3i[i] <= _d3_strand[1] - and d4i[i] >= _d4_strand[0] and d4i[i] <= _d4_strand[1] - ) or ( - ri[i] >= _r_strand[0] and ri[i] <= _r_strand[1] - and ( (ai[i] >= _a_strand[0] and ai[i] <= _a_strand[1]) - or (ai[i] >= _a_strand[2] and ai[i] <= _a_strand[3])) - ): - is_pot_strand[i] = True - # Real strands are 5 consecutive strand elements, - # or shorter fragments of at least 3 consecutive strand residues, - # if they are in hydrogen bond proximity to 5 other residues - pot_strand_coord = ca_coord[is_pot_strand] - is_strand = np.zeros(len(sse), dtype=bool) - counter = 0 - contacts = 0 - for i in range(len(sse)): - if is_pot_strand[i]: - counter += 1 - coord = ca_coord[i] - for strand_coord in ca_coord: - dist = distance(coord, strand_coord) - if dist >= 4.2 and dist <= 5.2: - contacts += 1 - else: - if counter >= 4: - is_strand[i-counter : i] = True 
- elif counter == 3 and contacts >= 5: - is_strand[i-counter : i] = True - counter = 0 - contacts = 0 - # Extend the strands by one at each end if CA meets extension criteria - i = 0 - while i < len(sse): - if is_strand[i]: - sse[i] = "b" - if d3i[i-1] >= _d3_strand[0] and d3i[i-1] <= _d3_strand[1]: - sse[i-1] = "b" - sse[i] = "b" - if d3i[i+1] >= _d3_strand[0] and d3i[i+1] <= _d3_strand[1]: - sse[i+1] = "b" - i += 1 - sse=replace_letters(sse) - sse=replace_with_mask(sse, percentage_mask, replace_loops=replace_loops) - sse=torch.nn.functional.one_hot(torch.tensor(sse), num_classes=4) - return sse diff --git a/spaces/merve/anonymization/public/anonymization/style-graph-scroll.css b/spaces/merve/anonymization/public/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. 
*/ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/fill-in-the-blank/public/base-rate/style.css b/spaces/merve/fill-in-the-blank/public/base-rate/style.css deleted file mode 100644 index 5ba8d020fda588a3e7f61ff3fab4d377aa3bd4f2..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/base-rate/style.css +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - -.tooltip { - top: -1000px; - position: fixed; - padding: 10px; - background: rgba(255, 255, 255, .90); - border: 1px solid lightgray; - pointer-events: none; - width: auto; - -} -.tooltip-hidden{ - opacity: 0; - transition: all .3s; - transition-delay: .1s; -} - -@media (max-width: 590px){ - div.tooltip{ - bottom: -1px; - width: calc(100%); - left: -1px !important; - right: -1px !important; - top: auto !important; - width: auto !important; - } -} - -svg{ - overflow: visible; -} - -.domain{ - display: none; -} - -#big-matrix text{ - font-family: 'Google Sans', sans-serif; - /*pointer-events: none;*/ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; - text-shadow: 0 1px 0 rgba(255,255,255, .6), 1px 0 0 rgba(255,255,255, .6), 0 -1px 0 rgba(255,255,255, .6), -1px 0 0 rgba(255,255,255, .6); -} - - -body{ - max-width: 900px; -} - -h1{ -} - -h1{ - /*text-align: center;*/ -} - -h3{ - font-size: 20px; -} -#big-matrix{ - text-align: center; - margin-top: 40px; - font-family: 'Google Sans', sans-serif; - -} -div.big-container{ - display: inline-block; - margin: 10px; -} - -#metrics{ - text-align: center; -} -div.metrics-container{ - display: inline-block; - margin: 10px; -} - -div.metrics-container > div{ - display: inline-block; - vertical-align: middle; - pointer-events: none; -} - - - - -.drag{ - cursor: pointer; - fill-opacity: 0; - fill: #f0f; - stroke-opacity: 0; -} - -svg.dragging{ - cursor: pointer; -} - -sl{ - /*background: #000; */ - color: #000; - border: 1px solid #eee; - width: 1em; - display: inline-block; - padding-left: 2px; - padding-right: 2px; - font-style: normal; -} - -#instructions{ - margin-top: 10px; - margin-bottom: 10px; - text-align: center; -} - - - - - diff --git a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/watch-files.js b/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/watch-files.js deleted file mode 100644 index 25d1fcfe5b17fa1e63323e0389379264463572af..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/scatter-plot-colab/spearman-distribution/watch-files.js +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright 2021 Google LLC. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - -!(function(){ - function watchFile(path){ - var lastStr = '' - - console.log(path) - function check(){ - d3.text(path + '?' + Math.random(), (err, nextStr) => { - if (err){ - console.log(err) - return check() - } - - if (nextStr == lastStr) return - lastStr = nextStr - - if (path.includes('.js')){ - // console.log('js', new Date()) - Function(nextStr.replace('\n', ';').replace('\n', ';'))() - } - - if (path.includes('.css')){ - // console.log('css', new Date()) - - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path) || d.href.includes('__hs_placeholder')) - .filter((d, i) => i == 0) - .forEach(d => d.href = path + '?' + Math.random()) - - throw 'up' - } - }) - - if (python_settings.isDev) setTimeout(check, 100) - } - check() - } - - ;[ - '../spearman-compare/list.css', - 'style.css', - '../two-sentences/init-scatter.js', - '../two-sentences/init-util.js', - '../two-sentences/init-pair.js', - 'init.js' - ].forEach(filename => { - var root = document.currentScript.src.replace('watch-files.js', '').split('?')[0] - var path = root + filename - console.log(filename) - - if (python_settings.isDev){ - watchFile(path) - } else { - - if (path.includes('.js')){ - var node = document.createElement('script') - node.setAttribute('src', path) - document.body.appendChild(node) - } - - if (path.includes('.css')){ - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path) || d.href.includes('__hs_placeholder')) - .filter((d, i) => i == 0) - .forEach(d => d.href = path + '?' 
+ Math.random()) - } - } - }) -})() - - - diff --git a/spaces/merve/hidden-bias/public/dataset-worldviews/shape-params.js b/spaces/merve/hidden-bias/public/dataset-worldviews/shape-params.js deleted file mode 100644 index b36a500b99b8789ffe044a738c86e1459317974a..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/dataset-worldviews/shape-params.js +++ /dev/null @@ -1,527 +0,0 @@ -const shapeParams = [ - { - shape_name: "circle", - pointiness: "round", - size: "large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 25.0 0 A 0.5 0.5 0 0 0 -50 0 M -50 0 A 0.5 0.5 0 0 0 25.0 0", - startX: 47.5, - startY: 84.21875, - endX: 474.5, - endY: 293.828125, - initialX: 50.5, - initialY: 85.21875, - }, - { - shape_name: "circle", - pointiness: "round", - size: "large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 22.5 0 A 0.5 0.5 0 0 0 -45 0 M -45 0 A 0.5 0.5 0 0 0 22.5 0", - startX: 247, - startY: 433.828125, - endX: 641.5, - endY: 248.828125, - initialX: 575.5, - initialY: 157.21875, - }, - { - shape_name: "circle", - pointiness: "round", - size: "large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 30.0 0 A 0.5 0.5 0 0 0 -60 0 M -60 0 A 0.5 0.5 0 0 0 30.0 0", - startX: 189.5, - startY: 170.21875, - endX: 799.5, - endY: 325.828125, - initialX: 511.5, - initialY: 75.21875, - }, - { - shape_name: "circle", - pointiness: "round", - size: "large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 25.0 0 A 0.5 0.5 0 0 0 -50 0 M -50 0 A 0.5 0.5 0 0 0 25.0 0", - startX: 37.5, - startY: 440.21875, - endX: 475, - endY: 425.21875, - initialX: 715.5, - initialY: 213.21875, - }, - { - shape_name: "circle", - pointiness: "round", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 17.5 0 A 0.5 0.5 0 0 0 -35 0 M -35 0 A 0.5 0.5 0 0 0 17.5 0", - startX: 282, - startY: 207.828125, - endX: 460.5, - endY: 217.21875, - initialX: 280.5, - initialY: 146.21875, - }, - { - shape_name: "circle", - pointiness: "round", - size: "rt_small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 12.5 0 A 0.5 0.5 0 0 0 -25 0 M -25 0 A 0.5 0.5 0 0 0 12.5 0", - startX: 125.5, - startY: 418.21875, - endX: 715.5, - endY: 76.828125, - initialX: 680.5, - initialY: 147.21875, - }, - { - shape_name: "rect", - pointiness: "pointy", - size: "rt_large", - gt: "unshaded", - label: "shaded", - correctness: "incorrect", - path: "M -45 -15 L 25.0 -15 L 25.0 5.0 L -45 5.0 L -45 -15", - startX: 77.5, - startY: 35.21875, - endX: 712.5, - endY: 124.828125, - initialX: 79.5, - initialY: 35.21875, - }, - { - shape_name: "rect", - pointiness: "pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M -40 -60 L -20 -70 L 18 3 L -3 12.5 L -40 -60", - startX: 320, - startY: 451.828125, - endX: 707.5, - endY: 339.828125, - initialX: 672.5, - initialY: 104.21875, - }, - { - shape_name: "rect", - pointiness: "pointy", - size: "rt_small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M -30 -15 L 12.5 -15 L 12.5 5.5 L -30 5.5 L -30 -15", - startX: 29.5, - startY: 389.21875, - endX: 774.5, - endY: 78.828125, - initialX: 115.5, - initialY: 234.21875, - }, - { - shape_name: "rect", - pointiness: "pointy", - size: "rt_small", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M -11 -34 L 4.5 -34 L 4.5 6.0 L -11 6.0 L -11 -34", - startX: 242, - startY: 271.828125, - 
endX: 574.5, - endY: 391.828125, - initialX: 258.5, - initialY: 230.21875, - }, - { - shape_name: "rect", - pointiness: "pointy", - size: "rt_small", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M -10 -45 L 4.5 -45 L 4.5 6.0 L -10 6.0 L -10 -45", - startX: 76.5, - startY: 177.21875, - endX: 522.5, - endY: 327.828125, - initialX: 89.5, - initialY: 170.21875, - }, - { - shape_name: "rt_circle", - pointiness: "pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 25.0 0 M -50 0 L -44 2.0 L -50 3.5 L -44 5.0 L -48 7.5 L -41 8.0 L -45 10.5 L -37 10.5 L -41 14.0 L -34 14.5 L -35 17.5 L -29 16.5 L -28 20.5 L -22 19.5 L -21 22.5 L -14 21.0 L -12 24.0 L -7 22.0 L -4 24.5 L 0 22.5 L 2.0 24.5 L 3.5 21.5 L 5.5 24.0 L 7.5 21.0 L 9.5 22.5 L 9.5 19.5 L 12.5 21.0 L 13.0 17.5 L 16.0 18.5 L 15.5 15.0 L 19.0 15.5 L 17.0 12.5 L 21.0 12.5 L 18.5 10.0 L 22.5 9.5 L 19.5 7.0 L 23.5 6.5 L 20.0 4.5 L 24.0 4.0 L 20.5 2.0 L 25.0 0 L 21.0 -3 L 25.0 -6 L 21.0 -9 L 24.0 -13 L 20.5 -14 L 23.0 -19 L 20.0 -20 L 21.5 -25 L 18.0 -25 L 19.0 -32 L 15.0 -30 L 16.0 -38 L 12.5 -36 L 13.0 -43 L 10.0 -40 L 10.0 -46 L 7.0 -42 L 6.5 -48 L 4.0 -43 L 3.5 -49 L 1.5 -43 L 0 -50 L -3 -43 L -8 -49 L -9 -43 L -15 -48 L -15 -42 L -21 -46 L -21 -40 L -26 -43 L -26 -37 L -31 -39 L -30 -33 L -37 -34 L -35 -28 L -40 -29 L -38 -24 L -44 -25 L -42 -20 L -46 -20 L -44 -15 L -49 -14 L -45 -9 L -50 -6 L -45 -3 L -50 0", - startX: 319, - startY: 290.828125, - endX: 738, - endY: 410.21875, - initialX: 605.5, - initialY: 83.21875, - }, - { - shape_name: "rt_circle", - pointiness: "round", - size: "large", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 26.5 1.0 C 34.0 -75 -43 -70 -36 -34 M -36 -34 C -42 -14 -70 -34 -66 0 V 0 C -66 19.5 -47 26.0 3.5 26.5 C 11.5 28.0 26.0 13.0 26.5 1.0", - startX: 154.5, - startY: 89.21875, - endX: 519.5, - endY: 128.828125, - initialX: 151.5, - initialY: 88.21875, - }, - { - shape_name: "rt_circle", - pointiness: "round", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 26.5 1.0 C 34.0 -75 -43 -70 -42 -51 M -42 -51 C -42 -14 -82 -12 -38 -4 V -4 C -9 0 -47 26.0 2.0 24.0 C 16.5 22.0 23.5 12.0 26.5 1.0", - startX: 254, - startY: 368.828125, - endX: 749.5, - endY: 254.828125, - initialX: 497.5, - initialY: 192.21875, - }, - { - shape_name: "rt_circle", - pointiness: "round", - size: "rt_small", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 17.0 -9 C 9.5 -44 -1 -65 -40 -34 M -40 -34 C -61 -15 -59 0.5 -38 9.5 C -19 19.0 -47 26.0 8.0 15.5 C 16.5 12.5 23.5 12.0 17.0 -9", - startX: 42.5, - startY: 185.21875, - endX: 664, - endY: 448.21875, - initialX: 410.5, - initialY: 148.21875, - }, - { - shape_name: "rt_circle", - pointiness: "rt_pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 14.0 3.5 L -6 0.5 L 15.0 -5 A 0.5 0.5 0 0 0 -48 0 M -48 0 A 0.5 0.5 0 0 0 14.0 3.5", - startX: 48.5, - startY: 252.21875, - endX: 576, - endY: 443.21875, - initialX: 160.5, - initialY: 155.21875, - }, - { - shape_name: "rt_circle", - pointiness: "rt_round", - size: "small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 6.0 1.5 C 5.5 -3 0 4.5 -3 -1 C -3 -10 2.5 -7 6.0 -4 A 0.5 0.5 0 0 0 -18 0 M -18 0 A 0.5 0.5 0 0 0 6.0 1.5", - startX: 334, - startY: 185.828125, - endX: 652.5, - endY: 83.828125, - initialX: 13.5, - initialY: 232.21875, - }, - { - shape_name: "rt_circle", - pointiness: 
"rt_round", - size: "small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M -10 0 A 0.5 0.5 0 0 0 5.0 0 C 5.0 -12 3.5 -17 0 -10 C -7 -17 -10 -12 -10 0", - startX: 318, - startY: 355.828125, - endX: 581, - endY: 145.21875, - initialX: 293.5, - initialY: 190.21875, - }, - { - shape_name: "rt_circle", - pointiness: "rt_round", - size: "small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M -10 0 A 0.5 0.5 0 0 0 4.5 -3 C 5.5 0 6.5 4.5 7.5 0.5 C 7.5 -11 2.5 -18 -7 -11 C 3.5 -4 -10 -12 -10 0", - startX: 80, - startY: 308.828125, - endX: 731.5, - endY: 42.828125, - initialX: 621.5, - initialY: 132.21875, - }, - { - shape_name: "rt_circle", - pointiness: "rt_round", - size: "small", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 0 10.0 C -20 7.5 -20 -5 -6 -15 L 2.5 -15 C 10.0 -5 10.0 7.5 0 10.0", - startX: 199.5, - startY: 50.21875, - endX: 719.5, - endY: 458.828125, - initialX: 246.5, - initialY: 59.21875, - }, - { - shape_name: "rt_rect", - pointiness: "rt_pointy", - size: "rt_large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 0 20.0 C -50 15.0 -10 35.0 -20 -45 L 10.0 -45 C 5.0 35.0 25.0 15.0 0 20.0", - startX: 93.5, - startY: 261.21875, - endX: 807.5, - endY: 250.828125, - initialX: 57.5, - initialY: 189.21875, - }, - { - shape_name: "rt_rect", - pointiness: "rt_pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M 27.5 7.0 C -50 15.0 -39 33.5 -37 9.5 S -76 -1 -45 -21 C 11.0 -51 23.0 -52 27.5 7.0", - startX: 284.5, - startY: 152.21875, - endX: 544.5, - endY: 230.828125, - initialX: 411.5, - initialY: 73.21875, - }, - { - shape_name: "rt_rect", - pointiness: "rt_pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M -25 -30 L 10.0 -30 C 22.5 0 22.5 0 10.0 15.0 L -25 15.0 C 0 0 0 0 -25 -30", - startX: 219.5, - startY: 99.21875, - endX: 525.5, - endY: 381.828125, - initialX: 213.5, - initialY: 96.21875, - }, - { - shape_name: "rt_rect", - pointiness: "rt_pointy", - size: "rt_large", - gt: "unshaded", - label: "unshaded", - correctness: "correct", - path: "M -25 -50 L 10.0 -50 C 0 0 22.5 0 10.0 25.0 L -25 25.0 C 0 0 -45 0 -25 -50", - startX: 79.5, - startY: 380.21875, - endX: 565.5, - endY: 298.828125, - initialX: 719.5, - initialY: 87.21875, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_pointy", - size: "large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M -45 -50 L 22.5 -50 L 0 34.5 C 0 0 -45 0 -45 -50", - startX: 325.5, - startY: 94.21875, - endX: 636.5, - endY: 360.828125, - initialX: 324.5, - initialY: 88.2, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_round", - size: "large", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M -47 15.0 L -15 -56 C -7 -82 41.5 15.5 28.0 15.5 C 0 15.5 0 15.5 -47 15.0", - startX: 191, - startY: 283.828125, - endX: 796, - endY: 448.21875, - initialX: 349.5, - initialY: 223.21875, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_round", - size: "large", - gt: "unshaded", - label: "shaded", - correctness: "incorrect", - path: "M 21.0 17.5 L -43 17.5 C -31 -26 9.5 -44 16.0 -69 C 24.5 -80 15.5 -12 21.0 17.5", - startX: 163.5, - startY: 446.21875, - endX: 794.5, - endY: 134.828125, - initialX: 622.5, - initialY: 210.21875, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_round", - size: "rt_large", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 
-20 -35 L -20 10 L 25 10 C 25 5 25 5 20 5 C 20 0 20 0 15 0 C 15 -5 15 -5 10 -5 C 10 -10 10 -10 5 -10 C 5 -15 5 -15 0 -15 C 0 -20 0 -20 -5 -20 C -5 -25 -5 -25 -10 -25 C -10 -30 -10 -30 -15 -30 C -15 -35 -15 -35 -20 -35", - startX: 132, - startY: 350.828125, - endX: 643.5, - endY: 149.828125, - initialX: 190.5, - initialY: 240.21875, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_round", - size: "small", - gt: "shaded", - label: "unshaded", - correctness: "incorrect", - path: "M 0 6.5 C 5.0 5.5 8.5 -8 7.5 -10 L -15 -10 C -17 -8 -10 5.5 0 6.5", - startX: 87.5, - startY: 461.21875, - endX: 443.5, - endY: 370.828125, - initialX: 416.5, - initialY: 234.21875, - }, - { - shape_name: "rt_triangle", - pointiness: "rt_round", - size: "small", - gt: "unshaded", - label: "shaded", - correctness: "incorrect", - path: "M 22.5 0 C 22.5 -11.25 11.25 -18.75 0 -15 C 0 -3.75 -11.25 11.25 -8.25 7.5 C -3.75 18.75 11.25 0 22.5 0", - startX: 168, - startY: 330.828125, - endX: 522.5, - endY: 47.828125, - initialX: 402.5, - initialY: 193.21875, - }, - { - shape_name: "triangle", - pointiness: "pointy", - size: "rt_large", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M -9 25.0 L 7.5 25.0 L 0 -45 L -9 25.0", - startX: 126.5, - startY: 249.21875, - endX: 433.5, - endY: 135.828125, - initialX: 219.5, - initialY: 183.21875, - }, - { - shape_name: "triangle", - pointiness: "pointy", - size: "rt_small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M -29 5.0 L 15.0 0 L -29 -16 L -29 5.0", - startX: 277.5, - startY: 98.21875, - endX: 596.5, - endY: 70.828125, - initialX: 280.5, - initialY: 103.21875, - }, - { - shape_name: "triangle", - pointiness: "pointy", - size: "rt_small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 3.5 13.5 L 9.5 -20 L -36 0 L 3.5 13.5", - startX: 257.5, - startY: 53.21875, - endX: 593.5, - endY: 105.828125, - initialX: 546.5, - initialY: 235.21875, - }, - { - shape_name: "triangle", - pointiness: "pointy", - size: "rt_small", - gt: "unshaded", - label: "shaded", - correctness: "incorrect", - path: "M 12.5 10.0 L 0 -35 L -25 10.0 L 12.5 10.0", - startX: 15.5, - startY: 332.8, - endX: 463, - endY: 63.21875, - initialX: 13.5, - initialY: 164.21875, - }, - { - shape_name: "triangle", - pointiness: "pointy", - size: "small", - gt: "shaded", - label: "shaded", - correctness: "correct", - path: "M 4.5 1.5 L 0 -15 L -8 1.5 L 4.5 1.5", - startX: 111, - startY: 180.828125, - endX: 784.5, - endY: 42.828125, - initialX: 195.5, - initialY: 136.21875, - }, -]; diff --git a/spaces/merve/hidden-bias/public/third_party/simple-statistics.min.js b/spaces/merve/hidden-bias/public/third_party/simple-statistics.min.js deleted file mode 100644 index 9191046b7dc959d771a904875817c2b9c26ff0e5..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/third_party/simple-statistics.min.js +++ /dev/null @@ -1,3 +0,0 @@ -// https://github.com/simple-statistics/simple-statistics Copyright (c) 2014, Tom MacWright - -!function(t,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports):"function"==typeof define&&define.amd?define(["exports"],r):r(t.ss={})}(this,function(t){"use strict";function r(t){if(0===t.length)return 0;for(var r,n=t[0],e=0,a=1;a=Math.abs(t[a])?e+=n-r+t[a]:e+=t[a]-r+n,n=r;return n+e}function g(t){if(0===t.length)throw new Error("mean requires at least one data point");return r(t)/t.length}function n(t,r){var n,e,a=g(t),o=0;if(2===r)for(e=0;er&&(r=t[n]);return r}function i(t,r){var 
n=t.length*r;if(0===t.length)throw new Error("quantile requires at least one data point.");if(r<0||1f&&p(t,n,e);sf;)l--}t[n]===f?p(t,n,l):p(t,++l,e),l<=r&&(n=l+1),r<=l&&(e=l-1)}}function p(t,r,n){var e=t[r];t[r]=t[n],t[n]=e}function s(t,r){var n=t.slice();if(Array.isArray(r)){!function(t,r){for(var n=[0],e=0;et[t.length-1])return 1;var n=function(t,r){var n=0,e=0,a=t.length;for(;e>>1]?a=n:e=-~n;return e}(t,r);if(t[n]!==r)return n/t.length;n++;var e=function(t,r){var n=0,e=0,a=t.length;for(;e=t[n=e+a>>>1]?e=-~n:a=n;return e}(t,r);if(e===n)return n/t.length;var a=e-n+1;return a*(e+n)/2/a/t.length}function m(t){var r=s(t,.75),n=s(t,.25);if("number"==typeof r&&"number"==typeof n)return r-n}function d(t){return+s(t,.5)}function b(t){for(var r=d(t),n=[],e=0;e=e[n][u]);--g)(s=x(h,u,o,i)+e[n-1][h-1])n&&(n=t[e]),t[e]t.length)throw new Error("cannot generate more classes than there are data values");var n=f(t);if(1===y(n))return[n];var e=S(r,n.length),a=S(r,n.length);!function(t,r,n){for(var e,a=r[0].length,o=t[Math.floor(a/2)],i=[],u=[],h=0;h=Math.abs(a)&&(c+=1);else if("greater"===n)for(h=0;h<=e;h++)o[h]>=a&&(c+=1);else for(h=0;h<=e;h++)o[h]<=a&&(c+=1);return c/e},t.bisect=function(t,r,n,e,a){if("function"!=typeof t)throw new TypeError("func must be a function");for(var o=0;o 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels 
=hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: 
- x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - 
self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/metricspace/juristische_Ersteinschaetzung_einer_KI/app.py b/spaces/metricspace/juristische_Ersteinschaetzung_einer_KI/app.py deleted file mode 100644 index 92ca3d41f08f149c3726ac4a8221bc1dcdc38354..0000000000000000000000000000000000000000 --- a/spaces/metricspace/juristische_Ersteinschaetzung_einer_KI/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import os -import openai -import gradio as gr - -openai.api_key = os.environ['key'] - -model_name = "ft:gpt-3.5-turbo-0613:metric-space-ug:rechtsberatung2:7zmeC6ps" -prompt0 = "Liegt ein rechtlich relevanter Sachverhalt mit einer passenden Frage vor?" -prompt1 = "Du stellst Fragen, um fehlende Informationen für eine anwaltliche Erstberatung zu klären. Stelle bis zu 10 Fragen für die Klärung der Sachlage und antworte mit „Danke für die Antworten.“, sobald die Sachlage klar ist. Vermeide Fragen zu stellen, die schon benantwortet sind" -prompt2 = "Schreibe den Sachverhalt auf Basis der folgenden Informationen zusammen." -prompt3 = "Schreibe eine anwaltliche Erstberatung auf Basis des geschilderten Sachverhalts. Vermeide Konjuktive, formuliere in gutem Deutsch und kurzen Sätzen." - -def respond_prompt0(Selbstauskunft, Kernfrage, chat): - # preprocess data - - # openai api call prompt 0 - ######## - completion = openai.ChatCompletion.create( - model=model_name, - messages=[ - {"role": "system", "content": prompt0}, - {"role": "user", "content": Selbstauskunft + "\n" + Kernfrage} - ]) - output = completion.choices[0].message.content - ######## - - out = "Kann ich dir mit deinem Anliegen helfen?\n" + output - if "ja" in output[:4].lower(): - out += "\n Ich werde nun noch ein paar Fragen stellen, um den Sachverhalt genauer zu verstehen" # - - # openai api call prompt 1 - ######## - completion = openai.ChatCompletion.create( - model=model_name, - messages=[ - {"role": "system", "content": prompt1}, - {"role": "user", "content": Selbstauskunft + "\n" + Kernfrage} - ]) - output_prompt1 = completion.choices[0].message.content - ######## - - chat = [(output_prompt1, "")] - return out, gr.update(value=chat, visible=True), gr.update(value="", visible=True) - - else: - return out + ". \nLeider kann ich dir noch nicht weiterhelfen. 
Dies ist kein Modell für allgemeine juristische Auskünfte. Prüfe, ob du wirklich eine Frage gestellt hast, die präzise wiedergibt, was du bezogen auf den Kontext erreichen möchtest. Dies braucht das Modell, um die juristischen Erfolgaussichten deines Vorhabens schätzen zu können.", chat, "" - - -def respond_prompt1(Selbstauskunft, Kernfrage, chat, text): - chat[-1][1] = text - # openai api call prompt 1 - ######## - messages=[ - {"role": "system", "content": prompt1}, - {"role": "user", "content": Selbstauskunft + "\n" + Kernfrage} - ] - - for i in range(len(chat)): - messages.append({ "role": "assistant", "content": chat[i][0]}) - messages.append({ "role": "user", "content": chat[i][1] }) - - completion = openai.ChatCompletion.create( - model=model_name, - messages=messages) - - output_prompt1 = completion.choices[0].message.content - ######## - - - if ("" in output_prompt1) or len(chat) > 10: - sachverhalt = respond_prompt2(Selbstauskunft, Kernfrage, chat).replace("Bürger", "Mandant") - chat.append(("Danke für deine Antworten. Hier findest du eine aufarbeitete Zusamenfassung des Sachverhalts. Bitte prüfe dieee und ergänze ggf. noch etwas, bevor ich zu meiner Einschätzung komme.", "")) - - return chat, gr.update(value="", visible=False), gr.update(value=sachverhalt, visible=True), gr.update(value="Weiter zur juristischen Einschätzung", visible=True) - - chat.append((output_prompt1, "")) - - return chat, "", "", "" - -def respond_prompt2(Selbstauskunft, Kernfrage, chat): - # openai api call prompt 2 - ######## - content = Selbstauskunft + "\n" + Kernfrage - - for i in range(len(chat)-1): - content += "\n\n" + chat[i][0] + "\n" - content += chat[i][1] - - completion = openai.ChatCompletion.create( - model=model_name, - messages=[ - { "role": "system", "content": prompt2 }, - { "role": "user", "content": content}]) - - output_prompt2 = completion.choices[0].message.content - ######## - - return output_prompt2 - -def respond_prompt3(Selbstauskunft, Kernfrage, chat, Sachverhalt): - # openai api call prompt 3 - ######## - content = Selbstauskunft + "\n" + Kernfrage - - for i in range(len(chat)-1): - content += "\n\n" + chat[i][0] + "\n" - content += chat[i][1] - - completion = openai.ChatCompletion.create( - model=model_name, - messages=[ - { "role": "system", "content": prompt3 }, - { "role": "user", "content": content + "\nSachverhalt: " + Sachverhalt}]) - - - output_prompt3 =completion.choices[0].message.content + "\n\n(Dies ist eine maschinell erstellte Einschätzung einer experimentellen KI-Software basierend auf Informationen im Internet und keine Anwaltliche Erstberatung eines Anwalts. 
Interpretieren Sie das Ergebnis daher mit Vorsicht.)" - ######## - - return gr.update(value=output_prompt3, visible=True), gr.update(visible=False) - -with gr.Blocks() as demo: - title = gr.Markdown("Juristische Ersteinschätzung einer KI.") - description = gr.Markdown("Disclaimer: Dies ist keine Anwaltliche Erstberatung, sondern ein Forschungsprojekt.") - Selbstauskunft = gr.Textbox("", label="Erzähl mir, was passiert ist.") - Kernfrage = gr.Textbox("", label="Stelle eine Frage, was du juristisch erreichen möchtest.") - - button_prompt1 = gr.Button(value="Weiter") - - label_prompt0 = gr.Label() - - - chat = gr.Chatbot(visible=False) - msg = gr.Textbox(visible=False, label="Antwort") - Sachverhalt = gr.Textbox(visible=False, label = "Zusammenfassung") - Erstberatung = gr.Textbox(visible=False, label="Rechtliche Einschätzung") - button_erstberatung = gr.Button(value="Weiter zur juristischen Einschätzung", visible=False) - - button_prompt1.click(respond_prompt0, [Selbstauskunft, Kernfrage, chat], outputs=[label_prompt0, chat, msg]) - - msg.submit(respond_prompt1, [Selbstauskunft, Kernfrage, chat, msg], [chat, msg, Sachverhalt, button_erstberatung]) - - button_erstberatung.click(respond_prompt3, [Selbstauskunft, Kernfrage, chat, Sachverhalt], [Erstberatung, button_erstberatung]) - - -demo.launch() diff --git a/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/diversity_evaluator.py b/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/diversity_evaluator.py deleted file mode 100644 index 3bce77edf856a77ec7d5d1b0ec7f9fb0fc1a2f82..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/diversity_evaluator.py +++ /dev/null @@ -1,52 +0,0 @@ -import numpy as np -import torch - -from ..get_model import get_motion_model -from .base_evaluator import BaseEvaluator -from ..utils import calculate_diversity - - -class DiversityEvaluator(BaseEvaluator): - - def __init__(self, - data_len=0, - motion_encoder_name=None, - motion_encoder_path=None, - num_samples=300, - batch_size=None, - drop_last=False, - replication_times=1, - replication_reduction='statistics', - **kwargs): - super().__init__( - replication_times=replication_times, - replication_reduction=replication_reduction, - batch_size=batch_size, - drop_last=drop_last, - eval_begin_idx=0, - eval_end_idx=data_len - ) - self.num_samples = num_samples - self.append_indexes = None - self.motion_encoder = get_motion_model(motion_encoder_name, motion_encoder_path) - self.model_list = [self.motion_encoder] - - def single_evaluate(self, results): - results = self.prepare_results(results) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - motion = results['motion'] - pred_motion = results['pred_motion'] - pred_motion_length = results['pred_motion_length'] - pred_motion_mask = results['pred_motion_mask'] - self.motion_encoder.to(device) - self.motion_encoder.eval() - with torch.no_grad(): - pred_motion_emb = self.motion_encode(pred_motion, pred_motion_length, pred_motion_mask, device).cpu().detach().numpy() - diversity = calculate_diversity(pred_motion_emb, self.num_samples) - return diversity - - def parse_values(self, values): - metrics = {} - metrics['Diversity (mean)'] = values[0] - metrics['Diversity (conf)'] = values[1] - return metrics diff --git a/spaces/mirroring/upload_civitai_model/README.md b/spaces/mirroring/upload_civitai_model/README.md deleted file mode 100644 index 567d12c48bb7e916180f06d4bdd43214d7d24dfb..0000000000000000000000000000000000000000 --- 
a/spaces/mirroring/upload_civitai_model/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Repo Uploader -emoji: 😈 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.30.0 -app_file: app.py -pinned: true -license: mit -duplicated_from: anonderpling/repo_uploader -python_version: 3.11.2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/mkhan328/TreemapAndSunburst/README.md b/spaces/mkhan328/TreemapAndSunburst/README.md deleted file mode 100644 index 08c66852a6d1f0c49efd2dab22eab863f791bc0a..0000000000000000000000000000000000000000 --- a/spaces/mkhan328/TreemapAndSunburst/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TreemapAndSunburst -emoji: 📈 -colorFrom: red -colorTo: yellow -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/mask_former_head.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/mask_former_head.py deleted file mode 100644 index 5f592662f92d1b0862a3ef76304e7b28b46ecf80..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/heads/mask_former_head.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -import logging -from copy import deepcopy -from typing import Callable, Dict, List, Optional, Tuple, Union - -import fvcore.nn.weight_init as weight_init -from torch import nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.layers import Conv2d, ShapeSpec, get_norm -from detectron2.modeling import SEM_SEG_HEADS_REGISTRY - -from ..transformer.transformer_predictor import TransformerPredictor -from .pixel_decoder import build_pixel_decoder - - -@SEM_SEG_HEADS_REGISTRY.register() -class MaskFormerHead(nn.Module): - - _version = 2 - - def _load_from_state_dict( - self, - state_dict, - prefix, - local_metadata, - strict, - missing_keys, - unexpected_keys, - error_msgs, - ): - version = local_metadata.get("version", None) - if version is None or version < 2: - # Do not warn if train from scratch - scratch = True - logger = logging.getLogger(__name__) - for k in list(state_dict.keys()): - newk = k - if "sem_seg_head" in k and not k.startswith(prefix + "predictor"): - newk = k.replace(prefix, prefix + "pixel_decoder.") - # logger.debug(f"{k} ==> {newk}") - if newk != k: - state_dict[newk] = state_dict[k] - del state_dict[k] - scratch = False - - if not scratch: - logger.warning( - f"Weight format of {self.__class__.__name__} have changed! " - "Please upgrade your models. Applying automatic conversion now ..." - ) - - @configurable - def __init__( - self, - input_shape: Dict[str, ShapeSpec], - *, - num_classes: int, - pixel_decoder: nn.Module, - loss_weight: float = 1.0, - ignore_value: int = -1, - # extra parameters - transformer_predictor: nn.Module, - transformer_in_feature: str, - ): - """ - NOTE: this interface is experimental. - Args: - input_shape: shapes (channels and stride) of the input features - num_classes: number of classes to predict - pixel_decoder: the pixel decoder module - loss_weight: loss weight - ignore_value: category id to be ignored during training. 
- transformer_predictor: the transformer decoder that makes prediction - transformer_in_feature: input feature name to the transformer_predictor - """ - super().__init__() - input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) - self.in_features = [k for k, v in input_shape] - feature_strides = [v.stride for k, v in input_shape] - feature_channels = [v.channels for k, v in input_shape] - - self.ignore_value = ignore_value - self.common_stride = 4 - self.loss_weight = loss_weight - - self.pixel_decoder = pixel_decoder - self.predictor = transformer_predictor - self.transformer_in_feature = transformer_in_feature - - self.num_classes = num_classes - - @classmethod - def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): - return { - "input_shape": { - k: v - for k, v in input_shape.items() - if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES - }, - "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, - "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, - "pixel_decoder": build_pixel_decoder(cfg, input_shape), - "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, - "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE, - "transformer_predictor": TransformerPredictor( - cfg, - cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM - if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder" - else input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels, - mask_classification=True, - ), - } - - def forward(self, features): - return self.layers(features) - - def layers(self, features): - ( - mask_features, - transformer_encoder_features, - ) = self.pixel_decoder.forward_features(features) - if self.transformer_in_feature == "transformer_encoder": - assert ( - transformer_encoder_features is not None - ), "Please use the TransformerEncoderPixelDecoder." - predictions = self.predictor(transformer_encoder_features, mask_features) - else: - predictions = self.predictor( - features[self.transformer_in_feature], mask_features - ) - return predictions diff --git a/spaces/mueller-franzes/medfusion-app/README.md b/spaces/mueller-franzes/medfusion-app/README.md deleted file mode 100644 index bd2f09b459198819d42ec3bafa67e36e6aa9ede5..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/README.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Medfusion App -emoji: 🔬 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.15.2 -app_file: streamlit/welcome.py -pinned: false -license: mit ---- - -Medfusion - Medical Denoising Diffusion Probabilistic Model -============= - -Paper -======= -Please see: [**Diffusion Probabilistic Models beat GANs on Medical 2D Images**]() - -![](media/Medfusion.png) -*Figure: Medfusion* - -![](media/animation_eye.gif) ![](media/animation_histo.gif) ![](media/animation_chest.gif)\ -*Figure: Eye fundus, chest X-ray and colon histology images generated with Medfusion (Warning color quality limited by .gif)* - -Demo -============= -[Link]() to streamlit app. - -Install -============= - -Create virtual environment and install packages: \ -`python -m venv venv` \ -`source venv/bin/activate`\ -`pip install -e .` - - -Get Started -============= - -1 Prepare Data -------------- - -* Go to [medical_diffusion/data/datasets/dataset_simple_2d.py](medical_diffusion/data/datasets/dataset_simple_2d.py) and create a new `SimpleDataset2D` or write your own Dataset. - - -2 Train Autoencoder ----------------- -* Go to [scripts/train_latent_embedder_2d.py](scripts/train_latent_embedder_2d.py) and import your Dataset. 
-* Load your dataset with eg. `SimpleDataModule` -* Customize `VAE` to your needs -* (Optional): Train a `VAEGAN` instead or load a pre-trained `VAE` and set `start_gan_train_step=-1` to start training of GAN immediately. - -2.1 Evaluate Autoencoder ----------------- -* Use [scripts/evaluate_latent_embedder.py](scripts/evaluate_latent_embedder.py) to evaluate the performance of the Autoencoder. - -3 Train Diffusion ----------------- -* Go to [scripts/train_diffusion.py](scripts/train_diffusion.py) and import/load your Dataset as before. -* Load your pre-trained VAE or VAEGAN with `latent_embedder_checkpoint=...` -* Use `cond_embedder = LabelEmbedder` for conditional training, otherwise `cond_embedder = None` - -3.1 Evaluate Diffusion ----------------- -* Go to [scripts/sample.py](scripts/sample.py) to sample a test image. -* Go to [scripts/helpers/sample_dataset.py](scripts/helpers/sample_dataset.py) to sample a more reprensative sample size. -* Use [scripts/evaluate_images.py](scripts/evaluate_images.py) to evaluate performance of sample (FID, Precision, Recall) - -Acknowledgment -============= -* Code builds upon https://github.com/lucidrains/denoising-diffusion-pytorch diff --git a/spaces/multimodalart/LoraTheExplorer4/share_btn.py b/spaces/multimodalart/LoraTheExplorer4/share_btn.py deleted file mode 100644 index cb4b3c67c3ef4018379592a837140b41f48cacbd..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/LoraTheExplorer4/share_btn.py +++ /dev/null @@ -1,76 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const selectedLoRA = gradioEl.querySelector('#selected_lora').innerHTML; - const inputPrompt = gradioEl.querySelector('#prompt input').value; - const outputImgEl = gradioEl.querySelector('#result-image img'); - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const inputFile = await getInputImgFile(outputImgEl); - const urlInputImg = await uploadFile(inputFile); - - const descriptionMd = ` - -${selectedLoRA} - -### Prompt -${inputPrompt} - -#### Generated Image: - -`; - const params = new URLSearchParams({ - title: inputPrompt, - description: descriptionMd, - preview: true - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/multimodalart/LoraTheExplorer/discussions/new?${paramsStr}`, 
'_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/multimodalart/upload_to_hub_folders_progress_bar/README.md b/spaces/multimodalart/upload_to_hub_folders_progress_bar/README.md deleted file mode 100644 index 52c02a3dffe6ee1702d6470d8fa01999c9ef9b46..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/upload_to_hub_folders_progress_bar/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Upload To Hub Multiple At Once -emoji: 👁 -colorFrom: indigo -colorTo: gray -sdk: static -pinned: false -license: mit -duplicated_from: multimodalart/upload_your_model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mustapha/chatAlpaca/README.md b/spaces/mustapha/chatAlpaca/README.md deleted file mode 100644 index 39d0c5f29a979a566906214c81395d0535ab7fe7..0000000000000000000000000000000000000000 --- a/spaces/mustapha/chatAlpaca/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatAlpaca -emoji: 🐢 -colorFrom: green -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/natvill/stable-diffusion-webui/app.py b/spaces/natvill/stable-diffusion-webui/app.py deleted file mode 100644 index 445e8a8fbbebbaf7e36db563f63773d7a927e454..0000000000000000000000000000000000000000 --- a/spaces/natvill/stable-diffusion-webui/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -from subprocess import getoutput - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") - -os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui") -os.chdir("/home/user/app/stable-diffusion-webui") - -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py") -os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''') -os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py") -os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py") - -# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header---------------------------- -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py") -os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -# --------------------------------------------------------------------------------------------------------------------------------------------------- - -if "IS_SHARED_UI" in os.environ: - os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/") - - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json") - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json") - - os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}") - os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}") - os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}") - - os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding") -else: - # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py") - os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py") - - # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME") - #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study") - os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser") - os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui") - - # Please duplicate this space and delete # character in front of 
the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt") - #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt") - #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt") - #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt") - #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt") - #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt") - - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt") - - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt") - #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml") - os.system(f"wget -q https://r2.kamiya-b.me/dreambooth_lib/akakura-sn.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/akakura-sn.ckpt") - os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt") - os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O 
/home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml") - - os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test") - \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Audioteknik DeepTech House WAVMAGNETRiXXzip.zip.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Audioteknik DeepTech House WAVMAGNETRiXXzip.zip.md deleted file mode 100644 index 4f8804664141a7168c92dad9ad5e86c72802d021..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Audioteknik DeepTech House WAVMAGNETRiXXzip.zip.md +++ /dev/null @@ -1,33 +0,0 @@ - -

          Audioteknik DeepTech House WAVMAGNETRiXXzip.zip: A Collection of High-Quality Samples for Deep Tech House Music

          -

          Audioteknik DeepTech House WAVMAGNETRiXXzip.zip is a file that contains over 500 MB of samples for deep tech house music production. The file was uploaded by Liolisflagse on SoundCloud[^1^], where you can listen to a preview of the sounds. The file includes loops, one-shots, basses, synths, drums, vocals, effects and more. The samples are in WAV format and are compatible with most DAWs and samplers.

          -

          If you are looking for some fresh and inspiring sounds for your deep tech house tracks, you might want to check out Audioteknik DeepTech House WAVMAGNETRiXXzip.zip. The file is available for download on Pastebin[^2^], where you can find the link to the file hosting service. The file is compressed in ZIP format and you will need to extract it before using it. The file size is 374 MB and the download speed may vary depending on your internet connection.
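If you would rather script the extraction step than do it by hand, here is a minimal Python sketch that uses only the standard library. The archive filename and the target folder below are placeholders, not the exact names from the download, so adjust them to match your own paths.

```python
import zipfile
from pathlib import Path

# Placeholder paths: point these at the archive you downloaded
# and the folder you want to unpack it into.
archive = Path("Audioteknik_DeepTech_House_WAV-MAGNETRiXX.zip")
target = Path("audioteknik_samples")

with zipfile.ZipFile(archive) as pack:
    pack.extractall(target)  # creates the target folder and any subfolders as needed
    print(f"Extracted {len(pack.namelist())} files to {target.resolve()}")
```

Once the archive is unpacked, the WAV files can be loaded into any DAW or sampler in the usual way.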

          -

          Audioteknik DeepTech House WAVMAGNETRiXXzip.zip


          Download https://urlcod.com/2uIcpo



          -

          Audioteknik DeepTech House WAVMAGNETRiXXzip.zip is a great resource for deep tech house producers who want to add some quality and variety to their sound library. The samples are suitable for different styles of deep tech house, such as minimal, dub, groove and more. The samples are also royalty-free, which means you can use them in your own projects without any legal issues.

          -

          To learn more about Audioteknik DeepTech House WAVMAGNETRiXXzip.zip, you can visit the Microsoft Sway page[^3^] that provides more information and screenshots of the file contents. You can also follow Liolisflagse on SoundCloud to stay updated on his latest uploads and projects.

          -

          Here are some tips on how to use Audioteknik DeepTech House WAVMAGNETRiXXzip.zip in your music production:

          -
            -
• Experiment with different combinations of loops and one-shots to create unique patterns and grooves.
• Use the bass and synth samples to add depth and melody to your tracks.
• Layer the drum samples with your own drums or other samples to create punchy and dynamic beats.
• Use the vocal samples to add some human touch and emotion to your tracks.
• Use the effects samples to spice up your transitions and add some atmosphere and texture to your tracks.
          -

          Audioteknik DeepTech House WAVMAGNETRiXXzip.zip is a versatile and high-quality sample pack that can help you create amazing deep tech house music. Whether you are a beginner or a professional, you will find something useful and inspiring in this file. Download it today and start making some deep tech house magic!

          Here are some examples of deep tech house tracks that use Audioteknik DeepTech House WAVMAGNETRiXXzip.zip samples:

          -
            -
1. Deep Tech House Mix by Liolisflagse: This is a mix of deep tech house tracks that features some of the samples from Audioteknik DeepTech House WAVMAGNETRiXXzip.zip. You can hear how the samples add some groove and energy to the mix.
2. Deep Tech House Track by Liolisflagse: This is a track that Liolisflagse made using only the samples from Audioteknik DeepTech House WAVMAGNETRiXXzip.zip. You can hear how the samples create a cohesive and catchy track.
3. Deep Tech House Remix by Liolisflagse: This is a remix of a track by another artist that Liolisflagse made using some of the samples from Audioteknik DeepTech House WAVMAGNETRiXXzip.zip. You can hear how the samples enhance and transform the original track.
          -

          If you want to make your own deep tech house tracks using Audioteknik DeepTech House WAVMAGNETRiXXzip.zip, you can follow these steps:

          -
            -
1. Download Audioteknik DeepTech House WAVMAGNETRiXXzip.zip from Pastebin and extract it to your preferred location.
2. Open your DAW and create a new project.
3. Import some of the samples from Audioteknik DeepTech House WAVMAGNETRiXXzip.zip to your project.
4. Arrange and edit the samples to your liking.
5. Add some effects and plugins to enhance the sound.
6. Mix and master your track.
7. Export your track and share it with the world!

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Driver Tv Tuner Stk7700d Windows Xp.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Driver Tv Tuner Stk7700d Windows Xp.md deleted file mode 100644 index 9bc0ebf51d20a06b6988dd0b0a5d5a302d0ae6ef..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Driver Tv Tuner Stk7700d Windows Xp.md +++ /dev/null @@ -1,46 +0,0 @@ -
          -

          How to Install Driver for TV Tuner STK7700D on Windows XP

          -

If you have a TV tuner card that uses the STK7700D chipset, you may need to install a driver to make it work properly on Windows XP. A driver is software that lets your computer communicate with your hardware devices. Without a driver, your TV tuner card may not function correctly or may cause errors on your system.

          -

          There are two ways to install the driver for TV tuner STK7700D on Windows XP: manually or automatically. Here are the steps for each method:

          -

          Driver Tv Tuner Stk7700d Windows Xp


          DOWNLOADhttps://urlcod.com/2uI9xd



          -

          Manual Installation

          -

To install the driver manually, you need to download it from the website of your TV tuner card's manufacturer: use a search engine to find the manufacturer's site and look for its driver or support section. Alternatively, you can use the link below to download the driver from DriverPack, a website that provides drivers for a wide range of devices:

          -https://driverpack.io/en/devices/tvtuner -

          Once you have downloaded the driver file, follow these steps to install it:

          -
            -
1. Unzip the file if it is compressed.
2. Open the Device Manager by clicking on Start, then Run, then typing devmgmt.msc and pressing Enter.
3. Locate your TV tuner card under the Sound, video and game controllers category and right-click on it.
4. Select Update Driver Software from the menu.
5. Choose Browse my computer for driver software.
6. Click on Browse and navigate to the folder where you extracted the driver file.
7. Click on Next and follow the instructions on the screen to complete the installation.
8. Restart your computer if prompted.
          -

          Automatic Installation

          -

          To install the driver automatically, you can use Windows Update, a feature that checks for and installs updates for your system and devices. Windows Update may detect your TV tuner card and offer you a driver update for it. To use Windows Update, follow these steps:

          -
            -
1. Click on Start, then Control Panel, then Security Center.
2. Click on Windows Update at the bottom of the window.
3. Click on Check for updates on the left pane.
4. Wait for Windows Update to scan your system and devices.
5. If you see a driver update for your TV tuner card under Optional updates, select it and click on Install updates.
6. Follow the instructions on the screen to complete the installation.
7. Restart your computer if prompted.
          -

          If Windows Update does not offer you a driver update for your TV tuner card, you can try using DriverPack, a website that provides drivers for various devices. You can use the link below to download and run DriverPack Online, a tool that scans your system and devices and installs drivers automatically:

          -https://driverpack.io/en/online - -

          After running DriverPack Online, follow these steps:

          -
            -
1. Click on Start Scan and wait for DriverPack Online to scan your system and devices.
2. If you see a driver update for your TV tuner card under Devices without drivers or Devices with outdated drivers, select it and click on Install all necessary drivers.
3. Follow the instructions on the screen to complete the installation.
4. Restart your computer if prompted.
          - -

          By following one of these methods, you should be able to install the driver for TV tuner STK7700D on Windows XP and enjoy watching TV on your computer. If you encounter any problems or have any questions, you can visit the Microsoft Community forum and ask for help:

          - -https://answers.microsoft.com/en-us/windows/forum/all

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dual Audio Movies Hindi English 720p Main Hoon Sherni 1080p.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dual Audio Movies Hindi English 720p Main Hoon Sherni 1080p.md deleted file mode 100644 index d817ce142d0f0c26b4e93204abec3336d416e70c..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dual Audio Movies Hindi English 720p Main Hoon Sherni 1080p.md +++ /dev/null @@ -1,20 +0,0 @@ -
          -

          Dual Audio Movies Hindi English 720p Main Hoon Sherni 1080p

          -

          If you are looking for a movie that combines action, drama and feminism, then you might want to check out Main Hoon Sherni (I Am a Tigress), a 1992 Hindi film starring Archana Puran Singh, Kiran Kumar and Sudha Chandran. The film is about a female forest officer who fights against poachers and corrupt politicians to protect the wildlife in her area. The film was directed by Suresh Bohra and produced by Kanti Shah.

          -

Main Hoon Sherni is available in dual audio format, which means you can watch it with either the Hindi or the English audio track. You can also choose between 720p and 1080p resolutions, depending on your preference and device compatibility. The film has a runtime of 2 hours and 12 minutes and a rating of 5.4 out of 10 on IMDb.

          -

          Dual Audio Movies Hindi English 720p Main Hoon Sherni 1080p


          Download File ✸✸✸ https://urlcod.com/2uIbkU



          -

          To download Main Hoon Sherni in dual audio format, you can visit one of the following websites:

          -
            -
• MoviesMod: This website offers a variety of movies and web series in different genres and languages. You can find Main Hoon Sherni under the Dual Audio category. You can download it in 480p, 720p or 1080p quality.
• HDHub4u: This website provides all Bollywood and Hollywood movies and web series in Hindi + English dual audio format. You can search for Main Hoon Sherni using the search bar or browse through the categories. You can download it in 480p, 720p or 1080p quality.
• KatMovieHD: This website is a popular source for downloading all kinds of movies and TV shows from around the world. You can find Main Hoon Sherni under the Bollywood Movies category. You can download it in 480p, 720p or 1080p quality.
          -

Before downloading any movie from these websites, make sure you have a good internet connection and enough storage space on your device. Also, be aware of the risk of malware and viruses that may come with some of the files. Always use reliable antivirus software and a VPN service to protect your device and your privacy.

          -

          We hope you enjoy watching Main Hoon Sherni in dual audio format. If you like this movie, you may also like other movies starring Archana Puran Singh, such as Kuch Kuch Hota Hai (1998), Mohabbatein (2000) and Golmaal Returns (2008).

          - -

          Main Hoon Sherni is not only an entertaining movie, but also a powerful statement on the role of women in society. The film portrays the protagonist, Kavita, as a strong and courageous woman who does not let anyone intimidate or oppress her. She stands up for her rights and values, and fights against injustice and corruption. She also shows compassion and empathy for the animals and the environment, and tries to educate the people around her about the importance of conservation and coexistence.

          -

          The film also challenges the stereotypes and prejudices that are often associated with women in India. Kavita is a professional and independent woman who does not need a man to support or protect her. She is not afraid to express her opinions and emotions, and does not conform to the traditional expectations of being submissive and obedient. She is also not ashamed of her sexuality, and does not let anyone judge or exploit her for it. She is a role model for many women who aspire to break free from the shackles of patriarchy and discrimination.

          -

          -

          Main Hoon Sherni is a movie that celebrates the spirit and strength of women, and inspires them to be confident and assertive in their lives. It also reminds us of the beauty and diversity of nature, and urges us to respect and preserve it for the future generations. It is a movie that deserves to be watched and appreciated by everyone.

          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Epson L130 Adjustment Programme Full Zip Full Download Free.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Epson L130 Adjustment Programme Full Zip Full Download Free.md deleted file mode 100644 index 78bf8576fb88971da737410c8c4633e84f50b020..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Epson L130 Adjustment Programme Full Zip Full Download Free.md +++ /dev/null @@ -1,34 +0,0 @@ - -

          How to Reset Epson L130 Printer Using Epson Adjustment Program

          -

If you have an Epson L130 printer that is blinking or showing various error messages, you may need to reset it using the Epson Adjustment Program. This is a software tool that can reset the waste ink pad counter, restore the initial settings, and fix other issues with your printer. In this article, we will show you how to download and use the Epson Adjustment Program to reset your Epson L130 printer.

          -

          What is Epson Adjustment Program?

          -

Epson Adjustment Program is a software tool that can be used to reset various parameters of Epson printers. It can reset the waste ink pad counter, which tracks how much excess ink the waste ink pads have absorbed during printing and head cleaning. When the counter reaches a preset limit, the printer stops working and displays an error message such as "Waste ink full" or "Service required". By using the Epson Adjustment Program, you can reset the waste ink pad counter to zero and continue using your printer.

          -

          Epson L130 Adjustment Programme Full Zip Full Download Free


          DOWNLOADhttps://urlcod.com/2uIcfW



          -

          Epson Adjustment Program can also reset other parameters of your printer, such as the maximum number of paper prints, the initial settings, the head cleaning cycle, etc. By resetting these parameters, you can improve the performance and quality of your printer.

          -

          How to Download Epson Adjustment Program?

          -

          To download the Epson Adjustment Program for your Epson L130 printer, you can use one of the following links:

          - -

          These links are from web search results [^4^] and may expire over time. You should always scan the downloaded files for viruses before opening them.
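If you want to keep a fingerprint of the archive you downloaded, for example to compare it against a checksum published by the uploader if one is provided, the short Python sketch below computes a SHA-256 hash using only the standard library. The filename is a placeholder, so replace it with the path of the ZIP file you actually saved.

```python
import hashlib
from pathlib import Path

# Placeholder filename: point this at the ZIP file you downloaded.
archive = Path("Epson_L130_Adjustment_Program.zip")

digest = hashlib.sha256()
with archive.open("rb") as fh:
    # Read in 1 MB chunks so large files never have to fit in memory at once.
    for chunk in iter(lambda: fh.read(1024 * 1024), b""):
        digest.update(chunk)

print(f"SHA-256 of {archive.name}: {digest.hexdigest()}")
```

Note that a checksum only tells you whether the file has changed since the hash was taken; it is not a substitute for scanning the file with antivirus software.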

          -

          How to Use Epson Adjustment Program?

          -

          To use the Epson Adjustment Program to reset your Epson L130 printer, follow these steps:

          -
            -
1. Extract the downloaded zip file and run the Adjprog.exe file.
2. Select your printer model and port from the drop-down menus.
3. Click on Particular adjustment mode.
4. Select Waste ink pad counter from the list and click OK.
5. Check the Main pad counter box and click Check.
6. The current value of the waste ink pad counter will be displayed. Click Initialization to reset it to zero.
7. A message will appear asking you to turn off your printer. Click OK and turn off your printer.
8. Turn on your printer again and click Finish.
9. Your printer has been reset successfully.
          -

          You can also use the Epson Adjustment Program to reset other parameters of your printer by selecting them from the Particular adjustment mode list. For example, you can select Head cleaning cycle to reset the number of head cleanings performed by your printer.

          -

          -

          Conclusion

          -

In this article, we have shown you how to download and use the Epson Adjustment Program to reset your Epson L130 printer. This software tool can help you fix various issues with your printer and improve its performance and print quality. However, you should use it with caution and only when necessary, as resetting some parameters may affect the warranty or lifespan of your printer. If you have any questions or problems with your printer, you can contact Epson support or visit their website for more information.

          -
          -
          \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/build_wheel.sh b/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/build_wheel.sh deleted file mode 100644 index 2535a1b99c1406dbd71d0cb5132886800ee7aa48..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/dev/packaging/build_wheel.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -set -ex - -ldconfig # https://github.com/NVIDIA/nvidia-docker/issues/854 - -script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -. "$script_dir/pkg_helpers.bash" - -echo "Build Settings:" -echo "CU_VERSION: $CU_VERSION" # e.g. cu101 -echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" -echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.7 -echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 1.4 - -setup_cuda -setup_wheel_python - -yum install ninja-build -y -ln -sv /usr/bin/ninja-build /usr/bin/ninja || true - -pip_install pip numpy -U -pip_install "torch==$PYTORCH_VERSION" \ - -f https://download.pytorch.org/whl/"$CU_VERSION"/torch_stable.html - -# use separate directories to allow parallel build -BASE_BUILD_DIR=build/$CU_VERSION-py$PYTHON_VERSION-pt$PYTORCH_VERSION -python setup.py \ - build -b "$BASE_BUILD_DIR" \ - bdist_wheel -b "$BASE_BUILD_DIR/build_dist" -d "wheels/$CU_VERSION/torch$PYTORCH_VERSION" -rm -rf "$BASE_BUILD_DIR" diff --git a/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/data_loader.py b/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/data_loader.py deleted file mode 100644 index 2201182d52ea6feaab7be85c5715bc60ea515120..0000000000000000000000000000000000000000 --- a/spaces/nus-cs5647-team-5/Mandarin_Tone_Evaluation/data_loader.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -import random -import numpy as np -from utils.config import load_config_file, DEFAULT_CONFIG_FILENAME, load_pinyin_dict -from utils.ops import read_wav_data - - -class DataLoader: - ''' - 数据加载器 - - 参数:\\ - config: 配置信息字典 - dataset_type: 要加载的数据集类型,包含('train', 'dev', 'test')三种 - ''' - - def __init__(self, dataset_type: str): - self.dataset_type = dataset_type - - self.data_list = list() - self.wav_dict = dict() - self.label_dict = dict() - self.pinyin_list = list() - self.pinyin_dict = dict() - self._load_data() - - def _load_data(self): - config = load_config_file(DEFAULT_CONFIG_FILENAME) - - self.pinyin_list, self.pinyin_dict = load_pinyin_dict(config['dict_filename']) - - for index in range(len(config['dataset'][self.dataset_type])): - filename_datalist = config['dataset'][self.dataset_type][index]['data_list'] - filename_datapath = config['dataset'][self.dataset_type][index]['data_path'] - with open(filename_datalist, 'r', encoding='utf-8') as file_pointer: - lines = file_pointer.read().split('\n') - for line in lines: - if len(line) == 0: - continue - tokens = line.split(' ') - self.data_list.append(tokens[0]) - self.wav_dict[tokens[0]] = os.path.join(filename_datapath, tokens[1]) - - filename_labellist = config['dataset'][self.dataset_type][index]['label_list'] - with open(filename_labellist, 'r', encoding='utf-8') as file_pointer: - lines = file_pointer.read().split('\n') - for line in lines: - if len(line) == 0: - continue - tokens = line.split(' ') - self.label_dict[tokens[0]] = tokens[1:] - - def get_data_count(self) -> int: - ''' - 获取数据集总数量 - ''' - return len(self.data_list) - - def get_data(self, index: int) -> tuple: - ''' - 按下标获取一条数据 - ''' - mark = self.data_list[index] - - 
wav_signal, sample_rate, _, _ = read_wav_data(self.wav_dict[mark]) - labels = list() - for item in self.label_dict[mark]: - if len(item) == 0: - continue - labels.append(self.pinyin_dict[item]) - - data_label = np.array(labels) - return (wav_signal, sample_rate, data_label) - - def shuffle(self) -> None: - ''' - 随机打乱数据 - ''' - random.shuffle(self.data_list) diff --git a/spaces/openaccess-ai-collective/rlhf-arena/calculate_elo.py b/spaces/openaccess-ai-collective/rlhf-arena/calculate_elo.py deleted file mode 100644 index cc21d1f65098fb717e3ce49700f2594817af5cf2..0000000000000000000000000000000000000000 --- a/spaces/openaccess-ai-collective/rlhf-arena/calculate_elo.py +++ /dev/null @@ -1,309 +0,0 @@ -import logging -import os -from datetime import datetime -from decimal import Decimal -from typing import List - -import boto3 -from boto3.dynamodb.conditions import Attr, Key -from datasets import Dataset - -logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO")) - -# Create a DynamoDB client -dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - -def _create_arena_table(): - dynamodb.create_table( - TableName='oaaic_chatbot_arena', - KeySchema=[ - { - 'AttributeName': 'arena_battle_id', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'arena_battle_id', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'timestamp', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - }, - GlobalSecondaryIndexes=[ - { - 'IndexName': 'TimestampIndex', - 'KeySchema': [ - { - 'AttributeName': 'arena_battle_id', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'timestamp', - 'KeyType': 'RANGE' - }, - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5, - } - }, - ] - ) - -def _create_elo_scores_table(): - dynamodb.create_table( - TableName='elo_scores', - KeySchema=[ - { - 'AttributeName': 'chatbot_name', - 'KeyType': 'HASH' # Partition key - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'chatbot_name', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - - -def _create_elo_logs_table(): - dynamodb.create_table( - TableName='elo_logs', - KeySchema=[ - { - 'AttributeName': 'arena_battle_id', - 'KeyType': 'HASH' # Partition key - }, - { - 'AttributeName': 'battle_timestamp', - 'KeyType': 'RANGE' # Sort key - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'arena_battle_id', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'battle_timestamp', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'all', - 'AttributeType': 'S' - } - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - }, - GlobalSecondaryIndexes=[ - { - 'IndexName': 'AllTimestampIndex', - 'KeySchema': [ - { - 'AttributeName': 'all', - 'KeyType': 'HASH' # Partition key for the GSI - }, - { - 'AttributeName': 'battle_timestamp', - 'KeyType': 'RANGE' # Sort key for the GSI - } - ], - 'Projection': { - 'ProjectionType': 'ALL' - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - } - }, - ] - ) - - -def get_unprocessed_battles(last_processed_timestamp): - # Use boto3 to create a DynamoDB resource and reference the table - table = dynamodb.Table('oaaic_chatbot_arena') - - # Use a query to retrieve unprocessed battles in temporal order - response = table.scan( - FilterExpression=Attr('timestamp').gt(last_processed_timestamp), - 
# ScanIndexForward=True - ) - - return response['Items'] - - -def calculate_elo(rating1, rating2, result, K=32): - # Convert ratings to float - rating1 = float(rating1) - rating2 = float(rating2) - - # Calculate the expected outcomes - expected_outcome1 = 1.0 / (1.0 + 10.0 ** ((rating2 - rating1) / 400.0)) - expected_outcome2 = 1.0 - expected_outcome1 - - # Calculate the new Elo ratings - new_rating1 = rating1 + K * (result - expected_outcome1) - new_rating2 = rating2 + K * ((1.0 - result) - expected_outcome2) - - return Decimal(new_rating1).quantize(Decimal('0.00')), Decimal(new_rating2).quantize(Decimal('0.00')) - - -def get_last_processed_timestamp(): - table = dynamodb.Table('elo_logs') - - # Scan the table sorted by timestamp in descending order - response = table.query( - IndexName='AllTimestampIndex', - KeyConditionExpression=Key('all').eq('ALL'), - ScanIndexForward=False, - Limit=1 - ) - - # If there are no items in the table, return a default timestamp - if not response['Items']: - return '1970-01-01T00:00:00' - - # Otherwise, return the timestamp of the latest item - return response['Items'][0]['battle_timestamp'] - - -def log_elo_update(arena_battle_id, battle_timestamp, new_rating1, new_rating2): - # Reference the elo_logs table - table = dynamodb.Table('elo_logs') - - # Update the table - table.put_item( - Item={ - 'arena_battle_id': arena_battle_id, - 'battle_timestamp': battle_timestamp, # Use the timestamp of the battle - 'log_timestamp': datetime.now().isoformat(), # Also store the timestamp of the log for completeness - 'new_rating1': new_rating1, - 'new_rating2': new_rating2, - 'all': 'ALL', - } - ) - - -def get_elo_score(chatbot_name, elo_scores): - if chatbot_name in elo_scores: - return elo_scores[chatbot_name] - - table = dynamodb.Table('elo_scores') - response = table.get_item(Key={'chatbot_name': chatbot_name}) - - # If there is no item in the table, return a default score - if 'Item' not in response: - return 1500 - - return response['Item']['elo_score'] - - -def update_elo_score(chatbot_name, new_elo_score): - table = dynamodb.Table('elo_scores') - - # This will create a new item if it doesn't exist - table.put_item( - Item={ - 'chatbot_name': chatbot_name, - 'elo_score': Decimal(str(new_elo_score)), - } - ) - - -def get_elo_scores(): - table = dynamodb.Table('elo_scores') - - response = table.scan() - data = response['Items'] - - return data - - -def _backfill_logs(): - table = dynamodb.Table('elo_logs') - - # Initialize the scan operation - response = table.scan() - - for item in response['Items']: - table.update_item( - Key={ - 'arena_battle_id': item['arena_battle_id'], - 'battle_timestamp': item['battle_timestamp'] - }, - UpdateExpression="SET #all = :value", - ExpressionAttributeNames={ - '#all': 'all' - }, - ExpressionAttributeValues={ - ':value': 'ALL' - } - ) - -def main(): - last_processed_timestamp = get_last_processed_timestamp() - battles: List[dict] = get_unprocessed_battles(last_processed_timestamp) - battles = sorted(battles, key=lambda x: x['timestamp']) - elo_scores = {} - - for battle in battles: - print(repr(battle)) - if battle['label'] in {-1, 0, 1, 2}: - outcome = battle['label'] - for chatbot_name in [battle['choice1_name'], battle['choice2_name']]: - if chatbot_name not in elo_scores: - elo_scores[chatbot_name] = get_elo_score(chatbot_name, elo_scores) - # 1: This means that the first player (or team) won the match. - # 0.5: This means that the match ended in a draw. - # 0: This means that the first player (or team) lost the match. 
- if outcome == 0 or outcome == -1: - elo_result = 0.5 - elif outcome == 1: - elo_result = 1 - else: - elo_result = 0 - - new_rating1, new_rating2 = calculate_elo(elo_scores[battle['choice1_name']], elo_scores[battle['choice2_name']], elo_result) - logging.info(f"{battle['choice1_name']}: {elo_scores[battle['choice1_name']]} -> {new_rating1} | {battle['choice2_name']}: {elo_scores[battle['choice2_name']]} -> {new_rating2}") - elo_scores[battle['choice1_name']] = new_rating1 - elo_scores[battle['choice2_name']] = new_rating2 - log_elo_update(battle['arena_battle_id'], battle['timestamp'], new_rating1, new_rating2) - update_elo_score(battle['choice1_name'], new_rating1) - update_elo_score(battle['choice2_name'], new_rating2) - elo_scores[battle['choice1_name']] = new_rating1 - elo_scores[battle['choice2_name']] = new_rating2 - - elo_scores = get_elo_scores() - for i, j in enumerate(elo_scores): - j["elo_score"] = float(j["elo_score"]) - elo_scores[i] = j - print(elo_scores) - - if battles: - # Convert the data into a format suitable for Hugging Face Dataset - elo_dataset = Dataset.from_list(elo_scores) - elo_dataset.push_to_hub("openaccess-ai-collective/chatbot-arena-elo-scores", private=False) - - -if __name__ == "__main__": - main() diff --git a/spaces/os1187/contract-review/predict.py b/spaces/os1187/contract-review/predict.py deleted file mode 100644 index 8cbcb13a58a7515d7b33e1bc30be53ff92ec5acd..0000000000000000000000000000000000000000 --- a/spaces/os1187/contract-review/predict.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import time -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from multiprocessing import cpu_count - -from transformers import ( - AutoConfig, - AutoModelForQuestionAnswering, - AutoTokenizer, - squad_convert_examples_to_features -) - -from transformers.data.processors.squad import SquadResult, SquadV2Processor, SquadExample -from transformers.data.metrics.squad_metrics import compute_predictions_logits - - -def run_prediction(question_texts, context_text, model_path, n_best_size=1): - max_seq_length = 512 - doc_stride = 256 - n_best_size = n_best_size - max_query_length = 64 - max_answer_length = 512 - do_lower_case = False - null_score_diff_threshold = 0.0 - - def to_list(tensor): - return tensor.detach().cpu().tolist() - - config_class, model_class, tokenizer_class = (AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer) - config = config_class.from_pretrained(model_path) - tokenizer = tokenizer_class.from_pretrained(model_path, do_lower_case=True, use_fast=False) - model = model_class.from_pretrained(model_path, config=config) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - model.to(device) - - processor = SquadV2Processor() - examples = [] - - timer = time.time() - for i, question_text in enumerate(question_texts): - - example = SquadExample( - qas_id=str(i), - question_text=question_text, - context_text=context_text, - answer_text=None, - start_position_character=None, - title="Predict", - answers=None, - ) - - examples.append(example) - print(f'Created Squad Examples in {time.time()-timer} seconds') - - print(f'Number of CPUs: {cpu_count()}') - timer = time.time() - features, dataset = squad_convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=max_seq_length, - doc_stride=doc_stride, - max_query_length=max_query_length, - is_training=False, - return_dataset="pt", - threads=cpu_count(), - ) - print(f'Converted Examples to Features in {time.time()-timer} 
seconds') - - eval_sampler = SequentialSampler(dataset) - eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10) - - all_results = [] - - timer = time.time() - for batch in eval_dataloader: - model.eval() - batch = tuple(t.to(device) for t in batch) - - with torch.no_grad(): - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "token_type_ids": batch[2], - } - - example_indices = batch[3] - - outputs = model(**inputs) - - for i, example_index in enumerate(example_indices): - eval_feature = features[example_index.item()] - unique_id = int(eval_feature.unique_id) - - output = [to_list(output[i]) for output in outputs.to_tuple()] - - start_logits, end_logits = output - result = SquadResult(unique_id, start_logits, end_logits) - all_results.append(result) - print(f'Model predictions completed in {time.time()-timer} seconds') - - print(all_results) - - output_nbest_file = None - if n_best_size > 1: - output_nbest_file = "nbest.json" - - timer = time.time() - final_predictions = compute_predictions_logits( - all_examples=examples, - all_features=features, - all_results=all_results, - n_best_size=n_best_size, - max_answer_length=max_answer_length, - do_lower_case=do_lower_case, - output_prediction_file=None, - output_nbest_file=output_nbest_file, - output_null_log_odds_file=None, - verbose_logging=False, - version_2_with_negative=True, - null_score_diff_threshold=null_score_diff_threshold, - tokenizer=tokenizer - ) - print(f'Logits converted to predictions in {time.time()-timer} seconds') - - return final_predictions diff --git a/spaces/overlordx/elonmusk/app.py b/spaces/overlordx/elonmusk/app.py deleted file mode 100644 index 56a86658281967f0c11de33ba200375b4659fe0e..0000000000000000000000000000000000000000 --- a/spaces/overlordx/elonmusk/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import pandas -import streamlit as st -from diffusers import StableDiffusionPipeline - -from game_manager import GameManager - -game_manager = GameManager() - - -def empty_cache(): - st.empty() - - -# @st.cache_resource # 👈 Add the caching decorator -def get_pipe(): - pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cpu") - # pipe.safety_checker = lambda images, clip_input: (images, False) - return pipe - - -def start_game(): - INFERENCE_STEPS = 15 - PROMPT_STRENGTH = 10.0 - HEIGHT = 128 # must be divisible by 8 - WIDTH = 128 # must be divisible by 8 - prompt_base = "" - if nude_selected: - prompt_base = "Realisitc photograph of naked Elon Musk, style " - else: - prompt_base = "Realisitc photograph of Elon Musk, style " - prompt = prompt_base + game_manager.get_artist() + " professions, RAW photo, *subject*, high detailed skin:2.3, 8k uhd, dslr, studio lighting, studio lighting, Fujifilm XT3" - pipe = get_pipe() - image = pipe(prompt, height=HEIGHT, width=WIDTH, num_inference_steps=INFERENCE_STEPS, - guidance_scale=PROMPT_STRENGTH).images[0] - st.image(image) - - -styles = pandas.read_csv("./artist_styles") -st.title('AI Draws :blue[Elon Musk] :sunglasses:') -select = st.selectbox('Choose A Style', styles["Artist"], key='1', index=140) -st.button("Draw Elon Musk (maybe NSFW)", on_click=start_game) -nude_selected = st.checkbox("Nude") -artist_select = st.text("Style Selected:" + select) -if select: - artist_select.text = "Style Selected: " + select - game_manager.set_artist(select) -st.text("Wait Time: This is a slow CPU, wait for 2 minutes for images") -st.text("Note: you can select styles by typing") -st.text("Note: to save the image right-click then 
select 'save image as'") -st.title('With more iterations this would actually look like :blue[Elon Musk] :sunglasses:') - -st.button("Empty Cache", on_click=empty_cache) diff --git a/spaces/pablodawson/ldm3d-inpainting/README.md b/spaces/pablodawson/ldm3d-inpainting/README.md deleted file mode 100644 index c044e3a5ef1f6216787e6eb0ef59aafecff8baa5..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: SDXL Inpainting -emoji: 🔥 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -duplicated_from: runwayml/stable-diffusion-inpainting ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - diff --git a/spaces/phenomenon1981/Dreamlikeart-Anime-1.0/style.css b/spaces/phenomenon1981/Dreamlikeart-Anime-1.0/style.css deleted file mode 100644 index fdbef9e64cc6b9f8003698ffa38997ee22a640ac..0000000000000000000000000000000000000000 --- a/spaces/phenomenon1981/Dreamlikeart-Anime-1.0/style.css +++ /dev/null @@ -1,84 +0,0 @@ -#col-container { - max-width: 800px; - margin-left: auto; - margin-right: auto; -} -a { - color: inherit; - text-decoration: underline; -} -.gradio-container { - font-family: 'IBM Plex Sans', sans-serif; -} -.gr-button { - color: white; - border-color: #9d66e5; - background: #9d66e5; -} -input[type='range'] { - accent-color: #9d66e5; -} -.dark input[type='range'] { - accent-color: #dfdfdf; -} -.container { - max-width: 800px; - margin: auto; - padding-top: 1.5rem; -} -#gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; -} -#gallery>div>.h-full { - min-height: 20rem; -} -.details:hover { - text-decoration: underline; -} -.gr-button { - white-space: nowrap; -} -.gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; -} -#advanced-options { - margin-bottom: 20px; -} -.footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; -} -.dark .logo{ filter: invert(1); } -.dark .footer { - border-color: #303030; -} -.dark .footer>p { - background: #0b0f19; -} -.acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; -} - diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/plots.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/plots.py deleted file mode 100644 index 7b781d9a2db7052ad43d52b4d71c07f88a75da67..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils/plots.py +++ /dev/null @@ -1,560 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" - -import os, math, sys, cv2 -from copy import copy -from pathlib import Path -from urllib.error import URLError 
-import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sn -import torch -from PIL import Image, ImageDraw, ImageFont -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, increment_path, is_ascii, try_except, xywh2xyxy, xyxy2xywh) -from metrics import fitness - -# Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only - - -class Colors: - # Ultralytics color palette https://ultralytics.com/ - def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() - # if PROFILE == "PREP": - # hex = ('FF0046', '008941', 'F2FF00', '0000FF', '00FBFF', - # 'A30059', 'FFCDDC', 'FFAC28', '8C8C8C', '00D4BB', - # '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - # elif PROFILE == "PLANT": - hex = ('00ff37', '69fffc', 'ffcb00', 'fcff00', '000000', - 'ff34ff', '9a00ff', 'ff0009', 'ceffc4', 'ff8600', - '901616', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - # else: - # hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - # '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb('#' + c) for c in hex] - self.n = len(self.palette) - - def __call__(self, i, bgr=False): - c = self.palette[int(i) % self.n] - return (c[2], c[1], c[0]) if bgr else c - - @staticmethod - def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - -colors = Colors() # create instance for 'from utils.plots import colors' - - -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - -class AnnotatorLandmark: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width #or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - # if label: - # w, h = self.font.getsize(label) # text width, height - # outside = box[1] - h >= 0 # label fits outside box - # self.draw.rectangle( - # (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - # box[1] + 1 if outside else box[1] + h + 1), - # fill=color, - # ) - # # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - # self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.circle(self.im, (int((p1[0]+p2[0])/2), int((p1[1]+p2[1])/2)), radius=self.lw, color=color, thickness=-1) - # cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - # if label: - # tf = 6 #max(self.lw - 1, 1) # font thickness - # w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - # outside = p1[1] - h - 3 >= 0 # label fits outside box - # p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - # cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - # cv2.putText(self.im, - # label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - # 0, - # self.lw, - # txt_color, - # thickness=tf, - # lineType=cv2.LINE_AA) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255)): - # Add text to image (PIL-only) - w, h = self.font.getsize(text) # text width, height - self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - # w, h = self.font.getsize(label) # text width, height - w = 38 - h = 38 - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h - 3 >= 0 # label fits outside box - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255)): - # Add text to image (PIL-only) - # w, h = self.font.getsize(text) # text width, height - w = 38 - h = 38 - self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - - LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.savefig(f, dpi=300, bbox_inches='tight') - plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - targets = [] - for i, o in enumerate(output): - for *box, conf, cls in o.cpu().numpy(): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) - return np.array(targets) - - -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - ti = targets[targets[:, 0] == i] # image targets - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf 
thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - annotator.im.save(fname) # save - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - plt.close() - - -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) - save_dir = Path(file).parent if file else Path(dir) - plot2 = False # plot additional results - if plot2: - ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): - y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') - - ax2.grid(alpha=0.2) - ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 57) - ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') - plt.savefig(f, dpi=300) - - -@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 -@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 -def plot_labels(labels, names=(), save_dir=Path('')): - # plot dataset labels - LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - - # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) - plt.close() - - # matplotlib labels - matplotlib.use('svg') # faster - ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - try: # color histogram bars by class - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - except Exception: - pass - ax[0].set_ylabel('instances') - if 0 < len(names) < 30: - ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) - else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) - - # rectangles - labels[:, 1:3] = 0.5 # center - labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 - img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) - for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot - ax[1].imshow(img) - ax[1].axis('off') - - for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: - ax[a].spines[s].set_visible(False) - - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') - plt.close() - - -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results - evolve_csv = Path(evolve_csv) - data = pd.read_csv(evolve_csv) - keys = [x.strip() for x in data.columns] - x = data.values - f = fitness(x) - j = np.argmax(f) # max fitness index - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') - for i, k in enumerate(keys[7:]): - v = x[:, 7 + i] - mu = v[j] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename - plt.savefig(f, dpi=200) - plt.close() - print(f'Saved {f}') - - -def plot_results(file='path/to/results.csv', dir=''): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
- for fi, f in enumerate(files): - try: - data = pd.read_csv(f) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j] - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) - ax[i].set_title(s[j], fontsize=12) - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') - ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) - plt.close() - - -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) - # cv2.imwrite(f, crop) # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue - Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0) - return crop diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py deleted file mode 100644 index 91ca551f97b4576c680711e826a1855fb944c872..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/colorama/tests/ansitowin32_test.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-from io import StringIO, TextIOWrapper -from unittest import TestCase, main -try: - from contextlib import ExitStack -except ImportError: - # python 2 - from contextlib2 import ExitStack - -try: - from unittest.mock import MagicMock, Mock, patch -except ImportError: - from mock import MagicMock, Mock, patch - -from ..ansitowin32 import AnsiToWin32, StreamWrapper -from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING -from .utils import osname - - -class StreamWrapperTest(TestCase): - - def testIsAProxy(self): - mockStream = Mock() - wrapper = StreamWrapper(mockStream, None) - self.assertTrue( wrapper.random_attr is mockStream.random_attr ) - - def testDelegatesWrite(self): - mockStream = Mock() - mockConverter = Mock() - wrapper = StreamWrapper(mockStream, mockConverter) - wrapper.write('hello') - self.assertTrue(mockConverter.write.call_args, (('hello',), {})) - - def testDelegatesContext(self): - mockConverter = Mock() - s = StringIO() - with StreamWrapper(s, mockConverter) as fp: - fp.write(u'hello') - self.assertTrue(s.closed) - - def testProxyNoContextManager(self): - mockStream = MagicMock() - mockStream.__enter__.side_effect = AttributeError() - mockConverter = Mock() - with self.assertRaises(AttributeError) as excinfo: - with StreamWrapper(mockStream, mockConverter) as wrapper: - wrapper.write('hello') - - def test_closed_shouldnt_raise_on_closed_stream(self): - stream = StringIO() - stream.close() - wrapper = StreamWrapper(stream, None) - self.assertEqual(wrapper.closed, True) - - def test_closed_shouldnt_raise_on_detached_stream(self): - stream = TextIOWrapper(StringIO()) - stream.detach() - wrapper = StreamWrapper(stream, None) - self.assertEqual(wrapper.closed, True) - -class AnsiToWin32Test(TestCase): - - def testInit(self): - mockStdout = Mock() - auto = Mock() - stream = AnsiToWin32(mockStdout, autoreset=auto) - self.assertEqual(stream.wrapped, mockStdout) - self.assertEqual(stream.autoreset, auto) - - @patch('colorama.ansitowin32.winterm', None) - @patch('colorama.ansitowin32.winapi_test', lambda *_: True) - def testStripIsTrueOnWindows(self): - with osname('nt'): - mockStdout = Mock() - stream = AnsiToWin32(mockStdout) - self.assertTrue(stream.strip) - - def testStripIsFalseOffWindows(self): - with osname('posix'): - mockStdout = Mock(closed=False) - stream = AnsiToWin32(mockStdout) - self.assertFalse(stream.strip) - - def testWriteStripsAnsi(self): - mockStdout = Mock() - stream = AnsiToWin32(mockStdout) - stream.wrapped = Mock() - stream.write_and_convert = Mock() - stream.strip = True - - stream.write('abc') - - self.assertFalse(stream.wrapped.write.called) - self.assertEqual(stream.write_and_convert.call_args, (('abc',), {})) - - def testWriteDoesNotStripAnsi(self): - mockStdout = Mock() - stream = AnsiToWin32(mockStdout) - stream.wrapped = Mock() - stream.write_and_convert = Mock() - stream.strip = False - stream.convert = False - - stream.write('abc') - - self.assertFalse(stream.write_and_convert.called) - self.assertEqual(stream.wrapped.write.call_args, (('abc',), {})) - - def assert_autoresets(self, convert, autoreset=True): - stream = AnsiToWin32(Mock()) - stream.convert = convert - stream.reset_all = Mock() - stream.autoreset = autoreset - stream.winterm = Mock() - - stream.write('abc') - - self.assertEqual(stream.reset_all.called, autoreset) - - def testWriteAutoresets(self): - self.assert_autoresets(convert=True) - self.assert_autoresets(convert=False) - self.assert_autoresets(convert=True, autoreset=False) - self.assert_autoresets(convert=False, 
autoreset=False) - - def testWriteAndConvertWritesPlainText(self): - stream = AnsiToWin32(Mock()) - stream.write_and_convert( 'abc' ) - self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) ) - - def testWriteAndConvertStripsAllValidAnsi(self): - stream = AnsiToWin32(Mock()) - stream.call_win32 = Mock() - data = [ - 'abc\033[mdef', - 'abc\033[0mdef', - 'abc\033[2mdef', - 'abc\033[02mdef', - 'abc\033[002mdef', - 'abc\033[40mdef', - 'abc\033[040mdef', - 'abc\033[0;1mdef', - 'abc\033[40;50mdef', - 'abc\033[50;30;40mdef', - 'abc\033[Adef', - 'abc\033[0Gdef', - 'abc\033[1;20;128Hdef', - ] - for datum in data: - stream.wrapped.write.reset_mock() - stream.write_and_convert( datum ) - self.assertEqual( - [args[0] for args in stream.wrapped.write.call_args_list], - [ ('abc',), ('def',) ] - ) - - def testWriteAndConvertSkipsEmptySnippets(self): - stream = AnsiToWin32(Mock()) - stream.call_win32 = Mock() - stream.write_and_convert( '\033[40m\033[41m' ) - self.assertFalse( stream.wrapped.write.called ) - - def testWriteAndConvertCallsWin32WithParamsAndCommand(self): - stream = AnsiToWin32(Mock()) - stream.convert = True - stream.call_win32 = Mock() - stream.extract_params = Mock(return_value='params') - data = { - 'abc\033[adef': ('a', 'params'), - 'abc\033[;;bdef': ('b', 'params'), - 'abc\033[0cdef': ('c', 'params'), - 'abc\033[;;0;;Gdef': ('G', 'params'), - 'abc\033[1;20;128Hdef': ('H', 'params'), - } - for datum, expected in data.items(): - stream.call_win32.reset_mock() - stream.write_and_convert( datum ) - self.assertEqual( stream.call_win32.call_args[0], expected ) - - def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): - stream = StringIO() - converter = AnsiToWin32(stream) - stream.close() - - converter.reset_all() - - def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): - stream = StringIO() - stream.close() - with \ - patch("colorama.ansitowin32.os.name", "nt"), \ - patch("colorama.ansitowin32.winapi_test", lambda: True): - converter = AnsiToWin32(stream) - self.assertTrue(converter.strip) - self.assertFalse(converter.convert) - - def test_wrap_shouldnt_raise_on_missing_closed_attr(self): - with \ - patch("colorama.ansitowin32.os.name", "nt"), \ - patch("colorama.ansitowin32.winapi_test", lambda: True): - converter = AnsiToWin32(object()) - self.assertTrue(converter.strip) - self.assertFalse(converter.convert) - - def testExtractParams(self): - stream = AnsiToWin32(Mock()) - data = { - '': (0,), - ';;': (0,), - '2': (2,), - ';;002;;': (2,), - '0;1': (0, 1), - ';;003;;456;;': (3, 456), - '11;22;33;44;55': (11, 22, 33, 44, 55), - } - for datum, expected in data.items(): - self.assertEqual(stream.extract_params('m', datum), expected) - - def testCallWin32UsesLookup(self): - listener = Mock() - stream = AnsiToWin32(listener) - stream.win32_calls = { - 1: (lambda *_, **__: listener(11),), - 2: (lambda *_, **__: listener(22),), - 3: (lambda *_, **__: listener(33),), - } - stream.call_win32('m', (3, 1, 99, 2)) - self.assertEqual( - [a[0][0] for a in listener.call_args_list], - [33, 11, 22] ) - - def test_osc_codes(self): - mockStdout = Mock() - stream = AnsiToWin32(mockStdout, convert=True) - with patch('colorama.ansitowin32.winterm') as winterm: - data = [ - '\033]0\x07', # missing arguments - '\033]0;foo\x08', # wrong OSC command - '\033]0;colorama_test_title\x07', # should work - '\033]1;colorama_test_title\x07', # wrong set command - '\033]2;colorama_test_title\x07', # should work - '\033]' + ';' * 64 + '\x08', # see issue #247 - ] - for code in data: - 
stream.write(code) - self.assertEqual(winterm.set_title.call_count, 2) - - def test_native_windows_ansi(self): - with ExitStack() as stack: - def p(a, b): - stack.enter_context(patch(a, b, create=True)) - # Pretend to be on Windows - p("colorama.ansitowin32.os.name", "nt") - p("colorama.ansitowin32.winapi_test", lambda: True) - p("colorama.win32.winapi_test", lambda: True) - p("colorama.winterm.win32.windll", "non-None") - p("colorama.winterm.get_osfhandle", lambda _: 1234) - - # Pretend that our mock stream has native ANSI support - p( - "colorama.winterm.win32.GetConsoleMode", - lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, - ) - SetConsoleMode = Mock() - p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) - - stdout = Mock() - stdout.closed = False - stdout.isatty.return_value = True - stdout.fileno.return_value = 1 - - # Our fake console says it has native vt support, so AnsiToWin32 should - # enable that support and do nothing else. - stream = AnsiToWin32(stdout) - SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) - self.assertFalse(stream.strip) - self.assertFalse(stream.convert) - self.assertFalse(stream.should_wrap()) - - # Now let's pretend we're on an old Windows console, that doesn't have - # native ANSI support. - p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) - SetConsoleMode = Mock() - p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) - - stream = AnsiToWin32(stdout) - SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) - self.assertTrue(stream.strip) - self.assertTrue(stream.convert) - self.assertTrue(stream.should_wrap()) - - -if __name__ == '__main__': - main() diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/archive_util.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/archive_util.py deleted file mode 100644 index d8e10c13e154802f4a742ed4904f0071369aa2ad..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/archive_util.py +++ /dev/null @@ -1,213 +0,0 @@ -"""Utilities for extracting common archive formats""" - -import zipfile -import tarfile -import os -import shutil -import posixpath -import contextlib -from distutils.errors import DistutilsError - -from ._path import ensure_directory - -__all__ = [ - "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", - "UnrecognizedFormat", "extraction_drivers", "unpack_directory", -] - - -class UnrecognizedFormat(DistutilsError): - """Couldn't recognize the archive type""" - - -def default_filter(src, dst): - """The default progress/filter callback; returns True for all files""" - return dst - - -def unpack_archive( - filename, extract_dir, progress_filter=default_filter, - drivers=None): - """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat`` - - `progress_filter` is a function taking two arguments: a source path - internal to the archive ('/'-separated), and a filesystem path where it - will be extracted. The callback must return the desired extract path - (which may be the same as the one passed in), or else ``None`` to skip - that file or directory. 
The callback can thus be used to report on the - progress of the extraction, as well as to filter the items extracted or - alter their extraction paths. - - `drivers`, if supplied, must be a non-empty sequence of functions with the - same signature as this function (minus the `drivers` argument), that raise - ``UnrecognizedFormat`` if they do not support extracting the designated - archive type. The `drivers` are tried in sequence until one is found that - does not raise an error, or until all are exhausted (in which case - ``UnrecognizedFormat`` is raised). If you do not supply a sequence of - drivers, the module's ``extraction_drivers`` constant will be used, which - means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that - order. - """ - for driver in drivers or extraction_drivers: - try: - driver(filename, extract_dir, progress_filter) - except UnrecognizedFormat: - continue - else: - return - else: - raise UnrecognizedFormat( - "Not a recognized archive type: %s" % filename - ) - - -def unpack_directory(filename, extract_dir, progress_filter=default_filter): - """"Unpack" a directory, using the same interface as for archives - - Raises ``UnrecognizedFormat`` if `filename` is not a directory - """ - if not os.path.isdir(filename): - raise UnrecognizedFormat("%s is not a directory" % filename) - - paths = { - filename: ('', extract_dir), - } - for base, dirs, files in os.walk(filename): - src, dst = paths[base] - for d in dirs: - paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) - for f in files: - target = os.path.join(dst, f) - target = progress_filter(src + f, target) - if not target: - # skip non-files - continue - ensure_directory(target) - f = os.path.join(base, f) - shutil.copyfile(f, target) - shutil.copystat(f, target) - - -def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): - """Unpack zip `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined - by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. - """ - - if not zipfile.is_zipfile(filename): - raise UnrecognizedFormat("%s is not a zip file" % (filename,)) - - with zipfile.ZipFile(filename) as z: - _unpack_zipfile_obj(z, extract_dir, progress_filter) - - -def _unpack_zipfile_obj(zipfile_obj, extract_dir, progress_filter=default_filter): - """Internal/private API used by other parts of setuptools. - Similar to ``unpack_zipfile``, but receives an already opened :obj:`zipfile.ZipFile` - object instead of a filename. - """ - for info in zipfile_obj.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name.split('/'): - continue - - target = os.path.join(extract_dir, *name.split('/')) - target = progress_filter(name, target) - if not target: - continue - if name.endswith('/'): - # directory - ensure_directory(target) - else: - # file - ensure_directory(target) - data = zipfile_obj.read(info.filename) - with open(target, 'wb') as f: - f.write(data) - unix_attributes = info.external_attr >> 16 - if unix_attributes: - os.chmod(target, unix_attributes) - - -def _resolve_tar_file_or_dir(tar_obj, tar_member_obj): - """Resolve any links and extract link targets as normal files.""" - while tar_member_obj is not None and ( - tar_member_obj.islnk() or tar_member_obj.issym()): - linkpath = tar_member_obj.linkname - if tar_member_obj.issym(): - base = posixpath.dirname(tar_member_obj.name) - linkpath = posixpath.join(base, linkpath) - linkpath = posixpath.normpath(linkpath) - tar_member_obj = tar_obj._getmember(linkpath) - - is_file_or_dir = ( - tar_member_obj is not None and - (tar_member_obj.isfile() or tar_member_obj.isdir()) - ) - if is_file_or_dir: - return tar_member_obj - - raise LookupError('Got unknown file type') - - -def _iter_open_tar(tar_obj, extract_dir, progress_filter): - """Emit member-destination pairs from a tar archive.""" - # don't do any chowning! - tar_obj.chown = lambda *args: None - - with contextlib.closing(tar_obj): - for member in tar_obj: - name = member.name - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' in name.split('/'): - continue - - prelim_dst = os.path.join(extract_dir, *name.split('/')) - - try: - member = _resolve_tar_file_or_dir(tar_obj, member) - except LookupError: - continue - - final_dst = progress_filter(name, prelim_dst) - if not final_dst: - continue - - if final_dst.endswith(os.sep): - final_dst = final_dst[:-1] - - yield member, final_dst - - -def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined - by ``tarfile.open()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. 
- """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError as e: - raise UnrecognizedFormat( - "%s is not a compressed or uncompressed tar file" % (filename,) - ) from e - - for member, final_dst in _iter_open_tar( - tarobj, extract_dir, progress_filter, - ): - try: - # XXX Ugh - tarobj._extract_member(member, final_dst) - except tarfile.ExtractError: - # chown/chmod/mkfifo/mknode/makedev failed - pass - - return True - - -extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile diff --git a/spaces/prerna9811/Chord/portaudio/bindings/java/jportaudio/src/com/portaudio/StreamInfo.java b/spaces/prerna9811/Chord/portaudio/bindings/java/jportaudio/src/com/portaudio/StreamInfo.java deleted file mode 100644 index 685f94d5d53fc9c6871b9cac7817e5accd4e1e65..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/bindings/java/jportaudio/src/com/portaudio/StreamInfo.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Portable Audio I/O Library - * Java Binding for PortAudio - * - * Based on the Open Source API proposed by Ross Bencina - * Copyright (c) 2008 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - - -/** @file - @ingroup bindings_java - - @brief Information about a JPortAudio Stream. 
-*/ - -package com.portaudio; - -/** - * Equivalent to PaStreamInfo - * @see PortAudio - * @author Phil Burk - * - */ -public class StreamInfo -{ - public int structVersion; - public double outputLatency; - public double inputLatency; - public double sampleRate; -} diff --git a/spaces/prerna9811/Chord/portaudio/test/patest_dsound_low_level_latency_params.c b/spaces/prerna9811/Chord/portaudio/test/patest_dsound_low_level_latency_params.c deleted file mode 100644 index d583e694f922b11031eff99c71277aaaecddfad9..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/test/patest_dsound_low_level_latency_params.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * $Id: $ - * Portable Audio I/O Library - * Windows DirectSound low level buffer parameters test - * - * Copyright (c) 2011 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include - -#include "portaudio.h" -#include "pa_win_ds.h" - -#define NUM_SECONDS (6) -#define SAMPLE_RATE (44100) - -#define DSOUND_FRAMES_PER_HOST_BUFFER (256*2) //(440*10) - -#define FRAMES_PER_BUFFER 256 - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (2048) - -#define CHANNEL_COUNT (2) - - -typedef struct -{ - float sine[TABLE_SIZE]; - double phase; -} -paTestData; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i,j; - - (void) timeInfo; /* Prevent unused variable warnings. 
*/ - (void) statusFlags; - (void) inputBuffer; - - for( i=0; isine[(int)data->phase]; - data->phase += 20; - if( data->phase >= TABLE_SIZE ){ - data->phase -= TABLE_SIZE; - } - - for( j = 0; j < CHANNEL_COUNT; ++j ){ - *out++ = x; - } - } - - return paContinue; -} - -/*******************************************************************/ -int main(int argc, char* argv[]) -{ - PaStreamParameters outputParameters; - PaWinDirectSoundStreamInfo dsoundStreamInfo; - PaStream *stream; - PaError err; - paTestData data; - int i; - int deviceIndex; - - printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT); - - err = Pa_Initialize(); - if( err != paNoError ) goto error; - - deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paDirectSound ) )->defaultOutputDevice; - if( argc == 2 ){ - sscanf( argv[1], "%d", &deviceIndex ); - } - - printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name ); - - /* initialise sinusoidal wavetable */ - for( i=0; idefaultLowOutputLatency;*/ - outputParameters.hostApiSpecificStreamInfo = NULL; - - dsoundStreamInfo.size = sizeof(PaWinDirectSoundStreamInfo); - dsoundStreamInfo.hostApiType = paDirectSound; - dsoundStreamInfo.version = 2; - dsoundStreamInfo.flags = paWinDirectSoundUseLowLevelLatencyParameters; - dsoundStreamInfo.framesPerBuffer = DSOUND_FRAMES_PER_HOST_BUFFER; - outputParameters.hostApiSpecificStreamInfo = &dsoundStreamInfo; - - - if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){ - printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT ); - }else{ - printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT ); - } - - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - &outputParameters, - SAMPLE_RATE, - FRAMES_PER_BUFFER, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - &data ); - if( err != paNoError ) goto error; - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error; - - err = Pa_CloseStream( stream ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("Test finished.\n"); - - return err; -error: - Pa_Terminate(); - fprintf( stderr, "An error occurred while using the portaudio stream\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/prerna9811/musicapp/app.py b/spaces/prerna9811/musicapp/app.py deleted file mode 100644 index ccaf0053cb23119249cb5de89472146293af0c47..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/musicapp/app.py +++ /dev/null @@ -1,133 +0,0 @@ -import gradio as gr -import os -import numpy as np -import tensorflow as tf -from scipy.io.wavfile import write -from pydub import AudioSegment -from pydub.utils import make_chunks -import keras.backend as K -import librosa.display -import cv2 -import librosa -import matplotlib.pyplot as plt -import librosa.display -import string -import random -from keras.applications import VGG16 -import scipy -import soundfile as SF -from musixmatch import Musixmatch - -# Load the tune recognition model -model = tf.keras.models.load_model('embdmodel_1.hdf5') -embedding_model=model.layers[2] - -# Define 
function to preprocess input audio -#convert song to mel spectogram as siamese network doesn't work on sound directly -def create_spectrogram(clip,sample_rate,save_path): - plt.interactive(False) - fig=plt.figure(figsize=[0.72,0.72]) - S=librosa.feature.melspectrogram(y=clip,sr=sample_rate) - librosa.display.specshow(librosa.power_to_db(S,ref=np.max)) - fig.savefig(save_path,dpi=400,bbox_inches='tight',pad_inches=0) - plt.close() - fig.clf() - plt.close(fig) - plt.close('all') - del save_path,clip,sample_rate,fig,S - -def load_img(path): - img=cv2.imread(path) - img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) - img=cv2.resize(img,(150,150)) - return img - -import pickle -with open('dict.pickle', 'rb') as handle: - songspecdict = pickle.load(handle) - - - -def list_file_sizes(): - path = "." - - # Get list of all files only in the given directory - fun = lambda x : os.path.isfile(os.path.join(path,x)) - files_list = filter(fun, os.listdir(path)) - - # Create a list of files in directory along with the size - size_of_file = [ - (f,os.stat(os.path.join(path, f)).st_size) - for f in files_list - ] - # Iterate over list of files along with size - # and print them one by one. - for f,s in size_of_file: - print("{} : {}MB".format(f, round(s/(1024*1024),3))) - -title='Music App' - -def greet(audio): - song, sr = librosa.load(audio) - to_match = np.copy(song[0:220500]) - - # Create spectrogram image of the song to match - create_spectrogram(to_match, sr, 'test.png') - - # Load the spectrogram image of the song to match - to_match_img = load_img('test.png') - to_match_img = np.expand_dims(to_match_img, axis=0) - - # Get the embedding of the song to match - to_match_emb = embedding_model.predict(to_match_img) - - # Calculate the distances between the song to match and the songs in the database - songsdistdict = {} - for key, values in songspecdict.items(): - dist_array = [] - for embd in values: - dist_array.append(np.linalg.norm(to_match_emb - embd)) - - songsdistdict[key] = min(dist_array) - song_titles=list(songsdistdict.keys()) - distances=list(songsdistdict.values()) - - # Get the title and artist of the recognized song - recognized_song_artist, recognized_song_title = song_titles[distances.index(min(distances))].split('-') - recognized_song_title = os.path.splitext(recognized_song_title)[0] - print(f'Artist: {recognized_song_artist}') - print(f'Title: {recognized_song_title}') - - # Initialize Musixmatch API - musixmatch = Musixmatch(apikey='2b0d0615efa782e95598a0e99bda4a60') - - # Search for the recognized song - track_search_results = musixmatch.track_search(q_track=recognized_song_title, q_artist=recognized_song_artist, page_size=1, page=1, s_track_rating='desc') - - if track_search_results['message']['header']['status_code'] == 200: - # Get the track ID for the top result - track_id = track_search_results['message']['body']['track_list'][0]['track']['track_id'] - - # Get the lyrics for the recognized song - lyrics_result = musixmatch.track_lyrics_get(track_id=track_id) - - if lyrics_result['message']['header']['status_code'] == 200: - # Get the lyrics - lyrics = lyrics_result['message']['body']['lyrics']['lyrics_body'] - # Remove the annotation tags from the lyrics - lyrics = lyrics.replace('******* This Lyrics is NOT for Commercial use *******', '').strip() - print("Lyrics:\n", lyrics) - return f"Artist: {recognized_song_artist}\nTitle: {recognized_song_title}\nLyrics:\n{lyrics}" - else: - print("Couldn't find lyrics for the recognized song.") - return "Couldn't find lyrics for the recognized 
song." - - -iface = gr.Interface(fn=greet, - inputs=gr.inputs.Audio(source="microphone", type="filepath"), - outputs="text", - layout="horizontal", - theme="huggingface", - title=title - ) -iface.launch() \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py deleted file mode 100644 index ed00764f7c193ca9bcd0bf67196da59c30048a28..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -"""fontTools.ttLib -- a package for dealing with TrueType fonts.""" - -from fontTools.misc.loggingTools import deprecateFunction -import logging - - -log = logging.getLogger(__name__) - - -class TTLibError(Exception): - pass - - -class TTLibFileIsCollectionError(TTLibError): - pass - - -@deprecateFunction("use logging instead", category=DeprecationWarning) -def debugmsg(msg): - import time - - print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) - - -from fontTools.ttLib.ttFont import * -from fontTools.ttLib.ttCollection import TTCollection diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-36a07a65.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-36a07a65.js deleted file mode 100644 index 7f8ab455910c826b4870c75c4d6aa50271bc9e20..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-36a07a65.js +++ /dev/null @@ -1,2 +0,0 @@ -import{r as f}from"./file-url-595a5096.js";/* empty css */import"./Index-c74a8b7c.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";const{SvelteComponent:b,append:k,assign:_,compute_rest_props:d,detach:u,element:v,empty:w,exclude_internal_props:y,get_spread_update:q,handle_promise:g,init:C,insert:m,noop:o,safe_not_equal:S,set_attributes:p,set_data:j,set_style:E,src_url_equal:I,text:N,toggle_class:h,update_await_block_branch:P}=window.__gradio__svelte__internal;function z(s){let e,r=s[3].message+"",n;return{c(){e=v("p"),n=N(r),E(e,"color","red")},m(t,l){m(t,e,l),k(e,n)},p(t,l){l&1&&r!==(r=t[3].message+"")&&j(n,r)},d(t){t&&u(e)}}}function A(s){let e,r,n=[{src:r=s[2]},s[1]],t={};for(let l=0;le.parentNode,n.anchor=e},p(t,[l]){s=t,n.ctx=s,l&1&&r!==(r=f(s[0]))&&g(r,n)||P(n,s,l)},i:o,o,d(t){t&&u(e),n.block.d(t),n.token=null,n=null}}}function F(s,e,r){const n=["src"];let t=d(e,n),{src:l=void 0}=e;return s.$$set=a=>{e=_(_({},e),y(a)),r(1,t=d(e,n)),"src"in a&&r(0,l=a.src)},[l,t]}class G extends b{constructor(e){super(),C(this,e,F,D,S,{src:0})}}const{SvelteComponent:H,attr:J,create_component:K,destroy_component:L,detach:M,element:O,init:Q,insert:R,mount_component:T,safe_not_equal:U,toggle_class:c,transition_in:V,transition_out:W}=window.__gradio__svelte__internal;function X(s){let e,r,n;return r=new G({props:{src:s[1]+s[0],alt:""}}),{c(){e=O("div"),K(r.$$.fragment),J(e,"class","container svelte-5cqjmr"),c(e,"table",s[2]==="table"),c(e,"gallery",s[2]==="gallery"),c(e,"selected",s[3])},m(t,l){R(t,e,l),T(r,e,null),n=!0},p(t,[l]){const a={};l&3&&(a.src=t[1]+t[0]),r.$set(a),(!n||l&4)&&c(e,"table",t[2]==="table"),(!n||l&4)&&c(e,"gallery",t[2]==="gallery"),(!n||l&8)&&c(e,"selected",t[3])},i(t){n||(V(r.$$.fragment,t),n=!0)},o(t){W(r.$$.fragment,t),n=!1},d(t){t&&M(e),L(r)}}}function 
Y(s,e,r){let{value:n}=e,{samples_dir:t}=e,{type:l}=e,{selected:a=!1}=e;return s.$$set=i=>{"value"in i&&r(0,n=i.value),"samples_dir"in i&&r(1,t=i.samples_dir),"type"in i&&r(2,l=i.type),"selected"in i&&r(3,a=i.selected)},[n,t,l,a]}class ne extends H{constructor(e){super(),Q(this,e,Y,X,U,{value:0,samples_dir:1,type:2,selected:3})}}export{ne as default}; -//# sourceMappingURL=Example-36a07a65.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py deleted file mode 100644 index d141d459a59d134beac3b2dffb17d17f29abcea4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/httpcore/_sync/http2.py +++ /dev/null @@ -1,589 +0,0 @@ -import enum -import logging -import time -import types -import typing - -import h2.config -import h2.connection -import h2.events -import h2.exceptions -import h2.settings - -from .._backends.base import NetworkStream -from .._exceptions import ( - ConnectionNotAvailable, - LocalProtocolError, - RemoteProtocolError, -) -from .._models import Origin, Request, Response -from .._synchronization import Lock, Semaphore, ShieldCancellation -from .._trace import Trace -from .interfaces import ConnectionInterface - -logger = logging.getLogger("httpcore.http2") - - -def has_body_headers(request: Request) -> bool: - return any( - k.lower() == b"content-length" or k.lower() == b"transfer-encoding" - for k, v in request.headers - ) - - -class HTTPConnectionState(enum.IntEnum): - ACTIVE = 1 - IDLE = 2 - CLOSED = 3 - - -class HTTP2Connection(ConnectionInterface): - READ_NUM_BYTES = 64 * 1024 - CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) - - def __init__( - self, - origin: Origin, - stream: NetworkStream, - keepalive_expiry: typing.Optional[float] = None, - ): - self._origin = origin - self._network_stream = stream - self._keepalive_expiry: typing.Optional[float] = keepalive_expiry - self._h2_state = h2.connection.H2Connection(config=self.CONFIG) - self._state = HTTPConnectionState.IDLE - self._expire_at: typing.Optional[float] = None - self._request_count = 0 - self._init_lock = Lock() - self._state_lock = Lock() - self._read_lock = Lock() - self._write_lock = Lock() - self._sent_connection_init = False - self._used_all_stream_ids = False - self._connection_error = False - - # Mapping from stream ID to response stream events. - self._events: typing.Dict[ - int, - typing.Union[ - h2.events.ResponseReceived, - h2.events.DataReceived, - h2.events.StreamEnded, - h2.events.StreamReset, - ], - ] = {} - - # Connection terminated events are stored as state since - # we need to handle them for all streams. - self._connection_terminated: typing.Optional[ - h2.events.ConnectionTerminated - ] = None - - self._read_exception: typing.Optional[Exception] = None - self._write_exception: typing.Optional[Exception] = None - - def handle_request(self, request: Request) -> Response: - if not self.can_handle_request(request.url.origin): - # This cannot occur in normal operation, since the connection pool - # will only send requests on connections that handle them. - # It's in place simply for resilience as a guard against incorrect - # usage, for anyone working directly with httpcore connections. 
- raise RuntimeError( - f"Attempted to send request to {request.url.origin} on connection " - f"to {self._origin}" - ) - - with self._state_lock: - if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): - self._request_count += 1 - self._expire_at = None - self._state = HTTPConnectionState.ACTIVE - else: - raise ConnectionNotAvailable() - - with self._init_lock: - if not self._sent_connection_init: - try: - kwargs = {"request": request} - with Trace("send_connection_init", logger, request, kwargs): - self._send_connection_init(**kwargs) - except BaseException as exc: - with ShieldCancellation(): - self.close() - raise exc - - self._sent_connection_init = True - - # Initially start with just 1 until the remote server provides - # its max_concurrent_streams value - self._max_streams = 1 - - local_settings_max_streams = ( - self._h2_state.local_settings.max_concurrent_streams - ) - self._max_streams_semaphore = Semaphore(local_settings_max_streams) - - for _ in range(local_settings_max_streams - self._max_streams): - self._max_streams_semaphore.acquire() - - self._max_streams_semaphore.acquire() - - try: - stream_id = self._h2_state.get_next_available_stream_id() - self._events[stream_id] = [] - except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover - self._used_all_stream_ids = True - self._request_count -= 1 - raise ConnectionNotAvailable() - - try: - kwargs = {"request": request, "stream_id": stream_id} - with Trace("send_request_headers", logger, request, kwargs): - self._send_request_headers(request=request, stream_id=stream_id) - with Trace("send_request_body", logger, request, kwargs): - self._send_request_body(request=request, stream_id=stream_id) - with Trace( - "receive_response_headers", logger, request, kwargs - ) as trace: - status, headers = self._receive_response( - request=request, stream_id=stream_id - ) - trace.return_value = (status, headers) - - return Response( - status=status, - headers=headers, - content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), - extensions={ - "http_version": b"HTTP/2", - "network_stream": self._network_stream, - "stream_id": stream_id, - }, - ) - except BaseException as exc: # noqa: PIE786 - with ShieldCancellation(): - kwargs = {"stream_id": stream_id} - with Trace("response_closed", logger, request, kwargs): - self._response_closed(stream_id=stream_id) - - if isinstance(exc, h2.exceptions.ProtocolError): - # One case where h2 can raise a protocol error is when a - # closed frame has been seen by the state machine. - # - # This happens when one stream is reading, and encounters - # a GOAWAY event. Other flows of control may then raise - # a protocol error at any point they interact with the 'h2_state'. - # - # In this case we'll have stored the event, and should raise - # it as a RemoteProtocolError. - if self._connection_terminated: # pragma: nocover - raise RemoteProtocolError(self._connection_terminated) - # If h2 raises a protocol error in some other state then we - # must somehow have made a protocol violation. - raise LocalProtocolError(exc) # pragma: nocover - - raise exc - - def _send_connection_init(self, request: Request) -> None: - """ - The HTTP/2 connection requires some initial setup before we can start - using individual request/response streams on it. - """ - # Need to set these manually here instead of manipulating via - # __setitem__() otherwise the H2Connection will emit SettingsUpdate - # frames in addition to sending the undesired defaults. 
- self._h2_state.local_settings = h2.settings.Settings( - client=True, - initial_values={ - # Disable PUSH_PROMISE frames from the server since we don't do anything - # with them for now. Maybe when we support caching? - h2.settings.SettingCodes.ENABLE_PUSH: 0, - # These two are taken from h2 for safe defaults - h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, - h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, - }, - ) - - # Some websites (*cough* Yahoo *cough*) balk at this setting being - # present in the initial handshake since it's not defined in the original - # RFC despite the RFC mandating ignoring settings you don't know about. - del self._h2_state.local_settings[ - h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL - ] - - self._h2_state.initiate_connection() - self._h2_state.increment_flow_control_window(2**24) - self._write_outgoing_data(request) - - # Sending the request... - - def _send_request_headers(self, request: Request, stream_id: int) -> None: - """ - Send the request headers to a given stream ID. - """ - end_stream = not has_body_headers(request) - - # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. - # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require - # HTTP/1.1 style headers, and map them appropriately if we end up on - # an HTTP/2 connection. - authority = [v for k, v in request.headers if k.lower() == b"host"][0] - - headers = [ - (b":method", request.method), - (b":authority", authority), - (b":scheme", request.url.scheme), - (b":path", request.url.target), - ] + [ - (k.lower(), v) - for k, v in request.headers - if k.lower() - not in ( - b"host", - b"transfer-encoding", - ) - ] - - self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) - self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) - self._write_outgoing_data(request) - - def _send_request_body(self, request: Request, stream_id: int) -> None: - """ - Iterate over the request body sending it to a given stream ID. - """ - if not has_body_headers(request): - return - - assert isinstance(request.stream, typing.Iterable) - for data in request.stream: - self._send_stream_data(request, stream_id, data) - self._send_end_stream(request, stream_id) - - def _send_stream_data( - self, request: Request, stream_id: int, data: bytes - ) -> None: - """ - Send a single chunk of data in one or more data frames. - """ - while data: - max_flow = self._wait_for_outgoing_flow(request, stream_id) - chunk_size = min(len(data), max_flow) - chunk, data = data[:chunk_size], data[chunk_size:] - self._h2_state.send_data(stream_id, chunk) - self._write_outgoing_data(request) - - def _send_end_stream(self, request: Request, stream_id: int) -> None: - """ - Send an empty data frame on on a given stream ID with the END_STREAM flag set. - """ - self._h2_state.end_stream(stream_id) - self._write_outgoing_data(request) - - # Receiving the response... - - def _receive_response( - self, request: Request, stream_id: int - ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: - """ - Return the response status code and headers for a given stream ID. 
- """ - while True: - event = self._receive_stream_event(request, stream_id) - if isinstance(event, h2.events.ResponseReceived): - break - - status_code = 200 - headers = [] - for k, v in event.headers: - if k == b":status": - status_code = int(v.decode("ascii", errors="ignore")) - elif not k.startswith(b":"): - headers.append((k, v)) - - return (status_code, headers) - - def _receive_response_body( - self, request: Request, stream_id: int - ) -> typing.Iterator[bytes]: - """ - Iterator that returns the bytes of the response body for a given stream ID. - """ - while True: - event = self._receive_stream_event(request, stream_id) - if isinstance(event, h2.events.DataReceived): - amount = event.flow_controlled_length - self._h2_state.acknowledge_received_data(amount, stream_id) - self._write_outgoing_data(request) - yield event.data - elif isinstance(event, h2.events.StreamEnded): - break - - def _receive_stream_event( - self, request: Request, stream_id: int - ) -> typing.Union[ - h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded - ]: - """ - Return the next available event for a given stream ID. - - Will read more data from the network if required. - """ - while not self._events.get(stream_id): - self._receive_events(request, stream_id) - event = self._events[stream_id].pop(0) - if isinstance(event, h2.events.StreamReset): - raise RemoteProtocolError(event) - return event - - def _receive_events( - self, request: Request, stream_id: typing.Optional[int] = None - ) -> None: - """ - Read some data from the network until we see one or more events - for a given stream ID. - """ - with self._read_lock: - if self._connection_terminated is not None: - last_stream_id = self._connection_terminated.last_stream_id - if stream_id and last_stream_id and stream_id > last_stream_id: - self._request_count -= 1 - raise ConnectionNotAvailable() - raise RemoteProtocolError(self._connection_terminated) - - # This conditional is a bit icky. We don't want to block reading if we've - # actually got an event to return for a given stream. We need to do that - # check *within* the atomic read lock. Though it also need to be optional, - # because when we call it from `_wait_for_outgoing_flow` we *do* want to - # block until we've available flow control, event when we have events - # pending for the stream ID we're attempting to send on. 
- if stream_id is None or not self._events.get(stream_id): - events = self._read_incoming_data(request) - for event in events: - if isinstance(event, h2.events.RemoteSettingsChanged): - with Trace( - "receive_remote_settings", logger, request - ) as trace: - self._receive_remote_settings_change(event) - trace.return_value = event - - elif isinstance( - event, - ( - h2.events.ResponseReceived, - h2.events.DataReceived, - h2.events.StreamEnded, - h2.events.StreamReset, - ), - ): - if event.stream_id in self._events: - self._events[event.stream_id].append(event) - - elif isinstance(event, h2.events.ConnectionTerminated): - self._connection_terminated = event - - self._write_outgoing_data(request) - - def _receive_remote_settings_change(self, event: h2.events.Event) -> None: - max_concurrent_streams = event.changed_settings.get( - h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS - ) - if max_concurrent_streams: - new_max_streams = min( - max_concurrent_streams.new_value, - self._h2_state.local_settings.max_concurrent_streams, - ) - if new_max_streams and new_max_streams != self._max_streams: - while new_max_streams > self._max_streams: - self._max_streams_semaphore.release() - self._max_streams += 1 - while new_max_streams < self._max_streams: - self._max_streams_semaphore.acquire() - self._max_streams -= 1 - - def _response_closed(self, stream_id: int) -> None: - self._max_streams_semaphore.release() - del self._events[stream_id] - with self._state_lock: - if self._connection_terminated and not self._events: - self.close() - - elif self._state == HTTPConnectionState.ACTIVE and not self._events: - self._state = HTTPConnectionState.IDLE - if self._keepalive_expiry is not None: - now = time.monotonic() - self._expire_at = now + self._keepalive_expiry - if self._used_all_stream_ids: # pragma: nocover - self.close() - - def close(self) -> None: - # Note that this method unilaterally closes the connection, and does - # not have any kind of locking in place around it. - self._h2_state.close_connection() - self._state = HTTPConnectionState.CLOSED - self._network_stream.close() - - # Wrappers around network read/write operations... - - def _read_incoming_data( - self, request: Request - ) -> typing.List[h2.events.Event]: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("read", None) - - if self._read_exception is not None: - raise self._read_exception # pragma: nocover - - try: - data = self._network_stream.read(self.READ_NUM_BYTES, timeout) - if data == b"": - raise RemoteProtocolError("Server disconnected") - except Exception as exc: - # If we get a network error we should: - # - # 1. Save the exception and just raise it immediately on any future reads. - # (For example, this means that a single read timeout or disconnect will - # immediately close all pending streams. Without requiring multiple - # sequential timeouts.) - # 2. Mark the connection as errored, so that we don't accept any other - # incoming requests. 
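
The numbered comment above states the policy that the statements which follow implement: remember the first network failure, then fail every later read immediately and stop accepting new requests. Below is a minimal sketch of that "sticky error" bookkeeping in isolation; the StickyReader class and the wrapped stream object are invented for illustration and are not part of httpcore:

    class StickyReader:
        """Remember the first read failure and re-raise it on every later read."""

        def __init__(self, stream):
            self._stream = stream      # any object exposing .read(n)
            self._error = None

        def read(self, n):
            if self._error is not None:
                raise self._error      # fail fast: a previous read already failed
            try:
                return self._stream.read(n)
            except Exception as exc:
                self._error = exc      # mark the reader as errored for future calls
                raise
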
- self._read_exception = exc - self._connection_error = True - raise exc - - events: typing.List[h2.events.Event] = self._h2_state.receive_data(data) - - return events - - def _write_outgoing_data(self, request: Request) -> None: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("write", None) - - with self._write_lock: - data_to_send = self._h2_state.data_to_send() - - if self._write_exception is not None: - raise self._write_exception # pragma: nocover - - try: - self._network_stream.write(data_to_send, timeout) - except Exception as exc: # pragma: nocover - # If we get a network error we should: - # - # 1. Save the exception and just raise it immediately on any future write. - # (For example, this means that a single write timeout or disconnect will - # immediately close all pending streams. Without requiring multiple - # sequential timeouts.) - # 2. Mark the connection as errored, so that we don't accept any other - # incoming requests. - self._write_exception = exc - self._connection_error = True - raise exc - - # Flow control... - - def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: - """ - Returns the maximum allowable outgoing flow for a given stream. - - If the allowable flow is zero, then waits on the network until - WindowUpdated frames have increased the flow rate. - https://tools.ietf.org/html/rfc7540#section-6.9 - """ - local_flow: int = self._h2_state.local_flow_control_window(stream_id) - max_frame_size: int = self._h2_state.max_outbound_frame_size - flow = min(local_flow, max_frame_size) - while flow == 0: - self._receive_events(request) - local_flow = self._h2_state.local_flow_control_window(stream_id) - max_frame_size = self._h2_state.max_outbound_frame_size - flow = min(local_flow, max_frame_size) - return flow - - # Interface for connection pooling... - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._origin - - def is_available(self) -> bool: - return ( - self._state != HTTPConnectionState.CLOSED - and not self._connection_error - and not self._used_all_stream_ids - and not ( - self._h2_state.state_machine.state - == h2.connection.ConnectionState.CLOSED - ) - ) - - def has_expired(self) -> bool: - now = time.monotonic() - return self._expire_at is not None and now > self._expire_at - - def is_idle(self) -> bool: - return self._state == HTTPConnectionState.IDLE - - def is_closed(self) -> bool: - return self._state == HTTPConnectionState.CLOSED - - def info(self) -> str: - origin = str(self._origin) - return ( - f"{origin!r}, HTTP/2, {self._state.name}, " - f"Request Count: {self._request_count}" - ) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - origin = str(self._origin) - return ( - f"<{class_name} [{origin!r}, {self._state.name}, " - f"Request Count: {self._request_count}]>" - ) - - # These context managers are not used in the standard flow, but are - # useful for testing or working with connection instances directly. 
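
The comment just above notes that __enter__ and __exit__ exist mainly for tests and for code working with connection instances directly; in normal use a connection pool creates and closes these connections. A rough usage sketch against httpcore's documented high-level API (the http2=True flag, pool.request, and the extensions["http_version"] key are recalled from the library's documentation rather than taken from this diff; the URL is a placeholder, and the optional h2 dependency must be installed):

    import httpcore

    # The pool owns the HTTP/2 connections and closes them when the block exits.
    with httpcore.ConnectionPool(http2=True) as pool:
        response = pool.request("GET", "https://example.org/")
        print(response.status)                          # e.g. 200
        print(response.extensions.get("http_version"))  # b"HTTP/2" when negotiated
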
- - def __enter__(self) -> "HTTP2Connection": - return self - - def __exit__( - self, - exc_type: typing.Optional[typing.Type[BaseException]] = None, - exc_value: typing.Optional[BaseException] = None, - traceback: typing.Optional[types.TracebackType] = None, - ) -> None: - self.close() - - -class HTTP2ConnectionByteStream: - def __init__( - self, connection: HTTP2Connection, request: Request, stream_id: int - ) -> None: - self._connection = connection - self._request = request - self._stream_id = stream_id - self._closed = False - - def __iter__(self) -> typing.Iterator[bytes]: - kwargs = {"request": self._request, "stream_id": self._stream_id} - try: - with Trace("receive_response_body", logger, self._request, kwargs): - for chunk in self._connection._receive_response_body( - request=self._request, stream_id=self._stream_id - ): - yield chunk - except BaseException as exc: - # If we get an exception while streaming the response, - # we want to close the response (and possibly the connection) - # before raising that exception. - with ShieldCancellation(): - self.close() - raise exc - - def close(self) -> None: - if not self._closed: - self._closed = True - kwargs = {"stream_id": self._stream_id} - with Trace("response_closed", logger, self._request, kwargs): - self._connection._response_closed(stream_id=self._stream_id) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py deleted file mode 100644 index e3c88525371edbf742d4e2e9c7401b60b29cd740..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py +++ /dev/null @@ -1,460 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for MaskedArray & subclassing. - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ - -""" -import numpy as np -from numpy.lib.mixins import NDArrayOperatorsMixin -from numpy.testing import assert_, assert_raises -from numpy.ma.testutils import assert_equal -from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, log, add, hypot, - divide, asarray, asanyarray, nomask - ) -# from numpy.ma.core import ( - -def assert_startswith(a, b): - # produces a better error message than assert_(a.startswith(b)) - assert_equal(a[:len(b)], b) - -class SubArray(np.ndarray): - # Defines a generic np.ndarray subclass, that stores some metadata - # in the dictionary `info`. 
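
The comment above describes the pattern the rest of this test module builds on: an ndarray subclass whose metadata survives view casting and slicing. A standalone sketch of that pattern using the standard, documented NumPy subclassing hooks; the InfoArray name and the "units" key are made up for illustration:

    import numpy as np

    class InfoArray(np.ndarray):
        """ndarray subclass carrying a small metadata dict in .info."""

        def __new__(cls, arr, info=None):
            obj = np.asanyarray(arr).view(cls)
            obj.info = dict(info or {})
            return obj

        def __array_finalize__(self, obj):
            # Also runs for views and slices, so metadata follows derived arrays.
            self.info = getattr(obj, "info", {}).copy()

    a = InfoArray([1.0, 2.0, 3.0], info={"units": "m"})
    print(a[1:].info)   # {'units': 'm'} -- the slice keeps the metadata
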
- def __new__(cls,arr,info={}): - x = np.asanyarray(arr).view(cls) - x.info = info.copy() - return x - - def __array_finalize__(self, obj): - super().__array_finalize__(obj) - self.info = getattr(obj, 'info', {}).copy() - return - - def __add__(self, other): - result = super().__add__(other) - result.info['added'] = result.info.get('added', 0) + 1 - return result - - def __iadd__(self, other): - result = super().__iadd__(other) - result.info['iadded'] = result.info.get('iadded', 0) + 1 - return result - - -subarray = SubArray - - -class SubMaskedArray(MaskedArray): - """Pure subclass of MaskedArray, keeping some info on subclass.""" - def __new__(cls, info=None, **kwargs): - obj = super().__new__(cls, **kwargs) - obj._optinfo['info'] = info - return obj - - -class MSubArray(SubArray, MaskedArray): - - def __new__(cls, data, info={}, mask=nomask): - subarr = SubArray(data, info) - _data = MaskedArray.__new__(cls, data=subarr, mask=mask) - _data.info = subarr.info - return _data - - @property - def _series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - -msubarray = MSubArray - - -# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing -# setting to non-class values (and thus np.ma.core.masked_print_option) -# and overrides __array_wrap__, updating the info dict, to check that this -# doesn't get destroyed by MaskedArray._update_from. But this one also needs -# its own iterator... -class CSAIterator: - """ - Flat iterator object that uses its own setter/getter - (works around ndarray.flat not propagating subclass setters/getters - see https://github.com/numpy/numpy/issues/4564) - roughly following MaskedIterator - """ - def __init__(self, a): - self._original = a - self._dataiter = a.view(np.ndarray).flat - - def __iter__(self): - return self - - def __getitem__(self, indx): - out = self._dataiter.__getitem__(indx) - if not isinstance(out, np.ndarray): - out = out.__array__() - out = out.view(type(self._original)) - return out - - def __setitem__(self, index, value): - self._dataiter[index] = self._original._validate_input(value) - - def __next__(self): - return next(self._dataiter).__array__().view(type(self._original)) - - -class ComplicatedSubArray(SubArray): - - def __str__(self): - return f'myprefix {self.view(SubArray)} mypostfix' - - def __repr__(self): - # Return a repr that does not start with 'name(' - return f'<{self.__class__.__name__} {self}>' - - def _validate_input(self, value): - if not isinstance(value, ComplicatedSubArray): - raise ValueError("Can only set to MySubArray values") - return value - - def __setitem__(self, item, value): - # validation ensures direct assignment with ndarray or - # masked_print_option will fail - super().__setitem__(item, self._validate_input(value)) - - def __getitem__(self, item): - # ensure getter returns our own class also for scalars - value = super().__getitem__(item) - if not isinstance(value, np.ndarray): # scalar - value = value.__array__().view(ComplicatedSubArray) - return value - - @property - def flat(self): - return CSAIterator(self) - - @flat.setter - def flat(self, value): - y = self.ravel() - y[:] = value - - def __array_wrap__(self, obj, context=None): - obj = super().__array_wrap__(obj, context) - if context is not None and context[0] is np.multiply: - obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 - - return obj - - -class WrappedArray(NDArrayOperatorsMixin): - """ - Wrapping a MaskedArray rather than subclassing to test that - ufunc deferrals are 
commutative. - See: https://github.com/numpy/numpy/issues/15200) - """ - __slots__ = ('_array', 'attrs') - __array_priority__ = 20 - - def __init__(self, array, **attrs): - self._array = array - self.attrs = attrs - - def __repr__(self): - return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" - - def __array__(self): - return np.asarray(self._array) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - if method == '__call__': - inputs = [arg._array if isinstance(arg, self.__class__) else arg - for arg in inputs] - return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) - else: - return NotImplemented - - -class TestSubclassing: - # Test suite for masked subclasses of ndarray. - - def setup_method(self): - x = np.arange(5, dtype='float') - mx = msubarray(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_data_subclassing(self): - # Tests whether the subclass is kept. - x = np.arange(5) - m = [0, 0, 1, 0, 0] - xsub = SubArray(x) - xmsub = masked_array(xsub, mask=m) - assert_(isinstance(xmsub, MaskedArray)) - assert_equal(xmsub._data, xsub) - assert_(isinstance(xmsub._data, SubArray)) - - def test_maskedarray_subclassing(self): - # Tests subclassing MaskedArray - (x, mx) = self.data - assert_(isinstance(mx._data, subarray)) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (x, mx) = self.data - with np.errstate(divide='ignore'): - assert_(isinstance(log(mx), msubarray)) - assert_equal(log(x), np.log(x)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (x, mx) = self.data - # Result should be a msubarray - assert_(isinstance(add(mx, mx), msubarray)) - assert_(isinstance(add(mx, x), msubarray)) - # Result should work - assert_equal(add(mx, x), mx+x) - assert_(isinstance(add(mx, mx)._data, subarray)) - assert_(isinstance(add.outer(mx, mx), msubarray)) - assert_(isinstance(hypot(mx, mx), msubarray)) - assert_(isinstance(hypot(mx, x), msubarray)) - - def test_masked_binary_operations2(self): - # Tests domained_masked_binary_operation - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - assert_(isinstance(divide(mx, mx), msubarray)) - assert_(isinstance(divide(mx, x), msubarray)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - - def test_attributepropagation(self): - x = array(arange(5), mask=[0]+[1]*4) - my = masked_array(subarray(x)) - ym = msubarray(x) - # - z = (my+1) - assert_(isinstance(z, MaskedArray)) - assert_(not isinstance(z, MSubArray)) - assert_(isinstance(z._data, SubArray)) - assert_equal(z._data.info, {}) - # - z = (ym+1) - assert_(isinstance(z, MaskedArray)) - assert_(isinstance(z, MSubArray)) - assert_(isinstance(z._data, SubArray)) - assert_(z._data.info['added'] > 0) - # Test that inplace methods from data get used (gh-4617) - ym += 1 - assert_(isinstance(ym, MaskedArray)) - assert_(isinstance(ym, MSubArray)) - assert_(isinstance(ym._data, SubArray)) - assert_(ym._data.info['iadded'] > 0) - # - ym._set_mask([1, 0, 0, 0, 1]) - assert_equal(ym._mask, [1, 0, 0, 0, 1]) - ym._series._set_mask([0, 0, 0, 0, 1]) - assert_equal(ym._mask, [0, 0, 0, 0, 1]) - # - xsub = subarray(x, info={'name':'x'}) - mxsub = masked_array(xsub) - assert_(hasattr(mxsub, 'info')) - assert_equal(mxsub.info, xsub.info) - - def test_subclasspreservation(self): - # Checks that masked_array(...,subok=True) preserves the class. 
- x = np.arange(5) - m = [0, 0, 1, 0, 0] - xinfo = [(i, j) for (i, j) in zip(x, m)] - xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) - # - mxsub = masked_array(xsub, subok=False) - assert_(not isinstance(mxsub, MSubArray)) - assert_(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = asarray(xsub) - assert_(not isinstance(mxsub, MSubArray)) - assert_(isinstance(mxsub, MaskedArray)) - assert_equal(mxsub._mask, m) - # - mxsub = masked_array(xsub, subok=True) - assert_(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, xsub._mask) - # - mxsub = asanyarray(xsub) - assert_(isinstance(mxsub, MSubArray)) - assert_equal(mxsub.info, xsub.info) - assert_equal(mxsub._mask, m) - - def test_subclass_items(self): - """test that getter and setter go via baseclass""" - x = np.arange(5) - xcsub = ComplicatedSubArray(x) - mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - # getter should return a ComplicatedSubArray, even for single item - # first check we wrote ComplicatedSubArray correctly - assert_(isinstance(xcsub[1], ComplicatedSubArray)) - assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) - assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) - - # now that it propagates inside the MaskedArray - assert_(isinstance(mxcsub[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) - assert_(mxcsub[0] is masked) - assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) - - # also for flattened version (which goes via MaskedIterator) - assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) - assert_(mxcsub.flat[0] is masked) - assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) - - # setter should only work with ComplicatedSubArray input - # first check we wrote ComplicatedSubArray correctly - assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) - # now that it propagates inside the MaskedArray - assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) - assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) - mxcsub[1] = xcsub[4] - mxcsub[1:4] = xcsub[1:4] - # also for flattened version (which goes via MaskedIterator) - assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) - assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) - mxcsub.flat[1] = xcsub[4] - mxcsub.flat[1:4] = xcsub[1:4] - - def test_subclass_nomask_items(self): - x = np.arange(5) - xcsub = ComplicatedSubArray(x) - mxcsub_nomask = masked_array(xcsub) - - assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) - - assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) - assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) - - def test_subclass_repr(self): - """test that repr uses the name of the subclass - and 'array' for np.ndarray""" - x = np.arange(5) - mx = masked_array(x, mask=[True, False, True, False, False]) - assert_startswith(repr(mx), 'masked_array') - xsub = SubArray(x) - mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - assert_startswith(repr(mxsub), - f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') - - def test_subclass_str(self): - """test str with subclass that has overridden str, setitem""" - # first without override - x = np.arange(5) - xsub = SubArray(x) - mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - assert_equal(str(mxsub), '[-- 1 -- 3 
4]') - - xcsub = ComplicatedSubArray(x) - assert_raises(ValueError, xcsub.__setitem__, 0, - np.ma.core.masked_print_option) - mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') - - def test_pure_subclass_info_preservation(self): - # Test that ufuncs and methods conserve extra information consistently; - # see gh-7122. - arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) - arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) - diff1 = np.subtract(arr1, arr2) - assert_('info' in diff1._optinfo) - assert_(diff1._optinfo['info'] == 'test') - diff2 = arr1 - arr2 - assert_('info' in diff2._optinfo) - assert_(diff2._optinfo['info'] == 'test') - - -class ArrayNoInheritance: - """Quantity-like class that does not inherit from ndarray""" - def __init__(self, data, units): - self.magnitude = data - self.units = units - - def __getattr__(self, attr): - return getattr(self.magnitude, attr) - - -def test_array_no_inheritance(): - data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) - data_masked_units = ArrayNoInheritance(data_masked, 'meters') - - # Get the masked representation of the Quantity-like class - new_array = np.ma.array(data_masked_units) - assert_equal(data_masked.data, new_array.data) - assert_equal(data_masked.mask, new_array.mask) - # Test sharing the mask - data_masked.mask = [True, False, False] - assert_equal(data_masked.mask, new_array.mask) - assert_(new_array.sharedmask) - - # Get the masked representation of the Quantity-like class - new_array = np.ma.array(data_masked_units, copy=True) - assert_equal(data_masked.data, new_array.data) - assert_equal(data_masked.mask, new_array.mask) - # Test that the mask is not shared when copy=True - data_masked.mask = [True, False, True] - assert_equal([True, False, False], new_array.mask) - assert_(not new_array.sharedmask) - - # Get the masked representation of the Quantity-like class - new_array = np.ma.array(data_masked_units, keep_mask=False) - assert_equal(data_masked.data, new_array.data) - # The change did not affect the original mask - assert_equal(data_masked.mask, [True, False, True]) - # Test that the mask is False and not shared when keep_mask=False - assert_(not new_array.mask) - assert_(not new_array.sharedmask) - - -class TestClassWrapping: - # Test suite for classes that wrap MaskedArrays - - def setup_method(self): - m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) - wm = WrappedArray(m) - self.data = (m, wm) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (m, wm) = self.data - with np.errstate(divide='ignore'): - assert_(isinstance(np.log(wm), WrappedArray)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (m, wm) = self.data - # Result should be a WrappedArray - assert_(isinstance(np.add(wm, wm), WrappedArray)) - assert_(isinstance(np.add(m, wm), WrappedArray)) - assert_(isinstance(np.add(wm, m), WrappedArray)) - # add and '+' should call the same ufunc - assert_equal(np.add(m, wm), m + wm) - assert_(isinstance(np.hypot(m, wm), WrappedArray)) - assert_(isinstance(np.hypot(wm, m), WrappedArray)) - # Test domained binary operations - assert_(isinstance(np.divide(wm, m), WrappedArray)) - assert_(isinstance(np.divide(m, wm), WrappedArray)) - assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) - # Test broadcasting - m2 = np.stack([m, m]) - assert_(isinstance(np.divide(wm, m2), WrappedArray)) - assert_(isinstance(np.divide(m2, wm), WrappedArray)) - 
assert_equal(np.divide(m2, wm), np.divide(wm, m2)) - - def test_mixins_have_slots(self): - mixin = NDArrayOperatorsMixin() - # Should raise an error - assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1) - - m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) - wm = WrappedArray(m) - assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py deleted file mode 100644 index a5c6f96e9fa2a8c966bcdd7c7164c6611bfcfe4d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py +++ /dev/null @@ -1,248 +0,0 @@ -import sys -import datetime as dt - -import pytest -import numpy as np - -b = np.bool_() -u8 = np.uint64() -i8 = np.int64() -f8 = np.float64() -c16 = np.complex128() -U = np.str_() -S = np.bytes_() - - -# Construction -class D: - def __index__(self) -> int: - return 0 - - -class C: - def __complex__(self) -> complex: - return 3j - - -class B: - def __int__(self) -> int: - return 4 - - -class A: - def __float__(self) -> float: - return 4.0 - - -np.complex64(3j) -np.complex64(A()) -np.complex64(C()) -np.complex128(3j) -np.complex128(C()) -np.complex128(None) -np.complex64("1.2") -np.complex128(b"2j") - -np.int8(4) -np.int16(3.4) -np.int32(4) -np.int64(-1) -np.uint8(B()) -np.uint32() -np.int32("1") -np.int64(b"2") - -np.float16(A()) -np.float32(16) -np.float64(3.0) -np.float64(None) -np.float32("1") -np.float16(b"2.5") - -np.uint64(D()) -np.float32(D()) -np.complex64(D()) - -np.bytes_(b"hello") -np.bytes_("hello", 'utf-8') -np.bytes_("hello", encoding='utf-8') -np.str_("hello") -np.str_(b"hello", 'utf-8') -np.str_(b"hello", encoding='utf-8') - -# Array-ish semantics -np.int8().real -np.int16().imag -np.int32().data -np.int64().flags - -np.uint8().itemsize * 2 -np.uint16().ndim + 1 -np.uint32().strides -np.uint64().shape - -# Time structures -np.datetime64() -np.datetime64(0, "D") -np.datetime64(0, b"D") -np.datetime64(0, ('ms', 3)) -np.datetime64("2019") -np.datetime64(b"2019") -np.datetime64("2019", "D") -np.datetime64(np.datetime64()) -np.datetime64(dt.datetime(2000, 5, 3)) -np.datetime64(dt.date(2000, 5, 3)) -np.datetime64(None) -np.datetime64(None, "D") - -np.timedelta64() -np.timedelta64(0) -np.timedelta64(0, "D") -np.timedelta64(0, ('ms', 3)) -np.timedelta64(0, b"D") -np.timedelta64("3") -np.timedelta64(b"5") -np.timedelta64(np.timedelta64(2)) -np.timedelta64(dt.timedelta(2)) -np.timedelta64(None) -np.timedelta64(None, "D") - -np.void(1) -np.void(np.int64(1)) -np.void(True) -np.void(np.bool_(True)) -np.void(b"test") -np.void(np.bytes_("test")) -np.void(object(), [("a", "O"), ("b", "O")]) -np.void(object(), dtype=[("a", "O"), ("b", "O")]) - -# Protocols -i8 = np.int64() -u8 = np.uint64() -f8 = np.float64() -c16 = np.complex128() -b_ = np.bool_() -td = np.timedelta64() -U = np.str_("1") -S = np.bytes_("1") -AR = np.array(1, dtype=np.float64) - -int(i8) -int(u8) -int(f8) -int(b_) -int(td) -int(U) -int(S) -int(AR) -with pytest.warns(np.ComplexWarning): - int(c16) - -float(i8) -float(u8) -float(f8) -float(b_) -float(td) -float(U) -float(S) -float(AR) -with pytest.warns(np.ComplexWarning): - float(c16) - -complex(i8) -complex(u8) -complex(f8) -complex(c16) -complex(b_) -complex(td) -complex(U) -complex(AR) - - -# Misc -c16.dtype -c16.real -c16.imag 
-c16.real.real -c16.real.imag -c16.ndim -c16.size -c16.itemsize -c16.shape -c16.strides -c16.squeeze() -c16.byteswap() -c16.transpose() - -# Aliases -np.string_() - -np.byte() -np.short() -np.intc() -np.intp() -np.int_() -np.longlong() - -np.ubyte() -np.ushort() -np.uintc() -np.uintp() -np.uint() -np.ulonglong() - -np.half() -np.single() -np.double() -np.float_() -np.longdouble() -np.longfloat() - -np.csingle() -np.singlecomplex() -np.cdouble() -np.complex_() -np.cfloat() -np.clongdouble() -np.clongfloat() -np.longcomplex() - -b.item() -i8.item() -u8.item() -f8.item() -c16.item() -U.item() -S.item() - -b.tolist() -i8.tolist() -u8.tolist() -f8.tolist() -c16.tolist() -U.tolist() -S.tolist() - -b.ravel() -i8.ravel() -u8.ravel() -f8.ravel() -c16.ravel() -U.ravel() -S.ravel() - -b.flatten() -i8.flatten() -u8.flatten() -f8.flatten() -c16.flatten() -U.flatten() -S.flatten() - -b.reshape(1) -i8.reshape(1) -u8.reshape(1) -f8.reshape(1) -c16.reshape(1) -U.reshape(1) -S.reshape(1) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/interval.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/interval.py deleted file mode 100644 index d0510ede5a3664f59b8df9b6c318f99ed85c34a7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/arrays/interval.py +++ /dev/null @@ -1,1931 +0,0 @@ -from __future__ import annotations - -import operator -from operator import ( - le, - lt, -) -import textwrap -from typing import ( - TYPE_CHECKING, - Literal, - Union, - overload, -) - -import numpy as np - -from pandas._config import get_option - -from pandas._libs import lib -from pandas._libs.interval import ( - VALID_CLOSED, - Interval, - IntervalMixin, - intervals_to_interval_bounds, -) -from pandas._libs.missing import NA -from pandas._typing import ( - ArrayLike, - AxisInt, - Dtype, - FillnaOptions, - IntervalClosedType, - NpDtype, - PositionalIndexer, - ScalarIndexer, - Self, - SequenceIndexer, - SortKind, - TimeArrayLike, - npt, -) -from pandas.compat.numpy import function as nv -from pandas.errors import IntCastingNaNError -from pandas.util._decorators import Appender - -from pandas.core.dtypes.cast import ( - LossySetitemError, - maybe_upcast_numeric_to_64bit, -) -from pandas.core.dtypes.common import ( - is_float_dtype, - is_integer_dtype, - is_list_like, - is_object_dtype, - is_scalar, - is_string_dtype, - needs_i8_conversion, - pandas_dtype, -) -from pandas.core.dtypes.dtypes import ( - CategoricalDtype, - IntervalDtype, -) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCDatetimeIndex, - ABCIntervalIndex, - ABCPeriodIndex, -) -from pandas.core.dtypes.missing import ( - is_valid_na_for_dtype, - isna, - notna, -) - -from pandas.core.algorithms import ( - isin, - take, - unique, - value_counts_internal as value_counts, -) -from pandas.core.arrays.base import ( - ExtensionArray, - _extension_array_shared_docs, -) -from pandas.core.arrays.datetimes import DatetimeArray -from pandas.core.arrays.timedeltas import TimedeltaArray -import pandas.core.common as com -from pandas.core.construction import ( - array as pd_array, - ensure_wrapped_if_datetimelike, - extract_array, -) -from pandas.core.indexers import check_array_indexer -from pandas.core.ops import ( - invalid_comparison, - unpack_zerodim_and_defer, -) - -if TYPE_CHECKING: - from collections.abc import ( - Iterator, - Sequence, - ) - - from pandas import ( - Index, - Series, - ) - - -IntervalSideT = 
Union[TimeArrayLike, np.ndarray] -IntervalOrNA = Union[Interval, float] - -_interval_shared_docs: dict[str, str] = {} - -_shared_docs_kwargs = { - "klass": "IntervalArray", - "qualname": "arrays.IntervalArray", - "name": "", -} - - -_interval_shared_docs[ - "class" -] = """ -%(summary)s - -Parameters ----------- -data : array-like (1-dimensional) - Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing - Interval objects from which to build the %(klass)s. -closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both or - neither. -dtype : dtype or None, default None - If None, dtype will be inferred. -copy : bool, default False - Copy the input data. -%(name)s\ -verify_integrity : bool, default True - Verify that the %(klass)s is valid. - -Attributes ----------- -left -right -closed -mid -length -is_empty -is_non_overlapping_monotonic -%(extra_attributes)s\ - -Methods -------- -from_arrays -from_tuples -from_breaks -contains -overlaps -set_closed -to_tuples -%(extra_methods)s\ - -See Also --------- -Index : The base pandas Index type. -Interval : A bounded slice-like interval; the elements of an %(klass)s. -interval_range : Function to create a fixed frequency IntervalIndex. -cut : Bin values into discrete Intervals. -qcut : Bin values into equal-sized Intervals based on rank or sample quantiles. - -Notes ------ -See the `user guide -`__ -for more. - -%(examples)s\ -""" - - -@Appender( - _interval_shared_docs["class"] - % { - "klass": "IntervalArray", - "summary": "Pandas array for interval data that are closed on the same side.", - "name": "", - "extra_attributes": "", - "extra_methods": "", - "examples": textwrap.dedent( - """\ - Examples - -------- - A new ``IntervalArray`` can be constructed directly from an array-like of - ``Interval`` objects: - - >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) - - [(0, 1], (1, 5]] - Length: 2, dtype: interval[int64, right] - - It may also be constructed using one of the constructor - methods: :meth:`IntervalArray.from_arrays`, - :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`. - """ - ), - } -) -class IntervalArray(IntervalMixin, ExtensionArray): - can_hold_na = True - _na_value = _fill_value = np.nan - - @property - def ndim(self) -> Literal[1]: - return 1 - - # To make mypy recognize the fields - _left: IntervalSideT - _right: IntervalSideT - _dtype: IntervalDtype - - # --------------------------------------------------------------------- - # Constructors - - def __new__( - cls, - data, - closed: IntervalClosedType | None = None, - dtype: Dtype | None = None, - copy: bool = False, - verify_integrity: bool = True, - ): - data = extract_array(data, extract_numpy=True) - - if isinstance(data, cls): - left: IntervalSideT = data._left - right: IntervalSideT = data._right - closed = closed or data.closed - dtype = IntervalDtype(left.dtype, closed=closed) - else: - # don't allow scalars - if is_scalar(data): - msg = ( - f"{cls.__name__}(...) 
must be called with a collection " - f"of some kind, {data} was passed" - ) - raise TypeError(msg) - - # might need to convert empty or purely na data - data = _maybe_convert_platform_interval(data) - left, right, infer_closed = intervals_to_interval_bounds( - data, validate_closed=closed is None - ) - if left.dtype == object: - left = lib.maybe_convert_objects(left) - right = lib.maybe_convert_objects(right) - closed = closed or infer_closed - - left, right, dtype = cls._ensure_simple_new_inputs( - left, - right, - closed=closed, - copy=copy, - dtype=dtype, - ) - - if verify_integrity: - cls._validate(left, right, dtype=dtype) - - return cls._simple_new( - left, - right, - dtype=dtype, - ) - - @classmethod - def _simple_new( - cls, - left: IntervalSideT, - right: IntervalSideT, - dtype: IntervalDtype, - ) -> Self: - result = IntervalMixin.__new__(cls) - result._left = left - result._right = right - result._dtype = dtype - - return result - - @classmethod - def _ensure_simple_new_inputs( - cls, - left, - right, - closed: IntervalClosedType | None = None, - copy: bool = False, - dtype: Dtype | None = None, - ) -> tuple[IntervalSideT, IntervalSideT, IntervalDtype]: - """Ensure correctness of input parameters for cls._simple_new.""" - from pandas.core.indexes.base import ensure_index - - left = ensure_index(left, copy=copy) - left = maybe_upcast_numeric_to_64bit(left) - - right = ensure_index(right, copy=copy) - right = maybe_upcast_numeric_to_64bit(right) - - if closed is None and isinstance(dtype, IntervalDtype): - closed = dtype.closed - - closed = closed or "right" - - if dtype is not None: - # GH 19262: dtype must be an IntervalDtype to override inferred - dtype = pandas_dtype(dtype) - if isinstance(dtype, IntervalDtype): - if dtype.subtype is not None: - left = left.astype(dtype.subtype) - right = right.astype(dtype.subtype) - else: - msg = f"dtype must be an IntervalDtype, got {dtype}" - raise TypeError(msg) - - if dtype.closed is None: - # possibly loading an old pickle - dtype = IntervalDtype(dtype.subtype, closed) - elif closed != dtype.closed: - raise ValueError("closed keyword does not match dtype.closed") - - # coerce dtypes to match if needed - if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype): - right = right.astype(left.dtype) - elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype): - left = left.astype(right.dtype) - - if type(left) != type(right): - msg = ( - f"must not have differing left [{type(left).__name__}] and " - f"right [{type(right).__name__}] types" - ) - raise ValueError(msg) - if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype): - # GH 19016 - msg = ( - "category, object, and string subtypes are not supported " - "for IntervalArray" - ) - raise TypeError(msg) - if isinstance(left, ABCPeriodIndex): - msg = "Period dtypes are not supported, use a PeriodIndex instead" - raise ValueError(msg) - if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): - msg = ( - "left and right must have the same time zone, got " - f"'{left.tz}' and '{right.tz}'" - ) - raise ValueError(msg) - - # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray - left = ensure_wrapped_if_datetimelike(left) - left = extract_array(left, extract_numpy=True) - right = ensure_wrapped_if_datetimelike(right) - right = extract_array(right, extract_numpy=True) - - lbase = getattr(left, "_ndarray", left).base - rbase = getattr(right, "_ndarray", right).base - if lbase is not None and lbase is rbase: - # If these share data, 
then setitem could corrupt our IA - right = right.copy() - - dtype = IntervalDtype(left.dtype, closed=closed) - - return left, right, dtype - - @classmethod - def _from_sequence( - cls, - scalars, - *, - dtype: Dtype | None = None, - copy: bool = False, - ) -> Self: - return cls(scalars, dtype=dtype, copy=copy) - - @classmethod - def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self: - if len(values) == 0: - # An empty array returns object-dtype here. We can't create - # a new IA from an (empty) object-dtype array, so turn it into the - # correct dtype. - values = values.astype(original.dtype.subtype) - return cls(values, closed=original.closed) - - _interval_shared_docs["from_breaks"] = textwrap.dedent( - """ - Construct an %(klass)s from an array of splits. - - Parameters - ---------- - breaks : array-like (1-dimensional) - Left and right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither.\ - %(name)s - copy : bool, default False - Copy the data. - dtype : dtype or None, default None - If None, dtype will be inferred. - - Returns - ------- - %(klass)s - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_arrays : Construct from a left and right array. - %(klass)s.from_tuples : Construct from a sequence of tuples. - - %(examples)s\ - """ - ) - - @classmethod - @Appender( - _interval_shared_docs["from_breaks"] - % { - "klass": "IntervalArray", - "name": "", - "examples": textwrap.dedent( - """\ - Examples - -------- - >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3]) - - [(0, 1], (1, 2], (2, 3]] - Length: 3, dtype: interval[int64, right] - """ - ), - } - ) - def from_breaks( - cls, - breaks, - closed: IntervalClosedType | None = "right", - copy: bool = False, - dtype: Dtype | None = None, - ) -> Self: - breaks = _maybe_convert_platform_interval(breaks) - - return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype) - - _interval_shared_docs["from_arrays"] = textwrap.dedent( - """ - Construct from two arrays defining the left and right bounds. - - Parameters - ---------- - left : array-like (1-dimensional) - Left bounds for each interval. - right : array-like (1-dimensional) - Right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither.\ - %(name)s - copy : bool, default False - Copy the data. - dtype : dtype, optional - If None, dtype will be inferred. - - Returns - ------- - %(klass)s - - Raises - ------ - ValueError - When a value is missing in only one of `left` or `right`. - When a value in `left` is greater than the corresponding value - in `right`. - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_breaks : Construct an %(klass)s from an array of - splits. - %(klass)s.from_tuples : Construct an %(klass)s from an - array-like of tuples. - - Notes - ----- - Each element of `left` must be less than or equal to the `right` - element at the same position. If an element is missing, it must be - missing in both `left` and `right`. A TypeError is raised when - using an unsupported type for `left` or `right`. At the moment, - 'category', 'object', and 'string' subtypes are not supported. 
- - %(examples)s\ - """ - ) - - @classmethod - @Appender( - _interval_shared_docs["from_arrays"] - % { - "klass": "IntervalArray", - "name": "", - "examples": textwrap.dedent( - """\ - Examples - -------- - >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3]) - - [(0, 1], (1, 2], (2, 3]] - Length: 3, dtype: interval[int64, right] - """ - ), - } - ) - def from_arrays( - cls, - left, - right, - closed: IntervalClosedType | None = "right", - copy: bool = False, - dtype: Dtype | None = None, - ) -> Self: - left = _maybe_convert_platform_interval(left) - right = _maybe_convert_platform_interval(right) - - left, right, dtype = cls._ensure_simple_new_inputs( - left, - right, - closed=closed, - copy=copy, - dtype=dtype, - ) - cls._validate(left, right, dtype=dtype) - - return cls._simple_new(left, right, dtype=dtype) - - _interval_shared_docs["from_tuples"] = textwrap.dedent( - """ - Construct an %(klass)s from an array-like of tuples. - - Parameters - ---------- - data : array-like (1-dimensional) - Array of tuples. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither.\ - %(name)s - copy : bool, default False - By-default copy the data, this is compat only and ignored. - dtype : dtype or None, default None - If None, dtype will be inferred. - - Returns - ------- - %(klass)s - - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_arrays : Construct an %(klass)s from a left and - right array. - %(klass)s.from_breaks : Construct an %(klass)s from an array of - splits. - - %(examples)s\ - """ - ) - - @classmethod - @Appender( - _interval_shared_docs["from_tuples"] - % { - "klass": "IntervalArray", - "name": "", - "examples": textwrap.dedent( - """\ - Examples - -------- - >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) - - [(0, 1], (1, 2]] - Length: 2, dtype: interval[int64, right] - """ - ), - } - ) - def from_tuples( - cls, - data, - closed: IntervalClosedType | None = "right", - copy: bool = False, - dtype: Dtype | None = None, - ) -> Self: - if len(data): - left, right = [], [] - else: - # ensure that empty data keeps input dtype - left = right = data - - for d in data: - if not isinstance(d, tuple) and isna(d): - lhs = rhs = np.nan - else: - name = cls.__name__ - try: - # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] - lhs, rhs = d - except ValueError as err: - msg = f"{name}.from_tuples requires tuples of length 2, got {d}" - raise ValueError(msg) from err - except TypeError as err: - msg = f"{name}.from_tuples received an invalid item, {d}" - raise TypeError(msg) from err - left.append(lhs) - right.append(rhs) - - return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) - - @classmethod - def _validate(cls, left, right, dtype: IntervalDtype) -> None: - """ - Verify that the IntervalArray is valid. 
- - Checks that - - * dtype is correct - * left and right match lengths - * left and right have the same missing values - * left is always below right - """ - if not isinstance(dtype, IntervalDtype): - msg = f"invalid dtype: {dtype}" - raise ValueError(msg) - if len(left) != len(right): - msg = "left and right must have the same length" - raise ValueError(msg) - left_mask = notna(left) - right_mask = notna(right) - if not (left_mask == right_mask).all(): - msg = ( - "missing values must be missing in the same " - "location both left and right sides" - ) - raise ValueError(msg) - if not (left[left_mask] <= right[left_mask]).all(): - msg = "left side of interval must be <= right side" - raise ValueError(msg) - - def _shallow_copy(self, left, right) -> Self: - """ - Return a new IntervalArray with the replacement attributes - - Parameters - ---------- - left : Index - Values to be used for the left-side of the intervals. - right : Index - Values to be used for the right-side of the intervals. - """ - dtype = IntervalDtype(left.dtype, closed=self.closed) - left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype) - - return self._simple_new(left, right, dtype=dtype) - - # --------------------------------------------------------------------- - # Descriptive - - @property - def dtype(self) -> IntervalDtype: - return self._dtype - - @property - def nbytes(self) -> int: - return self.left.nbytes + self.right.nbytes - - @property - def size(self) -> int: - # Avoid materializing self.values - return self.left.size - - # --------------------------------------------------------------------- - # EA Interface - - def __iter__(self) -> Iterator: - return iter(np.asarray(self)) - - def __len__(self) -> int: - return len(self._left) - - @overload - def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: - ... - - @overload - def __getitem__(self, key: SequenceIndexer) -> Self: - ... 
- - def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA: - key = check_array_indexer(self, key) - left = self._left[key] - right = self._right[key] - - if not isinstance(left, (np.ndarray, ExtensionArray)): - # scalar - if is_scalar(left) and isna(left): - return self._fill_value - return Interval(left, right, self.closed) - if np.ndim(left) > 1: - # GH#30588 multi-dimensional indexer disallowed - raise ValueError("multi-dimensional indexing not allowed") - # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type - # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray, - # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray], - # ndarray[Any, Any]]" - return self._simple_new(left, right, dtype=self.dtype) # type: ignore[arg-type] - - def __setitem__(self, key, value) -> None: - value_left, value_right = self._validate_setitem_value(value) - key = check_array_indexer(self, key) - - self._left[key] = value_left - self._right[key] = value_right - - def _cmp_method(self, other, op): - # ensure pandas array for list-like and eliminate non-interval scalars - if is_list_like(other): - if len(self) != len(other): - raise ValueError("Lengths must match to compare") - other = pd_array(other) - elif not isinstance(other, Interval): - # non-interval scalar -> no matches - if other is NA: - # GH#31882 - from pandas.core.arrays import BooleanArray - - arr = np.empty(self.shape, dtype=bool) - mask = np.ones(self.shape, dtype=bool) - return BooleanArray(arr, mask) - return invalid_comparison(self, other, op) - - # determine the dtype of the elements we want to compare - if isinstance(other, Interval): - other_dtype = pandas_dtype("interval") - elif not isinstance(other.dtype, CategoricalDtype): - other_dtype = other.dtype - else: - # for categorical defer to categories for dtype - other_dtype = other.categories.dtype - - # extract intervals if we have interval categories with matching closed - if isinstance(other_dtype, IntervalDtype): - if self.closed != other.categories.closed: - return invalid_comparison(self, other, op) - - other = other.categories.take( - other.codes, allow_fill=True, fill_value=other.categories._na_value - ) - - # interval-like -> need same closed and matching endpoints - if isinstance(other_dtype, IntervalDtype): - if self.closed != other.closed: - return invalid_comparison(self, other, op) - elif not isinstance(other, Interval): - other = type(self)(other) - - if op is operator.eq: - return (self._left == other.left) & (self._right == other.right) - elif op is operator.ne: - return (self._left != other.left) | (self._right != other.right) - elif op is operator.gt: - return (self._left > other.left) | ( - (self._left == other.left) & (self._right > other.right) - ) - elif op is operator.ge: - return (self == other) | (self > other) - elif op is operator.lt: - return (self._left < other.left) | ( - (self._left == other.left) & (self._right < other.right) - ) - else: - # operator.lt - return (self == other) | (self < other) - - # non-interval/non-object dtype -> no matches - if not is_object_dtype(other_dtype): - return invalid_comparison(self, other, op) - - # object dtype -> iteratively check for intervals - result = np.zeros(len(self), dtype=bool) - for i, obj in enumerate(other): - try: - result[i] = op(self[i], obj) - except TypeError: - if obj is NA: - # comparison with np.nan returns NA - # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092 - result = result.astype(object) - result[i] = NA - 
else: - raise - return result - - @unpack_zerodim_and_defer("__eq__") - def __eq__(self, other): - return self._cmp_method(other, operator.eq) - - @unpack_zerodim_and_defer("__ne__") - def __ne__(self, other): - return self._cmp_method(other, operator.ne) - - @unpack_zerodim_and_defer("__gt__") - def __gt__(self, other): - return self._cmp_method(other, operator.gt) - - @unpack_zerodim_and_defer("__ge__") - def __ge__(self, other): - return self._cmp_method(other, operator.ge) - - @unpack_zerodim_and_defer("__lt__") - def __lt__(self, other): - return self._cmp_method(other, operator.lt) - - @unpack_zerodim_and_defer("__le__") - def __le__(self, other): - return self._cmp_method(other, operator.le) - - def argsort( - self, - *, - ascending: bool = True, - kind: SortKind = "quicksort", - na_position: str = "last", - **kwargs, - ) -> np.ndarray: - ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) - - if ascending and kind == "quicksort" and na_position == "last": - # TODO: in an IntervalIndex we can re-use the cached - # IntervalTree.left_sorter - return np.lexsort((self.right, self.left)) - - # TODO: other cases we can use lexsort for? much more performant. - return super().argsort( - ascending=ascending, kind=kind, na_position=na_position, **kwargs - ) - - def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: - nv.validate_minmax_axis(axis, self.ndim) - - if not len(self): - return self._na_value - - mask = self.isna() - if mask.any(): - if not skipna: - return self._na_value - obj = self[~mask] - else: - obj = self - - indexer = obj.argsort()[0] - return obj[indexer] - - def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: - nv.validate_minmax_axis(axis, self.ndim) - - if not len(self): - return self._na_value - - mask = self.isna() - if mask.any(): - if not skipna: - return self._na_value - obj = self[~mask] - else: - obj = self - - indexer = obj.argsort()[-1] - return obj[indexer] - - def _pad_or_backfill( # pylint: disable=useless-parent-delegation - self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True - ) -> Self: - # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove - # this method entirely. - return super()._pad_or_backfill(method=method, limit=limit, copy=copy) - - def fillna( - self, value=None, method=None, limit: int | None = None, copy: bool = True - ) -> Self: - """ - Fill NA/NaN values using the specified method. - - Parameters - ---------- - value : scalar, dict, Series - If a scalar value is passed it is used to fill all missing values. - Alternatively, a Series or dict can be used to fill in different - values for each index. The value should not be a list. The - value(s) passed should be either Interval objects or NA/NaN. - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - (Not implemented yet for IntervalArray) - Method to use for filling holes in reindexed Series - limit : int, default None - (Not implemented yet for IntervalArray) - If method is specified, this is the maximum number of consecutive - NaN values to forward/backward fill. In other words, if there is - a gap with more than this number of consecutive NaNs, it will only - be partially filled. If method is not specified, this is the - maximum number of entries along the entire axis where NaNs will be - filled. - copy : bool, default True - Whether to make a copy of the data before filling. 
If False, then - the original should be modified and no new memory should be allocated. - For ExtensionArray subclasses that cannot do this, it is at the - author's discretion whether to ignore "copy=False" or to raise. - - Returns - ------- - filled : IntervalArray with NA/NaN filled - """ - if copy is False: - raise NotImplementedError - if method is not None: - return super().fillna(value=value, method=method, limit=limit) - - value_left, value_right = self._validate_scalar(value) - - left = self.left.fillna(value=value_left) - right = self.right.fillna(value=value_right) - return self._shallow_copy(left, right) - - def astype(self, dtype, copy: bool = True): - """ - Cast to an ExtensionArray or NumPy array with dtype 'dtype'. - - Parameters - ---------- - dtype : str or dtype - Typecode or data-type to which the array is cast. - - copy : bool, default True - Whether to copy the data, even if not necessary. If False, - a copy is made only if the old dtype does not match the - new dtype. - - Returns - ------- - array : ExtensionArray or ndarray - ExtensionArray or NumPy ndarray with 'dtype' for its dtype. - """ - from pandas import Index - - if dtype is not None: - dtype = pandas_dtype(dtype) - - if isinstance(dtype, IntervalDtype): - if dtype == self.dtype: - return self.copy() if copy else self - - if is_float_dtype(self.dtype.subtype) and needs_i8_conversion( - dtype.subtype - ): - # This is allowed on the Index.astype but we disallow it here - msg = ( - f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" - ) - raise TypeError(msg) - - # need to cast to different subtype - try: - # We need to use Index rules for astype to prevent casting - # np.nan entries to int subtypes - new_left = Index(self._left, copy=False).astype(dtype.subtype) - new_right = Index(self._right, copy=False).astype(dtype.subtype) - except IntCastingNaNError: - # e.g test_subtype_integer - raise - except (TypeError, ValueError) as err: - # e.g. test_subtype_integer_errors f8->u8 can be lossy - # and raises ValueError - msg = ( - f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" - ) - raise TypeError(msg) from err - return self._shallow_copy(new_left, new_right) - else: - try: - return super().astype(dtype, copy=copy) - except (TypeError, ValueError) as err: - msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" - raise TypeError(msg) from err - - def equals(self, other) -> bool: - if type(self) != type(other): - return False - - return bool( - self.closed == other.closed - and self.left.equals(other.left) - and self.right.equals(other.right) - ) - - @classmethod - def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self: - """ - Concatenate multiple IntervalArray - - Parameters - ---------- - to_concat : sequence of IntervalArray - - Returns - ------- - IntervalArray - """ - closed_set = {interval.closed for interval in to_concat} - if len(closed_set) != 1: - raise ValueError("Intervals must all be closed on the same side.") - closed = closed_set.pop() - - left = np.concatenate([interval.left for interval in to_concat]) - right = np.concatenate([interval.right for interval in to_concat]) - - left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed) - - return cls._simple_new(left, right, dtype=dtype) - - def copy(self) -> Self: - """ - Return a copy of the array. 
- - Returns - ------- - IntervalArray - """ - left = self._left.copy() - right = self._right.copy() - dtype = self.dtype - return self._simple_new(left, right, dtype=dtype) - - def isna(self) -> np.ndarray: - return isna(self._left) - - def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray: - if not len(self) or periods == 0: - return self.copy() - - self._validate_scalar(fill_value) - - # ExtensionArray.shift doesn't work for two reasons - # 1. IntervalArray.dtype.na_value may not be correct for the dtype. - # 2. IntervalArray._from_sequence only accepts NaN for missing values, - # not other values like NaT - - empty_len = min(abs(periods), len(self)) - if isna(fill_value): - from pandas import Index - - fill_value = Index(self._left, copy=False)._na_value - empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) - else: - empty = self._from_sequence([fill_value] * empty_len) - - if periods > 0: - a = empty - b = self[:-periods] - else: - a = self[abs(periods) :] - b = empty - return self._concat_same_type([a, b]) - - def take( - self, - indices, - *, - allow_fill: bool = False, - fill_value=None, - axis=None, - **kwargs, - ) -> Self: - """ - Take elements from the IntervalArray. - - Parameters - ---------- - indices : sequence of integers - Indices to be taken. - - allow_fill : bool, default False - How to handle negative values in `indices`. - - * False: negative values in `indices` indicate positional indices - from the right (the default). This is similar to - :func:`numpy.take`. - - * True: negative values in `indices` indicate - missing values. These values are set to `fill_value`. Any other - other negative values raise a ``ValueError``. - - fill_value : Interval or NA, optional - Fill value to use for NA-indices when `allow_fill` is True. - This may be ``None``, in which case the default NA value for - the type, ``self.dtype.na_value``, is used. - - For many ExtensionArrays, there will be two representations of - `fill_value`: a user-facing "boxed" scalar, and a low-level - physical NA value. `fill_value` should be the user-facing version, - and the implementation should handle translating that to the - physical version for processing the take if necessary. - - axis : any, default None - Present for compat with IntervalIndex; does nothing. - - Returns - ------- - IntervalArray - - Raises - ------ - IndexError - When the indices are out of bounds for the array. - ValueError - When `indices` contains negative values other than ``-1`` - and `allow_fill` is True. - """ - nv.validate_take((), kwargs) - - fill_left = fill_right = fill_value - if allow_fill: - fill_left, fill_right = self._validate_scalar(fill_value) - - left_take = take( - self._left, indices, allow_fill=allow_fill, fill_value=fill_left - ) - right_take = take( - self._right, indices, allow_fill=allow_fill, fill_value=fill_right - ) - - return self._shallow_copy(left_take, right_take) - - def _validate_listlike(self, value): - # list-like of intervals - try: - array = IntervalArray(value) - self._check_closed_matches(array, name="value") - value_left, value_right = array.left, array.right - except TypeError as err: - # wrong type: not interval or NA - msg = f"'value' should be an interval type, got {type(value)} instead." - raise TypeError(msg) from err - - try: - self.left._validate_fill_value(value_left) - except (LossySetitemError, TypeError) as err: - msg = ( - "'value' should be a compatible interval type, " - f"got {type(value)} instead." 
- ) - raise TypeError(msg) from err - - return value_left, value_right - - def _validate_scalar(self, value): - if isinstance(value, Interval): - self._check_closed_matches(value, name="value") - left, right = value.left, value.right - # TODO: check subdtype match like _validate_setitem_value? - elif is_valid_na_for_dtype(value, self.left.dtype): - # GH#18295 - left = right = self.left._na_value - else: - raise TypeError( - "can only insert Interval objects and NA into an IntervalArray" - ) - return left, right - - def _validate_setitem_value(self, value): - if is_valid_na_for_dtype(value, self.left.dtype): - # na value: need special casing to set directly on numpy arrays - value = self.left._na_value - if is_integer_dtype(self.dtype.subtype): - # can't set NaN on a numpy integer array - # GH#45484 TypeError, not ValueError, matches what we get with - # non-NA un-holdable value. - raise TypeError("Cannot set float NaN to integer-backed IntervalArray") - value_left, value_right = value, value - - elif isinstance(value, Interval): - # scalar interval - self._check_closed_matches(value, name="value") - value_left, value_right = value.left, value.right - self.left._validate_fill_value(value_left) - self.left._validate_fill_value(value_right) - - else: - return self._validate_listlike(value) - - return value_left, value_right - - def value_counts(self, dropna: bool = True) -> Series: - """ - Returns a Series containing counts of each interval. - - Parameters - ---------- - dropna : bool, default True - Don't include counts of NaN. - - Returns - ------- - counts : Series - - See Also - -------- - Series.value_counts - """ - # TODO: implement this is a non-naive way! - return value_counts(np.asarray(self), dropna=dropna) - - # --------------------------------------------------------------------- - # Rendering Methods - - def _format_data(self) -> str: - # TODO: integrate with categorical and make generic - # name argument is unused here; just for compat with base / categorical - n = len(self) - max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10) - - formatter = str - - if n == 0: - summary = "[]" - elif n == 1: - first = formatter(self[0]) - summary = f"[{first}]" - elif n == 2: - first = formatter(self[0]) - last = formatter(self[-1]) - summary = f"[{first}, {last}]" - else: - if n > max_seq_items: - n = min(max_seq_items // 2, 10) - head = [formatter(x) for x in self[:n]] - tail = [formatter(x) for x in self[-n:]] - head_str = ", ".join(head) - tail_str = ", ".join(tail) - summary = f"[{head_str} ... {tail_str}]" - else: - tail = [formatter(x) for x in self] - tail_str = ", ".join(tail) - summary = f"[{tail_str}]" - - return summary - - def __repr__(self) -> str: - # the short repr has no trailing newline, while the truncated - # repr does. So we include a newline in our template, and strip - # any trailing newlines from format_object_summary - data = self._format_data() - class_name = f"<{type(self).__name__}>\n" - - template = f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}" - return template - - def _format_space(self) -> str: - space = " " * (len(type(self).__name__) + 1) - return f"\n{space}" - - # --------------------------------------------------------------------- - # Vectorized Interval Properties/Attributes - - @property - def left(self): - """ - Return the left endpoints of each Interval in the IntervalArray as an Index. 
- - Examples - -------- - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) - >>> interv_arr - - [(0, 1], (2, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.left - Index([0, 2], dtype='int64') - """ - from pandas import Index - - return Index(self._left, copy=False) - - @property - def right(self): - """ - Return the right endpoints of each Interval in the IntervalArray as an Index. - - Examples - -------- - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)]) - >>> interv_arr - - [(0, 1], (2, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.right - Index([1, 5], dtype='int64') - """ - from pandas import Index - - return Index(self._right, copy=False) - - @property - def length(self) -> Index: - """ - Return an Index with entries denoting the length of each Interval. - - Examples - -------- - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) - >>> interv_arr - - [(0, 1], (1, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.length - Index([1, 4], dtype='int64') - """ - return self.right - self.left - - @property - def mid(self) -> Index: - """ - Return the midpoint of each Interval in the IntervalArray as an Index. - - Examples - -------- - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) - >>> interv_arr - - [(0, 1], (1, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.mid - Index([0.5, 3.0], dtype='float64') - """ - try: - return 0.5 * (self.left + self.right) - except TypeError: - # datetime safe version - return self.left + 0.5 * self.length - - _interval_shared_docs["overlaps"] = textwrap.dedent( - """ - Check elementwise if an Interval overlaps the values in the %(klass)s. - - Two intervals overlap if they share a common point, including closed - endpoints. Intervals that only have an open endpoint in common do not - overlap. - - Parameters - ---------- - other : %(klass)s - Interval to check against for an overlap. - - Returns - ------- - ndarray - Boolean array positionally indicating where an overlap occurs. - - See Also - -------- - Interval.overlaps : Check whether two Interval objects overlap. 
- - Examples - -------- - %(examples)s - >>> intervals.overlaps(pd.Interval(0.5, 1.5)) - array([ True, True, False]) - - Intervals that share closed endpoints overlap: - - >>> intervals.overlaps(pd.Interval(1, 3, closed='left')) - array([ True, True, True]) - - Intervals that only have an open endpoint in common do not overlap: - - >>> intervals.overlaps(pd.Interval(1, 2, closed='right')) - array([False, True, False]) - """ - ) - - @Appender( - _interval_shared_docs["overlaps"] - % { - "klass": "IntervalArray", - "examples": textwrap.dedent( - """\ - >>> data = [(0, 1), (1, 3), (2, 4)] - >>> intervals = pd.arrays.IntervalArray.from_tuples(data) - >>> intervals - - [(0, 1], (1, 3], (2, 4]] - Length: 3, dtype: interval[int64, right] - """ - ), - } - ) - def overlaps(self, other): - if isinstance(other, (IntervalArray, ABCIntervalIndex)): - raise NotImplementedError - if not isinstance(other, Interval): - msg = f"`other` must be Interval-like, got {type(other).__name__}" - raise TypeError(msg) - - # equality is okay if both endpoints are closed (overlap at a point) - op1 = le if (self.closed_left and other.closed_right) else lt - op2 = le if (other.closed_left and self.closed_right) else lt - - # overlaps is equivalent negation of two interval being disjoint: - # disjoint = (A.left > B.right) or (B.left > A.right) - # (simplifying the negation allows this to be done in less operations) - return op1(self.left, other.right) & op2(other.left, self.right) - - # --------------------------------------------------------------------- - - @property - def closed(self) -> IntervalClosedType: - """ - String describing the inclusive side the intervals. - - Either ``left``, ``right``, ``both`` or ``neither``. - - Examples - -------- - - For arrays: - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) - >>> interv_arr - - [(0, 1], (1, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.closed - 'right' - - For Interval Index: - - >>> interv_idx = pd.interval_range(start=0, end=2) - >>> interv_idx - IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') - >>> interv_idx.closed - 'right' - """ - return self.dtype.closed - - _interval_shared_docs["set_closed"] = textwrap.dedent( - """ - Return an identical %(klass)s closed on the specified side. - - Parameters - ---------- - closed : {'left', 'right', 'both', 'neither'} - Whether the intervals are closed on the left-side, right-side, both - or neither. - - Returns - ------- - %(klass)s - - %(examples)s\ - """ - ) - - @Appender( - _interval_shared_docs["set_closed"] - % { - "klass": "IntervalArray", - "examples": textwrap.dedent( - """\ - Examples - -------- - >>> index = pd.arrays.IntervalArray.from_breaks(range(4)) - >>> index - - [(0, 1], (1, 2], (2, 3]] - Length: 3, dtype: interval[int64, right] - >>> index.set_closed('both') - - [[0, 1], [1, 2], [2, 3]] - Length: 3, dtype: interval[int64, both] - """ - ), - } - ) - def set_closed(self, closed: IntervalClosedType) -> Self: - if closed not in VALID_CLOSED: - msg = f"invalid option for 'closed': {closed}" - raise ValueError(msg) - - left, right = self._left, self._right - dtype = IntervalDtype(left.dtype, closed=closed) - return self._simple_new(left, right, dtype=dtype) - - _interval_shared_docs[ - "is_non_overlapping_monotonic" - ] = """ - Return a boolean whether the %(klass)s is non-overlapping and monotonic. - - Non-overlapping means (no Intervals share points), and monotonic means - either monotonic increasing or monotonic decreasing. 
- - Examples - -------- - For arrays: - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) - >>> interv_arr - - [(0, 1], (1, 5]] - Length: 2, dtype: interval[int64, right] - >>> interv_arr.is_non_overlapping_monotonic - True - - >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), - ... pd.Interval(-1, 0.1)]) - >>> interv_arr - - [(0.0, 1.0], (-1.0, 0.1]] - Length: 2, dtype: interval[float64, right] - >>> interv_arr.is_non_overlapping_monotonic - False - - For Interval Index: - - >>> interv_idx = pd.interval_range(start=0, end=2) - >>> interv_idx - IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') - >>> interv_idx.is_non_overlapping_monotonic - True - - >>> interv_idx = pd.interval_range(start=0, end=2, closed='both') - >>> interv_idx - IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]') - >>> interv_idx.is_non_overlapping_monotonic - False - """ - - @property - @Appender( - _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs - ) - def is_non_overlapping_monotonic(self) -> bool: - # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) - # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) - # we already require left <= right - - # strict inequality for closed == 'both'; equality implies overlapping - # at a point when both sides of intervals are included - if self.closed == "both": - return bool( - (self._right[:-1] < self._left[1:]).all() - or (self._left[:-1] > self._right[1:]).all() - ) - - # non-strict inequality when closed != 'both'; at least one side is - # not included in the intervals, so equality does not imply overlapping - return bool( - (self._right[:-1] <= self._left[1:]).all() - or (self._left[:-1] >= self._right[1:]).all() - ) - - # --------------------------------------------------------------------- - # Conversion - - def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: - """ - Return the IntervalArray's data as a numpy array of Interval - objects (with dtype='object') - """ - left = self._left - right = self._right - mask = self.isna() - closed = self.closed - - result = np.empty(len(left), dtype=object) - for i, left_value in enumerate(left): - if mask[i]: - result[i] = np.nan - else: - result[i] = Interval(left_value, right[i], closed) - return result - - def __arrow_array__(self, type=None): - """ - Convert myself into a pyarrow Array. 
- """ - import pyarrow - - from pandas.core.arrays.arrow.extension_types import ArrowIntervalType - - try: - subtype = pyarrow.from_numpy_dtype(self.dtype.subtype) - except TypeError as err: - raise TypeError( - f"Conversion to arrow with subtype '{self.dtype.subtype}' " - "is not supported" - ) from err - interval_type = ArrowIntervalType(subtype, self.closed) - storage_array = pyarrow.StructArray.from_arrays( - [ - pyarrow.array(self._left, type=subtype, from_pandas=True), - pyarrow.array(self._right, type=subtype, from_pandas=True), - ], - names=["left", "right"], - ) - mask = self.isna() - if mask.any(): - # if there are missing values, set validity bitmap also on the array level - null_bitmap = pyarrow.array(~mask).buffers()[1] - storage_array = pyarrow.StructArray.from_buffers( - storage_array.type, - len(storage_array), - [null_bitmap], - children=[storage_array.field(0), storage_array.field(1)], - ) - - if type is not None: - if type.equals(interval_type.storage_type): - return storage_array - elif isinstance(type, ArrowIntervalType): - # ensure we have the same subtype and closed attributes - if not type.equals(interval_type): - raise TypeError( - "Not supported to convert IntervalArray to type with " - f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) " - f"and 'closed' ({self.closed} vs {type.closed}) attributes" - ) - else: - raise TypeError( - f"Not supported to convert IntervalArray to '{type}' type" - ) - - return pyarrow.ExtensionArray.from_storage(interval_type, storage_array) - - _interval_shared_docs["to_tuples"] = textwrap.dedent( - """ - Return an %(return_type)s of tuples of the form (left, right). - - Parameters - ---------- - na_tuple : bool, default True - If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``, - just return ``NA`` as ``nan``. - - Returns - ------- - tuples: %(return_type)s - %(examples)s\ - """ - ) - - @Appender( - _interval_shared_docs["to_tuples"] - % { - "return_type": ( - "ndarray (if self is IntervalArray) or Index (if self is IntervalIndex)" - ), - "examples": textwrap.dedent( - """\ - - Examples - -------- - For :class:`pandas.IntervalArray`: - - >>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) - >>> idx - - [(0, 1], (1, 2]] - Length: 2, dtype: interval[int64, right] - >>> idx.to_tuples() - array([(0, 1), (1, 2)], dtype=object) - - For :class:`pandas.IntervalIndex`: - - >>> idx = pd.interval_range(start=0, end=2) - >>> idx - IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') - >>> idx.to_tuples() - Index([(0, 1), (1, 2)], dtype='object') - """ - ), - } - ) - def to_tuples(self, na_tuple: bool = True) -> np.ndarray: - tuples = com.asarray_tuplesafe(zip(self._left, self._right)) - if not na_tuple: - # GH 18756 - tuples = np.where(~self.isna(), tuples, np.nan) - return tuples - - # --------------------------------------------------------------------- - - def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: - value_left, value_right = self._validate_setitem_value(value) - - if isinstance(self._left, np.ndarray): - np.putmask(self._left, mask, value_left) - assert isinstance(self._right, np.ndarray) - np.putmask(self._right, mask, value_right) - else: - self._left._putmask(mask, value_left) - assert not isinstance(self._right, np.ndarray) - self._right._putmask(mask, value_right) - - def insert(self, loc: int, item: Interval) -> Self: - """ - Return a new IntervalArray inserting new item at location. Follows - Python numpy.insert semantics for negative values. 
Only Interval - objects and NA can be inserted into an IntervalIndex - - Parameters - ---------- - loc : int - item : Interval - - Returns - ------- - IntervalArray - """ - left_insert, right_insert = self._validate_scalar(item) - - new_left = self.left.insert(loc, left_insert) - new_right = self.right.insert(loc, right_insert) - - return self._shallow_copy(new_left, new_right) - - def delete(self, loc) -> Self: - if isinstance(self._left, np.ndarray): - new_left = np.delete(self._left, loc) - assert isinstance(self._right, np.ndarray) - new_right = np.delete(self._right, loc) - else: - new_left = self._left.delete(loc) - assert not isinstance(self._right, np.ndarray) - new_right = self._right.delete(loc) - return self._shallow_copy(left=new_left, right=new_right) - - @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs) - def repeat( - self, - repeats: int | Sequence[int], - axis: AxisInt | None = None, - ) -> Self: - nv.validate_repeat((), {"axis": axis}) - left_repeat = self.left.repeat(repeats) - right_repeat = self.right.repeat(repeats) - return self._shallow_copy(left=left_repeat, right=right_repeat) - - _interval_shared_docs["contains"] = textwrap.dedent( - """ - Check elementwise if the Intervals contain the value. - - Return a boolean mask whether the value is contained in the Intervals - of the %(klass)s. - - Parameters - ---------- - other : scalar - The value to check whether it is contained in the Intervals. - - Returns - ------- - boolean array - - See Also - -------- - Interval.contains : Check whether Interval object contains value. - %(klass)s.overlaps : Check if an Interval overlaps the values in the - %(klass)s. - - Examples - -------- - %(examples)s - >>> intervals.contains(0.5) - array([ True, False, False]) - """ - ) - - @Appender( - _interval_shared_docs["contains"] - % { - "klass": "IntervalArray", - "examples": textwrap.dedent( - """\ - >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)]) - >>> intervals - - [(0, 1], (1, 3], (2, 4]] - Length: 3, dtype: interval[int64, right] - """ - ), - } - ) - def contains(self, other): - if isinstance(other, Interval): - raise NotImplementedError("contains not implemented for two intervals") - - return (self._left < other if self.open_left else self._left <= other) & ( - other < self._right if self.open_right else other <= self._right - ) - - def isin(self, values) -> npt.NDArray[np.bool_]: - if not hasattr(values, "dtype"): - values = np.array(values) - values = extract_array(values, extract_numpy=True) - - if isinstance(values.dtype, IntervalDtype): - if self.closed != values.closed: - # not comparable -> no overlap - return np.zeros(self.shape, dtype=bool) - - if self.dtype == values.dtype: - # GH#38353 instead of casting to object, operating on a - # complex128 ndarray is much more performant. 
- left = self._combined.view("complex128") - right = values._combined.view("complex128") - # error: Argument 1 to "isin" has incompatible type - # "Union[ExtensionArray, ndarray[Any, Any], - # ndarray[Any, dtype[Any]]]"; expected - # "Union[_SupportsArray[dtype[Any]], - # _NestedSequence[_SupportsArray[dtype[Any]]], bool, - # int, float, complex, str, bytes, _NestedSequence[ - # Union[bool, int, float, complex, str, bytes]]]" - return np.isin(left, right).ravel() # type: ignore[arg-type] - - elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion( - values.left.dtype - ): - # not comparable -> no overlap - return np.zeros(self.shape, dtype=bool) - - return isin(self.astype(object), values.astype(object)) - - @property - def _combined(self) -> IntervalSideT: - left = self.left._values.reshape(-1, 1) - right = self.right._values.reshape(-1, 1) - if needs_i8_conversion(left.dtype): - comb = left._concat_same_type([left, right], axis=1) - else: - comb = np.concatenate([left, right], axis=1) - return comb - - def _from_combined(self, combined: np.ndarray) -> IntervalArray: - """ - Create a new IntervalArray with our dtype from a 1D complex128 ndarray. - """ - nc = combined.view("i8").reshape(-1, 2) - - dtype = self._left.dtype - if needs_i8_conversion(dtype): - assert isinstance(self._left, (DatetimeArray, TimedeltaArray)) - new_left = type(self._left)._from_sequence(nc[:, 0], dtype=dtype) - assert isinstance(self._right, (DatetimeArray, TimedeltaArray)) - new_right = type(self._right)._from_sequence(nc[:, 1], dtype=dtype) - else: - assert isinstance(dtype, np.dtype) - new_left = nc[:, 0].view(dtype) - new_right = nc[:, 1].view(dtype) - return self._shallow_copy(left=new_left, right=new_right) - - def unique(self) -> IntervalArray: - # No overload variant of "__getitem__" of "ExtensionArray" matches argument - # type "Tuple[slice, int]" - nc = unique( - self._combined.view("complex128")[:, 0] # type: ignore[call-overload] - ) - nc = nc[:, None] - return self._from_combined(nc) - - -def _maybe_convert_platform_interval(values) -> ArrayLike: - """ - Try to do platform conversion, with special casing for IntervalArray. - Wrapper around maybe_convert_platform that alters the default return - dtype in certain cases to be compatible with IntervalArray. For example, - empty lists return with integer dtype instead of object dtype, which is - prohibited for IntervalArray. - - Parameters - ---------- - values : array-like - - Returns - ------- - array - """ - if isinstance(values, (list, tuple)) and len(values) == 0: - # GH 19016 - # empty lists/tuples get object dtype by default, but this is - # prohibited for IntervalArray, so coerce to integer instead - return np.array([], dtype=np.int64) - elif not is_list_like(values) or isinstance(values, ABCDataFrame): - # This will raise later, but we avoid passing to maybe_convert_platform - return values - elif isinstance(getattr(values, "dtype", None), CategoricalDtype): - values = np.asarray(values) - elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): - # TODO: should we just cast these to list? 
- return values - else: - values = extract_array(values, extract_numpy=True) - - if not hasattr(values, "dtype"): - values = np.asarray(values) - if values.dtype.kind in "iu" and values.dtype != np.int64: - values = values.astype(np.int64) - return values diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py deleted file mode 100644 index 95e7b6a26f93a8cd10048076bd6906190e04d2ba..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py +++ /dev/null @@ -1,465 +0,0 @@ -""" -A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api -""" - -from __future__ import annotations - -from abc import ( - ABC, - abstractmethod, -) -import enum -from typing import ( - TYPE_CHECKING, - Any, - TypedDict, -) - -if TYPE_CHECKING: - from collections.abc import ( - Iterable, - Sequence, - ) - - -class DlpackDeviceType(enum.IntEnum): - """Integer enum for device type codes matching DLPack.""" - - CPU = 1 - CUDA = 2 - CPU_PINNED = 3 - OPENCL = 4 - VULKAN = 7 - METAL = 8 - VPI = 9 - ROCM = 10 - - -class DtypeKind(enum.IntEnum): - """ - Integer enum for data types. - - Attributes - ---------- - INT : int - Matches to signed integer data type. - UINT : int - Matches to unsigned integer data type. - FLOAT : int - Matches to floating point data type. - BOOL : int - Matches to boolean data type. - STRING : int - Matches to string data type (UTF-8 encoded). - DATETIME : int - Matches to datetime data type. - CATEGORICAL : int - Matches to categorical data type. - """ - - INT = 0 - UINT = 1 - FLOAT = 2 - BOOL = 20 - STRING = 21 # UTF-8 - DATETIME = 22 - CATEGORICAL = 23 - - -class ColumnNullType(enum.IntEnum): - """ - Integer enum for null type representation. - - Attributes - ---------- - NON_NULLABLE : int - Non-nullable column. - USE_NAN : int - Use explicit float NaN value. - USE_SENTINEL : int - Sentinel value besides NaN/NaT. - USE_BITMASK : int - The bit is set/unset representing a null on a certain position. - USE_BYTEMASK : int - The byte is set/unset representing a null on a certain position. - """ - - NON_NULLABLE = 0 - USE_NAN = 1 - USE_SENTINEL = 2 - USE_BITMASK = 3 - USE_BYTEMASK = 4 - - -class ColumnBuffers(TypedDict): - # first element is a buffer containing the column data; - # second element is the data buffer's associated dtype - data: tuple[Buffer, Any] - - # first element is a buffer containing mask values indicating missing data; - # second element is the mask value buffer's associated dtype. - # None if the null representation is not a bit or byte mask - validity: tuple[Buffer, Any] | None - - # first element is a buffer containing the offset values for - # variable-size binary data (e.g., variable-length strings); - # second element is the offsets buffer's associated dtype. - # None if the data buffer does not have an associated offsets buffer - offsets: tuple[Buffer, Any] | None - - -class CategoricalDescription(TypedDict): - # whether the ordering of dictionary indices is semantically meaningful - is_ordered: bool - # whether a dictionary-style mapping of categorical values to other objects exists - is_dictionary: bool - # Python-level only (e.g. ``{int: str}``). - # None if not a dictionary-style categorical. 
- categories: Column | None - - -class Buffer(ABC): - """ - Data in the buffer is guaranteed to be contiguous in memory. - - Note that there is no dtype attribute present, a buffer can be thought of - as simply a block of memory. However, if the column that the buffer is - attached to has a dtype that's supported by DLPack and ``__dlpack__`` is - implemented, then that dtype information will be contained in the return - value from ``__dlpack__``. - - This distinction is useful to support both data exchange via DLPack on a - buffer and (b) dtypes like variable-length strings which do not have a - fixed number of bytes per element. - """ - - @property - @abstractmethod - def bufsize(self) -> int: - """ - Buffer size in bytes. - """ - - @property - @abstractmethod - def ptr(self) -> int: - """ - Pointer to start of the buffer as an integer. - """ - - @abstractmethod - def __dlpack__(self): - """ - Produce DLPack capsule (see array API standard). - - Raises: - - - TypeError : if the buffer contains unsupported dtypes. - - NotImplementedError : if DLPack support is not implemented - - Useful to have to connect to array libraries. Support optional because - it's not completely trivial to implement for a Python-only library. - """ - raise NotImplementedError("__dlpack__") - - @abstractmethod - def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: - """ - Device type and device ID for where the data in the buffer resides. - Uses device type codes matching DLPack. - Note: must be implemented even if ``__dlpack__`` is not. - """ - - -class Column(ABC): - """ - A column object, with only the methods and properties required by the - interchange protocol defined. - - A column can contain one or more chunks. Each chunk can contain up to three - buffers - a data buffer, a mask buffer (depending on null representation), - and an offsets buffer (if variable-size binary; e.g., variable-length - strings). - - TBD: Arrow has a separate "null" dtype, and has no separate mask concept. - Instead, it seems to use "children" for both columns with a bit mask, - and for nested dtypes. Unclear whether this is elegant or confusing. - This design requires checking the null representation explicitly. - - The Arrow design requires checking: - 1. the ARROW_FLAG_NULLABLE (for sentinel values) - 2. if a column has two children, combined with one of those children - having a null dtype. - - Making the mask concept explicit seems useful. One null dtype would - not be enough to cover both bit and byte masks, so that would mean - even more checking if we did it the Arrow way. - - TBD: there's also the "chunk" concept here, which is implicit in Arrow as - multiple buffers per array (= column here). Semantically it may make - sense to have both: chunks were meant for example for lazy evaluation - of data which doesn't fit in memory, while multiple buffers per column - could also come from doing a selection operation on a single - contiguous buffer. - - Given these concepts, one would expect chunks to be all of the same - size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows), - while multiple buffers could have data-dependent lengths. Not an issue - in pandas if one column is backed by a single NumPy array, but in - Arrow it seems possible. - Are multiple chunks *and* multiple buffers per column necessary for - the purposes of this interchange protocol, or must producers either - reuse the chunk concept for this or copy the data? 
- - Note: this Column object can only be produced by ``__dataframe__``, so - doesn't need its own version or ``__column__`` protocol. - """ - - @abstractmethod - def size(self) -> int: - """ - Size of the column, in elements. - - Corresponds to DataFrame.num_rows() if column is a single chunk; - equal to size of this current chunk otherwise. - """ - - @property - @abstractmethod - def offset(self) -> int: - """ - Offset of first element. - - May be > 0 if using chunks; for example for a column with N chunks of - equal size M (only the last chunk may be shorter), - ``offset = n * M``, ``n = 0 .. N-1``. - """ - - @property - @abstractmethod - def dtype(self) -> tuple[DtypeKind, int, str, str]: - """ - Dtype description as a tuple ``(kind, bit-width, format string, endianness)``. - - Bit-width : the number of bits as an integer - Format string : data type description format string in Apache Arrow C - Data Interface format. - Endianness : current only native endianness (``=``) is supported - - Notes: - - Kind specifiers are aligned with DLPack where possible (hence the - jump to 20, leave enough room for future extension) - - Masks must be specified as boolean with either bit width 1 (for bit - masks) or 8 (for byte masks). - - Dtype width in bits was preferred over bytes - - Endianness isn't too useful, but included now in case in the future - we need to support non-native endianness - - Went with Apache Arrow format strings over NumPy format strings - because they're more complete from a dataframe perspective - - Format strings are mostly useful for datetime specification, and - for categoricals. - - For categoricals, the format string describes the type of the - categorical in the data buffer. In case of a separate encoding of - the categorical (e.g. an integer to string mapping), this can - be derived from ``self.describe_categorical``. - - Data types not included: complex, Arrow-style null, binary, decimal, - and nested (list, struct, map, union) dtypes. - """ - - @property - @abstractmethod - def describe_categorical(self) -> CategoricalDescription: - """ - If the dtype is categorical, there are two options: - - There are only values in the data buffer. - - There is a separate non-categorical Column encoding for categorical values. - - Raises TypeError if the dtype is not categorical - - Returns the dictionary with description on how to interpret the data buffer: - - "is_ordered" : bool, whether the ordering of dictionary indices is - semantically meaningful. - - "is_dictionary" : bool, whether a mapping of - categorical values to other objects exists - - "categories" : Column representing the (implicit) mapping of indices to - category values (e.g. an array of cat1, cat2, ...). - None if not a dictionary-style categorical. - - TBD: are there any other in-memory representations that are needed? - """ - - @property - @abstractmethod - def describe_null(self) -> tuple[ColumnNullType, Any]: - """ - Return the missing value (or "null") representation the column dtype - uses, as a tuple ``(kind, value)``. - - Value : if kind is "sentinel value", the actual value. If kind is a bit - mask or a byte mask, the value (0 or 1) indicating a missing value. None - otherwise. - """ - - @property - @abstractmethod - def null_count(self) -> int | None: - """ - Number of null elements, if known. - - Note: Arrow uses -1 to indicate "unknown", but None seems cleaner. - """ - - @property - @abstractmethod - def metadata(self) -> dict[str, Any]: - """ - The metadata for the column. 
See `DataFrame.metadata` for more details. - """ - - @abstractmethod - def num_chunks(self) -> int: - """ - Return the number of chunks the column consists of. - """ - - @abstractmethod - def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]: - """ - Return an iterator yielding the chunks. - - See `DataFrame.get_chunks` for details on ``n_chunks``. - """ - - @abstractmethod - def get_buffers(self) -> ColumnBuffers: - """ - Return a dictionary containing the underlying buffers. - - The returned dictionary has the following contents: - - - "data": a two-element tuple whose first element is a buffer - containing the data and whose second element is the data - buffer's associated dtype. - - "validity": a two-element tuple whose first element is a buffer - containing mask values indicating missing data and - whose second element is the mask value buffer's - associated dtype. None if the null representation is - not a bit or byte mask. - - "offsets": a two-element tuple whose first element is a buffer - containing the offset values for variable-size binary - data (e.g., variable-length strings) and whose second - element is the offsets buffer's associated dtype. None - if the data buffer does not have an associated offsets - buffer. - """ - - -# def get_children(self) -> Iterable[Column]: -# """ -# Children columns underneath the column, each object in this iterator -# must adhere to the column specification. -# """ -# pass - - -class DataFrame(ABC): - """ - A data frame class, with only the methods required by the interchange - protocol defined. - - A "data frame" represents an ordered collection of named columns. - A column's "name" must be a unique string. - Columns may be accessed by name or by position. - - This could be a public data frame class, or an object with the methods and - attributes defined on this DataFrame class could be returned from the - ``__dataframe__`` method of a public data frame class in a library adhering - to the dataframe interchange protocol specification. - """ - - version = 0 # version of the protocol - - @abstractmethod - def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): - """Construct a new interchange object, potentially changing the parameters.""" - - @property - @abstractmethod - def metadata(self) -> dict[str, Any]: - """ - The metadata for the data frame, as a dictionary with string keys. The - contents of `metadata` may be anything, they are meant for a library - to store information that it needs to, e.g., roundtrip losslessly or - for two implementations to share data that is not (yet) part of the - interchange protocol specification. For avoiding collisions with other - entries, please add name the keys with the name of the library - followed by a period and the desired name, e.g, ``pandas.indexcol``. - """ - - @abstractmethod - def num_columns(self) -> int: - """ - Return the number of columns in the DataFrame. - """ - - @abstractmethod - def num_rows(self) -> int | None: - # TODO: not happy with Optional, but need to flag it may be expensive - # why include it if it may be None - what do we expect consumers - # to do here? - """ - Return the number of rows in the DataFrame, if available. - """ - - @abstractmethod - def num_chunks(self) -> int: - """ - Return the number of chunks the DataFrame consists of. - """ - - @abstractmethod - def column_names(self) -> Iterable[str]: - """ - Return an iterator yielding the column names. 
- """ - - @abstractmethod - def get_column(self, i: int) -> Column: - """ - Return the column at the indicated position. - """ - - @abstractmethod - def get_column_by_name(self, name: str) -> Column: - """ - Return the column whose name is the indicated name. - """ - - @abstractmethod - def get_columns(self) -> Iterable[Column]: - """ - Return an iterator yielding the columns. - """ - - @abstractmethod - def select_columns(self, indices: Sequence[int]) -> DataFrame: - """ - Create a new DataFrame by selecting a subset of columns by index. - """ - - @abstractmethod - def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: - """ - Create a new DataFrame by selecting a subset of columns by name. - """ - - @abstractmethod - def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]: - """ - Return an iterator yielding the chunks. - - By default (None), yields the chunks that the data is stored as by the - producer. If given, ``n_chunks`` must be a multiple of - ``self.num_chunks()``, meaning the producer must subdivide each chunk - before yielding it. - """ diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_month.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_month.py deleted file mode 100644 index fc125103692455db3a4846ce4cb875b653bea40e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_month.py +++ /dev/null @@ -1,677 +0,0 @@ -""" -Tests for the following offsets: -- SemiMonthBegin -- SemiMonthEnd -- MonthBegin -- MonthEnd -""" -from __future__ import annotations - -from datetime import datetime - -import pytest - -from pandas._libs.tslibs import Timestamp -from pandas._libs.tslibs.offsets import ( - MonthBegin, - MonthEnd, - SemiMonthBegin, - SemiMonthEnd, -) - -from pandas import ( - DatetimeIndex, - Series, - _testing as tm, - date_range, -) -from pandas.tests.tseries.offsets.common import ( - assert_is_on_offset, - assert_offset_equal, -) - - -class TestSemiMonthEnd: - def test_offset_whole_year(self): - dates = ( - datetime(2007, 12, 31), - datetime(2008, 1, 15), - datetime(2008, 1, 31), - datetime(2008, 2, 15), - datetime(2008, 2, 29), - datetime(2008, 3, 15), - datetime(2008, 3, 31), - datetime(2008, 4, 15), - datetime(2008, 4, 30), - datetime(2008, 5, 15), - datetime(2008, 5, 31), - datetime(2008, 6, 15), - datetime(2008, 6, 30), - datetime(2008, 7, 15), - datetime(2008, 7, 31), - datetime(2008, 8, 15), - datetime(2008, 8, 31), - datetime(2008, 9, 15), - datetime(2008, 9, 30), - datetime(2008, 10, 15), - datetime(2008, 10, 31), - datetime(2008, 11, 15), - datetime(2008, 11, 30), - datetime(2008, 12, 15), - datetime(2008, 12, 31), - ) - - for base, exp_date in zip(dates[:-1], dates[1:]): - assert_offset_equal(SemiMonthEnd(), base, exp_date) - - # ensure .apply_index works as expected - shift = DatetimeIndex(dates[:-1]) - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = SemiMonthEnd() + shift - - exp = DatetimeIndex(dates[1:]) - tm.assert_index_equal(result, exp) - - # ensure generating a range with DatetimeIndex gives same result - result = date_range(start=dates[0], end=dates[-1], freq="SM") - exp = DatetimeIndex(dates, freq="SM") - tm.assert_index_equal(result, exp) - - offset_cases = [] - offset_cases.append( - ( - SemiMonthEnd(), - { - 
datetime(2008, 1, 1): datetime(2008, 1, 15), - datetime(2008, 1, 15): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 2, 15), - datetime(2006, 12, 14): datetime(2006, 12, 15), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2007, 1, 15), - datetime(2007, 1, 1): datetime(2007, 1, 15), - datetime(2006, 12, 1): datetime(2006, 12, 15), - datetime(2006, 12, 15): datetime(2006, 12, 31), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(day_of_month=20), - { - datetime(2008, 1, 1): datetime(2008, 1, 20), - datetime(2008, 1, 15): datetime(2008, 1, 20), - datetime(2008, 1, 21): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 2, 20), - datetime(2006, 12, 14): datetime(2006, 12, 20), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2007, 1, 20), - datetime(2007, 1, 1): datetime(2007, 1, 20), - datetime(2006, 12, 1): datetime(2006, 12, 20), - datetime(2006, 12, 15): datetime(2006, 12, 20), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(0), - { - datetime(2008, 1, 1): datetime(2008, 1, 15), - datetime(2008, 1, 16): datetime(2008, 1, 31), - datetime(2008, 1, 15): datetime(2008, 1, 15), - datetime(2008, 1, 31): datetime(2008, 1, 31), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2006, 12, 31), - datetime(2007, 1, 1): datetime(2007, 1, 15), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(0, day_of_month=16), - { - datetime(2008, 1, 1): datetime(2008, 1, 16), - datetime(2008, 1, 16): datetime(2008, 1, 16), - datetime(2008, 1, 15): datetime(2008, 1, 16), - datetime(2008, 1, 31): datetime(2008, 1, 31), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2006, 12, 31), - datetime(2007, 1, 1): datetime(2007, 1, 16), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(2), - { - datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 2, 29), - datetime(2006, 12, 29): datetime(2007, 1, 15), - datetime(2006, 12, 31): datetime(2007, 1, 31), - datetime(2007, 1, 1): datetime(2007, 1, 31), - datetime(2007, 1, 16): datetime(2007, 2, 15), - datetime(2006, 11, 1): datetime(2006, 11, 30), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(-1), - { - datetime(2007, 1, 1): datetime(2006, 12, 31), - datetime(2008, 6, 30): datetime(2008, 6, 15), - datetime(2008, 12, 31): datetime(2008, 12, 15), - datetime(2006, 12, 29): datetime(2006, 12, 15), - datetime(2006, 12, 30): datetime(2006, 12, 15), - datetime(2007, 1, 1): datetime(2006, 12, 31), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(-1, day_of_month=4), - { - datetime(2007, 1, 1): datetime(2006, 12, 31), - datetime(2007, 1, 4): datetime(2006, 12, 31), - datetime(2008, 6, 30): datetime(2008, 6, 4), - datetime(2008, 12, 31): datetime(2008, 12, 4), - datetime(2006, 12, 5): datetime(2006, 12, 4), - datetime(2006, 12, 30): datetime(2006, 12, 4), - datetime(2007, 1, 1): datetime(2006, 12, 31), - }, - ) - ) - - offset_cases.append( - ( - SemiMonthEnd(-2), - { - datetime(2007, 1, 1): datetime(2006, 12, 15), - datetime(2008, 6, 30): datetime(2008, 5, 31), - datetime(2008, 3, 15): datetime(2008, 2, 15), - datetime(2008, 12, 31): datetime(2008, 11, 30), - datetime(2006, 12, 29): datetime(2006, 11, 30), - datetime(2006, 12, 14): datetime(2006, 11, 15), - datetime(2007, 1, 1): datetime(2006, 12, 15), - }, - ) - ) - - @pytest.mark.parametrize("case", offset_cases) - def test_offset(self, case): - offset, cases = case - for base, expected in 
cases.items(): - assert_offset_equal(offset, base, expected) - - @pytest.mark.parametrize("case", offset_cases) - def test_apply_index(self, case): - # https://github.com/pandas-dev/pandas/issues/34580 - offset, cases = case - shift = DatetimeIndex(cases.keys()) - exp = DatetimeIndex(cases.values()) - - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = offset + shift - tm.assert_index_equal(result, exp) - - on_offset_cases = [ - (datetime(2007, 12, 31), True), - (datetime(2007, 12, 15), True), - (datetime(2007, 12, 14), False), - (datetime(2007, 12, 1), False), - (datetime(2008, 2, 29), True), - ] - - @pytest.mark.parametrize("case", on_offset_cases) - def test_is_on_offset(self, case): - dt, expected = case - assert_is_on_offset(SemiMonthEnd(), dt, expected) - - @pytest.mark.parametrize("klass", [Series, DatetimeIndex]) - def test_vectorized_offset_addition(self, klass): - shift = klass( - [ - Timestamp("2000-01-15 00:15:00", tz="US/Central"), - Timestamp("2000-02-15", tz="US/Central"), - ], - name="a", - ) - - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = shift + SemiMonthEnd() - result2 = SemiMonthEnd() + shift - - exp = klass( - [ - Timestamp("2000-01-31 00:15:00", tz="US/Central"), - Timestamp("2000-02-29", tz="US/Central"), - ], - name="a", - ) - tm.assert_equal(result, exp) - tm.assert_equal(result2, exp) - - shift = klass( - [ - Timestamp("2000-01-01 00:15:00", tz="US/Central"), - Timestamp("2000-02-01", tz="US/Central"), - ], - name="a", - ) - - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = shift + SemiMonthEnd() - result2 = SemiMonthEnd() + shift - - exp = klass( - [ - Timestamp("2000-01-15 00:15:00", tz="US/Central"), - Timestamp("2000-02-15", tz="US/Central"), - ], - name="a", - ) - tm.assert_equal(result, exp) - tm.assert_equal(result2, exp) - - -class TestSemiMonthBegin: - def test_offset_whole_year(self): - dates = ( - datetime(2007, 12, 15), - datetime(2008, 1, 1), - datetime(2008, 1, 15), - datetime(2008, 2, 1), - datetime(2008, 2, 15), - datetime(2008, 3, 1), - datetime(2008, 3, 15), - datetime(2008, 4, 1), - datetime(2008, 4, 15), - datetime(2008, 5, 1), - datetime(2008, 5, 15), - datetime(2008, 6, 1), - datetime(2008, 6, 15), - datetime(2008, 7, 1), - datetime(2008, 7, 15), - datetime(2008, 8, 1), - datetime(2008, 8, 15), - datetime(2008, 9, 1), - datetime(2008, 9, 15), - datetime(2008, 10, 1), - datetime(2008, 10, 15), - datetime(2008, 11, 1), - datetime(2008, 11, 15), - datetime(2008, 12, 1), - datetime(2008, 12, 15), - ) - - for base, exp_date in zip(dates[:-1], dates[1:]): - assert_offset_equal(SemiMonthBegin(), base, exp_date) - - # ensure .apply_index works as expected - shift = DatetimeIndex(dates[:-1]) - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = SemiMonthBegin() + shift - - exp = DatetimeIndex(dates[1:]) - tm.assert_index_equal(result, exp) - - # ensure generating a range with DatetimeIndex gives same result - result = date_range(start=dates[0], end=dates[-1], freq="SMS") - exp = DatetimeIndex(dates, freq="SMS") - tm.assert_index_equal(result, exp) - - offset_cases = [ - ( - SemiMonthBegin(), - { - datetime(2008, 1, 1): datetime(2008, 1, 15), - 
datetime(2008, 1, 15): datetime(2008, 2, 1), - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2006, 12, 14): datetime(2006, 12, 15), - datetime(2006, 12, 29): datetime(2007, 1, 1), - datetime(2006, 12, 31): datetime(2007, 1, 1), - datetime(2007, 1, 1): datetime(2007, 1, 15), - datetime(2006, 12, 1): datetime(2006, 12, 15), - datetime(2006, 12, 15): datetime(2007, 1, 1), - }, - ), - ( - SemiMonthBegin(day_of_month=20), - { - datetime(2008, 1, 1): datetime(2008, 1, 20), - datetime(2008, 1, 15): datetime(2008, 1, 20), - datetime(2008, 1, 21): datetime(2008, 2, 1), - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2006, 12, 14): datetime(2006, 12, 20), - datetime(2006, 12, 29): datetime(2007, 1, 1), - datetime(2006, 12, 31): datetime(2007, 1, 1), - datetime(2007, 1, 1): datetime(2007, 1, 20), - datetime(2006, 12, 1): datetime(2006, 12, 20), - datetime(2006, 12, 15): datetime(2006, 12, 20), - }, - ), - ( - SemiMonthBegin(0), - { - datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2008, 1, 16): datetime(2008, 2, 1), - datetime(2008, 1, 15): datetime(2008, 1, 15), - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2006, 12, 29): datetime(2007, 1, 1), - datetime(2006, 12, 2): datetime(2006, 12, 15), - datetime(2007, 1, 1): datetime(2007, 1, 1), - }, - ), - ( - SemiMonthBegin(0, day_of_month=16), - { - datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2008, 1, 16): datetime(2008, 1, 16), - datetime(2008, 1, 15): datetime(2008, 1, 16), - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2006, 12, 29): datetime(2007, 1, 1), - datetime(2006, 12, 31): datetime(2007, 1, 1), - datetime(2007, 1, 5): datetime(2007, 1, 16), - datetime(2007, 1, 1): datetime(2007, 1, 1), - }, - ), - ( - SemiMonthBegin(2), - { - datetime(2008, 1, 1): datetime(2008, 2, 1), - datetime(2008, 1, 31): datetime(2008, 2, 15), - datetime(2006, 12, 1): datetime(2007, 1, 1), - datetime(2006, 12, 29): datetime(2007, 1, 15), - datetime(2006, 12, 15): datetime(2007, 1, 15), - datetime(2007, 1, 1): datetime(2007, 2, 1), - datetime(2007, 1, 16): datetime(2007, 2, 15), - datetime(2006, 11, 1): datetime(2006, 12, 1), - }, - ), - ( - SemiMonthBegin(-1), - { - datetime(2007, 1, 1): datetime(2006, 12, 15), - datetime(2008, 6, 30): datetime(2008, 6, 15), - datetime(2008, 6, 14): datetime(2008, 6, 1), - datetime(2008, 12, 31): datetime(2008, 12, 15), - datetime(2006, 12, 29): datetime(2006, 12, 15), - datetime(2006, 12, 15): datetime(2006, 12, 1), - datetime(2007, 1, 1): datetime(2006, 12, 15), - }, - ), - ( - SemiMonthBegin(-1, day_of_month=4), - { - datetime(2007, 1, 1): datetime(2006, 12, 4), - datetime(2007, 1, 4): datetime(2007, 1, 1), - datetime(2008, 6, 30): datetime(2008, 6, 4), - datetime(2008, 12, 31): datetime(2008, 12, 4), - datetime(2006, 12, 5): datetime(2006, 12, 4), - datetime(2006, 12, 30): datetime(2006, 12, 4), - datetime(2006, 12, 2): datetime(2006, 12, 1), - datetime(2007, 1, 1): datetime(2006, 12, 4), - }, - ), - ( - SemiMonthBegin(-2), - { - datetime(2007, 1, 1): datetime(2006, 12, 1), - datetime(2008, 6, 30): datetime(2008, 6, 1), - datetime(2008, 6, 14): datetime(2008, 5, 15), - datetime(2008, 12, 31): datetime(2008, 12, 1), - datetime(2006, 12, 29): datetime(2006, 12, 1), - datetime(2006, 12, 15): datetime(2006, 11, 15), - datetime(2007, 1, 1): datetime(2006, 12, 1), - }, - ), - ] - - @pytest.mark.parametrize("case", offset_cases) - def test_offset(self, case): - offset, cases = case - for base, expected in cases.items(): - assert_offset_equal(offset, base, expected) - - 
@pytest.mark.parametrize("case", offset_cases) - def test_apply_index(self, case): - offset, cases = case - shift = DatetimeIndex(cases.keys()) - - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = offset + shift - - exp = DatetimeIndex(cases.values()) - tm.assert_index_equal(result, exp) - - on_offset_cases = [ - (datetime(2007, 12, 1), True), - (datetime(2007, 12, 15), True), - (datetime(2007, 12, 14), False), - (datetime(2007, 12, 31), False), - (datetime(2008, 2, 15), True), - ] - - @pytest.mark.parametrize("case", on_offset_cases) - def test_is_on_offset(self, case): - dt, expected = case - assert_is_on_offset(SemiMonthBegin(), dt, expected) - - @pytest.mark.parametrize("klass", [Series, DatetimeIndex]) - def test_vectorized_offset_addition(self, klass): - shift = klass( - [ - Timestamp("2000-01-15 00:15:00", tz="US/Central"), - Timestamp("2000-02-15", tz="US/Central"), - ], - name="a", - ) - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = shift + SemiMonthBegin() - result2 = SemiMonthBegin() + shift - - exp = klass( - [ - Timestamp("2000-02-01 00:15:00", tz="US/Central"), - Timestamp("2000-03-01", tz="US/Central"), - ], - name="a", - ) - tm.assert_equal(result, exp) - tm.assert_equal(result2, exp) - - shift = klass( - [ - Timestamp("2000-01-01 00:15:00", tz="US/Central"), - Timestamp("2000-02-01", tz="US/Central"), - ], - name="a", - ) - with tm.assert_produces_warning(None): - # GH#22535 check that we don't get a FutureWarning from adding - # an integer array to PeriodIndex - result = shift + SemiMonthBegin() - result2 = SemiMonthBegin() + shift - - exp = klass( - [ - Timestamp("2000-01-15 00:15:00", tz="US/Central"), - Timestamp("2000-02-15", tz="US/Central"), - ], - name="a", - ) - tm.assert_equal(result, exp) - tm.assert_equal(result2, exp) - - -class TestMonthBegin: - offset_cases = [] - # NOTE: I'm not entirely happy with the logic here for Begin -ss - # see thread 'offset conventions' on the ML - offset_cases.append( - ( - MonthBegin(), - { - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2008, 2, 1): datetime(2008, 3, 1), - datetime(2006, 12, 31): datetime(2007, 1, 1), - datetime(2006, 12, 1): datetime(2007, 1, 1), - datetime(2007, 1, 31): datetime(2007, 2, 1), - }, - ) - ) - - offset_cases.append( - ( - MonthBegin(0), - { - datetime(2008, 1, 31): datetime(2008, 2, 1), - datetime(2008, 1, 1): datetime(2008, 1, 1), - datetime(2006, 12, 3): datetime(2007, 1, 1), - datetime(2007, 1, 31): datetime(2007, 2, 1), - }, - ) - ) - - offset_cases.append( - ( - MonthBegin(2), - { - datetime(2008, 2, 29): datetime(2008, 4, 1), - datetime(2008, 1, 31): datetime(2008, 3, 1), - datetime(2006, 12, 31): datetime(2007, 2, 1), - datetime(2007, 12, 28): datetime(2008, 2, 1), - datetime(2007, 1, 1): datetime(2007, 3, 1), - datetime(2006, 11, 1): datetime(2007, 1, 1), - }, - ) - ) - - offset_cases.append( - ( - MonthBegin(-1), - { - datetime(2007, 1, 1): datetime(2006, 12, 1), - datetime(2008, 5, 31): datetime(2008, 5, 1), - datetime(2008, 12, 31): datetime(2008, 12, 1), - datetime(2006, 12, 29): datetime(2006, 12, 1), - datetime(2006, 1, 2): datetime(2006, 1, 1), - }, - ) - ) - - @pytest.mark.parametrize("case", offset_cases) - def test_offset(self, case): - offset, cases = case - for base, expected in cases.items(): - assert_offset_equal(offset, base, expected) - - -class TestMonthEnd: - 
def test_day_of_month(self): - dt = datetime(2007, 1, 1) - offset = MonthEnd() - - result = dt + offset - assert result == Timestamp(2007, 1, 31) - - result = result + offset - assert result == Timestamp(2007, 2, 28) - - def test_normalize(self): - dt = datetime(2007, 1, 1, 3) - - result = dt + MonthEnd(normalize=True) - expected = dt.replace(hour=0) + MonthEnd() - assert result == expected - - offset_cases = [] - offset_cases.append( - ( - MonthEnd(), - { - datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 2, 29), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2007, 1, 31), - datetime(2007, 1, 1): datetime(2007, 1, 31), - datetime(2006, 12, 1): datetime(2006, 12, 31), - }, - ) - ) - - offset_cases.append( - ( - MonthEnd(0), - { - datetime(2008, 1, 1): datetime(2008, 1, 31), - datetime(2008, 1, 31): datetime(2008, 1, 31), - datetime(2006, 12, 29): datetime(2006, 12, 31), - datetime(2006, 12, 31): datetime(2006, 12, 31), - datetime(2007, 1, 1): datetime(2007, 1, 31), - }, - ) - ) - - offset_cases.append( - ( - MonthEnd(2), - { - datetime(2008, 1, 1): datetime(2008, 2, 29), - datetime(2008, 1, 31): datetime(2008, 3, 31), - datetime(2006, 12, 29): datetime(2007, 1, 31), - datetime(2006, 12, 31): datetime(2007, 2, 28), - datetime(2007, 1, 1): datetime(2007, 2, 28), - datetime(2006, 11, 1): datetime(2006, 12, 31), - }, - ) - ) - - offset_cases.append( - ( - MonthEnd(-1), - { - datetime(2007, 1, 1): datetime(2006, 12, 31), - datetime(2008, 6, 30): datetime(2008, 5, 31), - datetime(2008, 12, 31): datetime(2008, 11, 30), - datetime(2006, 12, 29): datetime(2006, 11, 30), - datetime(2006, 12, 30): datetime(2006, 11, 30), - datetime(2007, 1, 1): datetime(2006, 12, 31), - }, - ) - ) - - @pytest.mark.parametrize("case", offset_cases) - def test_offset(self, case): - offset, cases = case - for base, expected in cases.items(): - assert_offset_equal(offset, base, expected) - - on_offset_cases = [ - (MonthEnd(), datetime(2007, 12, 31), True), - (MonthEnd(), datetime(2008, 1, 1), False), - ] - - @pytest.mark.parametrize("case", on_offset_cases) - def test_is_on_offset(self, case): - offset, dt, expected = case - assert_is_on_offset(offset, dt, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/format_control.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/format_control.py deleted file mode 100644 index db3995eac9f9ec2450e0e2d4a18e666c0b178681..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/models/format_control.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import FrozenSet, Optional, Set - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.exceptions import CommandError - - -class FormatControl: - """Helper for managing formats from which a package can be installed.""" - - __slots__ = ["no_binary", "only_binary"] - - def __init__( - self, - no_binary: Optional[Set[str]] = None, - only_binary: Optional[Set[str]] = None, - ) -> None: - if no_binary is None: - no_binary = set() - if only_binary is None: - only_binary = set() - - self.no_binary = no_binary - self.only_binary = only_binary - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - - if self.__slots__ != other.__slots__: - return False - - return all(getattr(self, k) == getattr(other, k) for k in self.__slots__) - - 
def __repr__(self) -> str: - return "{}({}, {})".format( - self.__class__.__name__, self.no_binary, self.only_binary - ) - - @staticmethod - def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None: - if value.startswith("-"): - raise CommandError( - "--no-binary / --only-binary option requires 1 argument." - ) - new = value.split(",") - while ":all:" in new: - other.clear() - target.clear() - target.add(":all:") - del new[: new.index(":all:") + 1] - # Without a none, we want to discard everything as :all: covers it - if ":none:" not in new: - return - for name in new: - if name == ":none:": - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]: - result = {"binary", "source"} - if canonical_name in self.only_binary: - result.discard("source") - elif canonical_name in self.no_binary: - result.discard("binary") - elif ":all:" in self.only_binary: - result.discard("source") - elif ":all:" in self.no_binary: - result.discard("binary") - return frozenset(result) - - def disallow_binaries(self) -> None: - self.handle_mutual_excludes( - ":all:", - self.no_binary, - self.only_binary, - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_stack.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_stack.py deleted file mode 100644 index 194564e761ddae165b39ef6598877e2e3820af0a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_stack.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import List, TypeVar - -T = TypeVar("T") - - -class Stack(List[T]): - """A small shim over builtin list.""" - - @property - def top(self) -> T: - """Get top of stack.""" - return self[-1] - - def push(self, item: T) -> None: - """Push an item on to the stack (append in stack nomenclature).""" - self.append(item) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/build_py.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/build_py.py deleted file mode 100644 index 7ef9bcefdec05490393466f032548f24d41ea0b8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/build_py.py +++ /dev/null @@ -1,392 +0,0 @@ -"""distutils.command.build_py - -Implements the Distutils 'build_py' command.""" - -import os -import importlib.util -import sys -import glob - -from distutils.core import Command -from distutils.errors import * -from distutils.util import convert_path -from distutils import log - -class build_py (Command): - - description = "\"build\" pure Python modules (copy to build directory)" - - user_options = [ - ('build-lib=', 'd', "directory to \"build\" (copy) to"), - ('compile', 'c', "compile .py to .pyc"), - ('no-compile', None, "don't compile .py files [default]"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] - - boolean_options = ['compile', 'force'] - negative_opt = {'no-compile' : 'compile'} - - def initialize_options(self): - self.build_lib = None - self.py_modules = None - self.package = None - self.package_data = None - self.package_dir = None - self.compile = 0 - self.optimize = 0 - self.force = None - - 
def finalize_options(self): - self.set_undefined_options('build', - ('build_lib', 'build_lib'), - ('force', 'force')) - - # Get the distribution options that are aliases for build_py - # options -- list of packages and list of modules. - self.packages = self.distribution.packages - self.py_modules = self.distribution.py_modules - self.package_data = self.distribution.package_data - self.package_dir = {} - if self.distribution.package_dir: - for name, path in self.distribution.package_dir.items(): - self.package_dir[name] = convert_path(path) - self.data_files = self.get_data_files() - - # Ick, copied straight from install_lib.py (fancy_getopt needs a - # type system! Hell, *everything* needs a type system!!!) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - assert 0 <= self.optimize <= 2 - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # XXX copy_file by default preserves atime and mtime. IMHO this is - # the right thing to do, but perhaps it should be an option -- in - # particular, a site administrator might want installed files to - # reflect the time of installation rather than the last - # modification time before the installed release. - - # XXX copy_file by default preserves mode, which appears to be the - # wrong thing to do: if a file is read-only in the working - # directory, we want it to be installed read/write so that the next - # installation of the same module distribution can overwrite it - # without problems. (This might be a Unix-specific issue.) Thus - # we turn off 'preserve_mode' when copying to the build directory, - # since the build directory is supposed to be exactly what the - # installation will look like (ie. we preserve mode when - # installing). - - # Two options control which modules will be installed: 'packages' - # and 'py_modules'. The former lets us work with whole packages, not - # specifying individual modules at all; the latter is for - # specifying modules one-at-a-time. 
- - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - data = [] - if not self.packages: - return data - for package in self.packages: - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = 0 - if src_dir: - plen = len(src_dir)+1 - - # Strip directory from globbed filenames - filenames = [ - file[plen:] for file in self.find_data_files(package, src_dir) - ] - data.append((package, src_dir, build_dir, filenames)) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = (self.package_data.get('', []) - + self.package_data.get(package, [])) - files = [] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern))) - # Files that match more than one pattern are only added once - files.extend([fn for fn in filelist if fn not in files - and os.path.isfile(fn)]) - return files - - def build_package_data(self): - """Copy data files into build directory""" - lastdir = None - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file(os.path.join(src_dir, filename), target, - preserve_mode=False) - - def get_package_dir(self, package): - """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" - path = package.split('.') - - if not self.package_dir: - if path: - return os.path.join(*path) - else: - return '' - else: - tail = [] - while path: - try: - pdir = self.package_dir['.'.join(path)] - except KeyError: - tail.insert(0, path[-1]) - del path[-1] - else: - tail.insert(0, pdir) - return os.path.join(*tail) - else: - # Oops, got all the way through 'path' without finding a - # match in package_dir. If package_dir defines a directory - # for the root (nameless) package, then fallback on it; - # otherwise, we might as well have not consulted - # package_dir at all, as we just use the directory implied - # by 'tail' (which should be the same as the original value - # of 'path' at this point). - pdir = self.package_dir.get('') - if pdir is not None: - tail.insert(0, pdir) - - if tail: - return os.path.join(*tail) - else: - return '' - - def check_package(self, package, package_dir): - # Empty dir name means current directory, which we can probably - # assume exists. Also, os.path.exists and isdir don't know about - # my "empty string means current dir" convention, so we have to - # circumvent them. 
- if package_dir != "": - if not os.path.exists(package_dir): - raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir) - if not os.path.isdir(package_dir): - raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir) - - # Require __init__.py for all but the "root package" - if package: - init_py = os.path.join(package_dir, "__init__.py") - if os.path.isfile(init_py): - return init_py - else: - log.warn(("package init file '%s' not found " + - "(or not a regular file)"), init_py) - - # Either not in a package at all (__init__.py not expected), or - # __init__.py doesn't exist -- so don't return the filename. - return None - - def check_module(self, module, module_file): - if not os.path.isfile(module_file): - log.warn("file %s (for module %s) not found", module_file, module) - return False - else: - return True - - def find_package_modules(self, package, package_dir): - self.check_package(package, package_dir) - module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) - modules = [] - setup_script = os.path.abspath(self.distribution.script_name) - - for f in module_files: - abs_f = os.path.abspath(f) - if abs_f != setup_script: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - else: - self.debug_print("excluding %s" % setup_script) - return modules - - def find_modules(self): - """Finds individually-specified Python modules, ie. those listed by - module name in 'self.py_modules'. Returns a list of tuples (package, - module_base, filename): 'package' is a tuple of the path through - package-space to the module; 'module_base' is the bare (no - packages, no dots) module name, and 'filename' is the path to the - ".py" file (relative to the distribution root) that implements the - module. - """ - # Map package names to tuples of useful info about the package: - # (package_dir, checked) - # package_dir - the directory where we'll find source files for - # this package - # checked - true if we have checked that the package directory - # is valid (exists, contains __init__.py, ... ?) - packages = {} - - # List of (package, module, filename) tuples to return - modules = [] - - # We treat modules-in-packages almost the same as toplevel modules, - # just the "package" for a toplevel is empty (either an empty - # string or empty list, depending on context). Differences: - # - don't check for __init__.py in directory for empty package - for module in self.py_modules: - path = module.split('.') - package = '.'.join(path[0:-1]) - module_base = path[-1] - - try: - (package_dir, checked) = packages[package] - except KeyError: - package_dir = self.get_package_dir(package) - checked = 0 - - if not checked: - init_py = self.check_package(package, package_dir) - packages[package] = (package_dir, 1) - if init_py: - modules.append((package, "__init__", init_py)) - - # XXX perhaps we should also check for just .pyc files - # (so greedy closed-source bastards can distribute Python - # modules too) - module_file = os.path.join(package_dir, module_base + ".py") - if not self.check_module(module, module_file): - continue - - modules.append((package, module_base, module_file)) - - return modules - - def find_all_modules(self): - """Compute the list of all modules that will be built, whether - they are specified one-module-at-a-time ('self.py_modules') or - by whole packages ('self.packages'). 
Return a list of tuples - (package, module, module_file), just like 'find_modules()' and - 'find_package_modules()' do.""" - modules = [] - if self.py_modules: - modules.extend(self.find_modules()) - if self.packages: - for package in self.packages: - package_dir = self.get_package_dir(package) - m = self.find_package_modules(package, package_dir) - modules.extend(m) - return modules - - def get_source_files(self): - return [module[-1] for module in self.find_all_modules()] - - def get_module_outfile(self, build_dir, package, module): - outfile_path = [build_dir] + list(package) + [module + ".py"] - return os.path.join(*outfile_path) - - def get_outputs(self, include_bytecode=1): - modules = self.find_all_modules() - outputs = [] - for (package, module, module_file) in modules: - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - outputs.append(filename) - if include_bytecode: - if self.compile: - outputs.append(importlib.util.cache_from_source( - filename, optimization='')) - if self.optimize > 0: - outputs.append(importlib.util.cache_from_source( - filename, optimization=self.optimize)) - - outputs += [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir, filenames in self.data_files - for filename in filenames - ] - - return outputs - - def build_module(self, module, module_file, package): - if isinstance(package, str): - package = package.split('.') - elif not isinstance(package, (list, tuple)): - raise TypeError( - "'package' must be a string (dot-separated), list, or tuple") - - # Now put the module source file into the "build" area -- this is - # easy, we just copy it somewhere under self.build_lib (the build - # directory for Python source). - outfile = self.get_module_outfile(self.build_lib, package, module) - dir = os.path.dirname(outfile) - self.mkpath(dir) - return self.copy_file(module_file, outfile, preserve_mode=0) - - def build_modules(self): - modules = self.find_modules() - for (package, module, module_file) in modules: - # Now "build" the module -- ie. copy the source file to - # self.build_lib (the build directory for Python source). - # (Actually, it gets copied to the directory for this package - # under self.build_lib.) - self.build_module(module, module_file, package) - - def build_packages(self): - for package in self.packages: - # Get list of (package, module, module_file) tuples based on - # scanning the package directory. 'package' is only included - # in the tuple so that 'find_modules()' and - # 'find_package_tuples()' have a consistent interface; it's - # ignored here (apart from a sanity check). Also, 'module' is - # the *unqualified* module name (ie. no dots, no package -- we - # already know its package!), and 'module_file' is the path to - # the .py file, relative to the current directory - # (ie. including 'package_dir'). - package_dir = self.get_package_dir(package) - modules = self.find_package_modules(package, package_dir) - - # Now loop over the modules we found, "building" each one (just - # copy it to self.build_lib). 
- for (package_, module, module_file) in modules: - assert package == package_ - self.build_module(module, module_file, package) - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - prefix = self.build_lib - if prefix[-1] != os.sep: - prefix = prefix + os.sep - - # XXX this code is essentially the same as the 'byte_compile() - # method of the "install_lib" command, except for the determination - # of the 'prefix' string. Hmmm. - if self.compile: - byte_compile(files, optimize=0, - force=self.force, prefix=prefix, dry_run=self.dry_run) - if self.optimize > 0: - byte_compile(files, optimize=self.optimize, - force=self.force, prefix=prefix, dry_run=self.dry_run) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_file.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_file.py deleted file mode 100644 index 745913080399db59a7efcfd9d0c57ddc84fb72a4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tomlkit/toml_file.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import re - -from typing import TYPE_CHECKING - -from tomlkit.api import loads -from tomlkit.toml_document import TOMLDocument - - -if TYPE_CHECKING: - from _typeshed import StrPath as _StrPath -else: - from typing import Union - - _StrPath = Union[str, os.PathLike] - - -class TOMLFile: - """ - Represents a TOML file. - - :param path: path to the TOML file - """ - - def __init__(self, path: _StrPath) -> None: - self._path = path - self._linesep = os.linesep - - def read(self) -> TOMLDocument: - """Read the file content as a :class:`tomlkit.toml_document.TOMLDocument`.""" - with open(self._path, encoding="utf-8", newline="") as f: - content = f.read() - - # check if consistent line endings - num_newline = content.count("\n") - if num_newline > 0: - num_win_eol = content.count("\r\n") - if num_win_eol == num_newline: - self._linesep = "\r\n" - elif num_win_eol == 0: - self._linesep = "\n" - else: - self._linesep = "mixed" - - return loads(content) - - def write(self, data: TOMLDocument) -> None: - """Write the TOMLDocument to the file.""" - content = data.as_string() - - # apply linesep - if self._linesep == "\n": - content = content.replace("\r\n", "\n") - elif self._linesep == "\r\n": - content = re.sub(r"(? 
str: - """ - Generates chat - """ - input_dict = { - "user_name": user_name, - "ai_name": ai_name, - "chat_history": chat_history, - "memory": memory, - } - prompt_path = "prompt/chat/chatgpt_1027.txt" - - with open(prompt_path, "r") as f: - prompt = f.read() - - for key, value in input_dict.items(): - prompt = prompt.replace("{" + key + "}", str(value)) - - response = chatgpt.generate(prompt=prompt, mode="chat", verbose=False) - response = self.post_process(response) - return response - - def post_process(self, response: str) -> str: - """ - Post process the response - """ - if response.startswith('"'): - response = response[1:] - if response.endswith('"'): - response = response[:-1] - return response diff --git a/spaces/pyimagesearch/gif-creator/README.md b/spaces/pyimagesearch/gif-creator/README.md deleted file mode 100644 index 81b552f81365567a258f7ea81a5bd5ea631c2431..0000000000000000000000000000000000000000 --- a/spaces/pyimagesearch/gif-creator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gif Creator -emoji: 🔥 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/qingxu98/academic-chatgpt-beta/docs/README_JP.md b/spaces/qingxu98/academic-chatgpt-beta/docs/README_JP.md deleted file mode 100644 index 9fc6dbe595657894c9f6f449c50f6f681d762329..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/academic-chatgpt-beta/docs/README_JP.md +++ /dev/null @@ -1,302 +0,0 @@ -> **Note** -> -> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。 -> - -# ChatGPT 学術最適化 - -**このプロジェクトが好きだったら、スターをつけてください。もし、より使いやすい学術用のショートカットキーまたはファンクションプラグインを発明した場合は、issueを発行するかpull requestを作成してください。また、このプロジェクト自体によって翻訳されたREADMEは[英語説明書|](docs/README_EN.md)[日本語説明書|](docs/README_JP.md)[ロシア語説明書|](docs/README_RS.md)[フランス語説明書](docs/README_FR.md)もあります。** - -> **注意事項** -> -> 1. **赤色**のラベルが付いているファンクションプラグイン(ボタン)のみファイルを読み込めます。一部のプラグインはプラグインエリアのドロップダウンメニューにあります。新しいプラグインのPRを歓迎いたします! -> -> 2. このプロジェクトの各ファイルの機能は`self_analysis.md`(自己解析レポート)で詳しく説明されています。バージョンが追加されると、関連するファンクションプラグインをクリックして、GPTを呼び出して自己解析レポートを再生成することができます。一般的な質問は`wiki`にまとめられています。(`https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98`) - - -
          - -機能 | 説明 ---- | --- -ワンクリック整形 | 論文の文法エラーを一括で正確に修正できます。 -ワンクリック日英翻訳 | 日英翻訳には、ワンクリックで対応できます。 -ワンクリックコード説明 | コードの正しい表示と説明が可能です。 -[カスタムショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | カスタムショートカットキーをサポートします。 -[プロキシサーバーの設定](https://www.bilibili.com/video/BV1rc411W7Dr) | プロキシサーバーの設定をサポートします。 -モジュラーデザイン | カスタム高階関数プラグインと[関数プラグイン]、プラグイン[ホット更新]のサポートが可能です。詳細は[こちら](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) -[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン][ワンクリック理解](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード -[プログラム解析機能](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] ワンクリックで別のPython/C/C++/Java/Lua/...プロジェクトツリーを解析できます。 -論文読解 | [関数プラグイン] LaTeX論文の全文をワンクリックで解読し、要約を生成します。 -LaTeX全文翻訳、整形 | [関数プラグイン] ワンクリックでLaTeX論文を翻訳または整形できます。 -注釈生成 | [関数プラグイン] ワンクリックで関数の注釈を大量に生成できます。 -チャット分析レポート生成 | [関数プラグイン] 実行後、まとめレポートを自動生成します。 -[arxivヘルパー](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] 入力したarxivの記事URLで要約をワンクリック翻訳+PDFダウンロードができます。 -[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文タイトルと要約を抽出し、全文を翻訳します(マルチスレッド)。 -[Google Scholar Integratorヘルパー](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが興味深い記事を選択します。 -数式/画像/テーブル表示 | 数式のTex形式とレンダリング形式を同時に表示できます。数式、コードのハイライトをサポートしています。 -マルチスレッド関数プラグインサポート | ChatGPTをマルチスレッドで呼び出すことができ、大量のテキストやプログラムを簡単に処理できます。 -ダークグラジオ[テーマ](https://github.com/binary-husky/chatgpt_academic/issues/173)の起動 | 「/?__dark-theme=true」というURLをブラウザに追加することで、ダークテーマに切り替えることができます。 -[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)をサポート、[API2D](https://api2d.com/)インターフェースをサポート | GPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)による同時サポートは、とても素晴らしいですね! -huggingface免科学上网[オンライン版](https://huggingface.co/spaces/qingxu98/gpt-academic) | huggingfaceにログイン後、[このスペース](https://huggingface.co/spaces/qingxu98/gpt-academic)をコピーしてください。 -...... | ...... - - -
          - - -- 新しいインターフェース(config.pyのLAYOUTオプションを変更するだけで、「左右レイアウト」と「上下レイアウト」を切り替えることができます) -
          - -
          - - -- すべてのボタンは、functional.pyを読み込んで動的に生成されます。カスタム機能を自由に追加して、クリップボードを解放します -
          - -
          - -- 色を修正/修正 -
          - -
          - -- 出力に数式が含まれている場合、TeX形式とレンダリング形式の両方が表示され、コピーと読み取りが容易になります -
          - -
          - -- プロジェクトのコードを見るのが面倒?chatgptに整備されたプロジェクトを直接与えましょう -
          - -
          - -- 多数の大規模言語モデルの混合呼び出し(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
          - -
          - -多数の大規模言語モデルの混合呼び出し[huggingfaceテスト版](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta)(huggigface版はchatglmをサポートしていません) - - ---- - -## インストール-方法1:直接運転 (Windows、LinuxまたはMacOS) - -1. プロジェクトをダウンロードします。 -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. API_KEYとプロキシ設定を構成する - -`config.py`で、海外のProxyとOpenAI API KEYを構成して説明します。 -``` -1.あなたが中国にいる場合、OpenAI APIをスムーズに使用するには海外プロキシを設定する必要があります。構成の詳細については、config.py(1.その中のUSE_PROXYをTrueに変更し、2.手順に従ってプロキシを変更する)を詳細に読んでください。 -2. OpenAI API KEYを構成する。OpenAIのウェブサイトでAPI KEYを取得してください。一旦API KEYを手に入れると、config.pyファイルで設定するだけです。 -3.プロキシネットワークに関連する問題(ネットワークタイムアウト、プロキシが動作しない)をhttps://github.com/binary-husky/chatgpt_academic/issues/1にまとめました。 -``` -(P.S. プログラム実行時にconfig.pyの隣にconfig_private.pyという名前のプライバシー設定ファイルを作成し、同じ名前の設定を上書きするconfig_private.pyが存在するかどうかを優先的に確認します。そのため、私たちの構成読み取りロジックを理解できる場合は、config.pyの隣にconfig_private.pyという名前の新しい設定ファイルを作成し、その中のconfig.pyから設定を移動してください。config_private.pyはgitで保守されていないため、プライバシー情報をより安全にすることができます。) - -3. 依存関係をインストールします。 -```sh -# 選択肢があります。 -python -m pip install -r requirements.txt - - -# (選択肢2) もしAnacondaを使用する場合、手順は同様です: -# (選択肢2.1) conda create -n gptac_venv python=3.11 -# (選択肢2.2) conda activate gptac_venv -# (選択肢2.3) python -m pip install -r requirements.txt - -# 注: 公式のpipソースまたはAlibabaのpipソースを使用してください。 別のpipソース(例:一部の大学のpip)は問題が発生する可能性があります。 一時的なソースの切り替え方法: -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -もしあなたが清華ChatGLMをサポートする必要がある場合、さらに多くの依存関係をインストールする必要があります(Pythonに慣れない方やコンピューターの設定が十分でない方は、試みないことをお勧めします): -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. 実行 -```sh -python main.py -``` - -5. 関数プラグインのテスト -``` -- Pythonプロジェクト分析のテスト - 入力欄に `./crazy_functions/test_project/python/dqn` と入力し、「Pythonプロジェクト全体の解析」をクリックします。 -- 自己コード解読のテスト - 「[マルチスレッドデモ] このプロジェクト自体を解析します(ソースを翻訳して解読します)」をクリックします。 -- 実験的な機能テンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。 - 「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。 -- 関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。 -``` - -## インストール方法2:Dockerを使用する(Linux) - -1. ChatGPTのみ(大多数の人にお勧めです) -``` sh -# プロジェクトのダウンロード -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# 海外プロキシとOpenAI API KEYの設定 -config.pyを任意のテキストエディタで編集する -# インストール -docker build -t gpt-academic . -# 実行 -docker run --rm -it --net=host gpt-academic - -# 関数プラグインのテスト -## 関数プラグインテンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。 -「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。 -## Latexプロジェクトの要約を書くテスト -入力欄に./crazy_functions/test_project/latex/attentionと入力し、「テックス論文を読んで要約を書く」をクリックします。 -## Pythonプロジェクト分析のテスト -入力欄に./crazy_functions/test_project/python/dqnと入力し、[Pythonプロジェクトの全解析]をクリックします。 - -関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。 -``` - -2. ChatGPT + ChatGLM(Dockerに非常に詳しい人+十分なコンピューター設定が必要) - - - -```sh -# Dockerfileの編集 -cd docs && nano Dockerfile+ChatGLM -# ビルド方法 -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# 実行方法 (1) 直接実行: -docker run --rm -it --net=host --gpus=all gpt-academic -# 実行方法 (2) コンテナに入って調整する: -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - -## インストール方法3:その他のデプロイ方法 - -1. クラウドサーバーデプロイ -[デプロイwiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. 
WSL2を使用 (Windows Subsystem for Linux) -[デプロイwiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## インストール-プロキシ設定 -1. 通常の方法 -[プロキシを設定する](https://github.com/binary-husky/chatgpt_academic/issues/1) - -2. 初心者向けチュートリアル -[初心者向けチュートリアル](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - - ---- - -## カスタムボタンの追加(学術ショートカットキー) - -`core_functional.py`を任意のテキストエディタで開き、以下のエントリーを追加し、プログラムを再起動してください。(ボタンが追加されて表示される場合、前置詞と後置詞はホット編集がサポートされているため、プログラムを再起動せずに即座に有効になります。) - -例: -``` -"超级英译中": { - # 前置詞 - あなたの要求を説明するために使用されます。翻訳、コードの説明、編集など。 - "Prefix": "以下のコンテンツを中国語に翻訳して、マークダウンテーブルを使用して専門用語を説明してください。\n\n", - - # 後置詞 - プレフィックスと共に使用すると、入力内容を引用符で囲むことができます。 - "Suffix": "", -}, -``` - -
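For illustration only: a Prefix/Suffix entry of this kind is normally just concatenated around whatever text is in the input box before the request is sent to the model. The helper below is a minimal sketch under that assumption; the function name and dict handling are hypothetical and not taken from core_functional.py.

```python
# Minimal sketch (assumed behaviour, not the project's actual implementation):
# a custom button's Prefix/Suffix pair wraps the text currently in the input box.
def apply_custom_function(entry: dict, user_input: str) -> str:
    # Prefix states the task; Suffix can, for example, close a quote opened by the Prefix.
    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")

entry = {
    "Prefix": "Translate the following content into Chinese and explain the terms in a markdown table:\n\n",
    "Suffix": "",
}
print(apply_custom_function(entry, "Attention is all you need."))
```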
          - -
          - - ---- - -## いくつかの機能の例 - -### 画像表示: - -
          - -
          - - -### プログラムが自己解析できる場合: - -
          - -
          - -
          - -
          - -### 他のPython/Cppプロジェクトの解析: - -
          - -
          - -
          - -
          - -### Latex論文の一括読解と要約生成 - -
          - -
          - -### 自動報告生成 - -
          - - - -
          - -### モジュール化された機能デザイン - -
          - - -
          - - -### ソースコードの英語翻訳 - -
          - -
          - -## Todo およびバージョン計画: -- version 3.2+ (todo): 関数プラグインがより多くのパラメーターインターフェースをサポートするようになります。 -- version 3.1: 複数のgptモデルを同時にクエリし、api2dをサポートし、複数のapikeyの負荷分散をサポートします。 -- version 3.0: chatglmおよび他の小型llmのサポート -- version 2.6: プラグイン構造を再構成し、相互作用性を高め、より多くのプラグインを追加しました。 -- version 2.5: 自己更新。総括的な大規模プロジェクトのソースコードをまとめた場合、テキストが長すぎる、トークンがオーバーフローする問題を解決します。 -- version 2.4: (1)PDF全文翻訳機能を追加。(2)入力エリアの位置を切り替える機能を追加。(3)垂直レイアウトオプションを追加。(4)マルチスレッド関数プラグインの最適化。 -- version 2.3: 多スレッドの相互作用性を向上させました。 -- version 2.2: 関数プラグインでホットリロードをサポート -- version 2.1: 折りたたみ式レイアウト -- version 2.0: モジュール化された関数プラグインを導入 -- version 1.0: 基本機能 - -## 参考および学習 - - -以下は中国語のマークダウンファイルです。日本語に翻訳してください。既存のマークダウンコマンドを変更しないでください: - -``` -多くの優秀なプロジェクトの設計を参考にしています。主なものは以下の通りです: - -# 参考プロジェクト1:ChuanhuChatGPTから多くのテクニックを借用 -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# 参考プロジェクト2:清華ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B -``` - diff --git "a/spaces/qingxu98/gpt-academic/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" "b/spaces/qingxu98/gpt-academic/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" deleted file mode 100644 index 05e80d2c8432cd3db46b8ca5a30b045ca9f5c7ca..0000000000000000000000000000000000000000 --- "a/spaces/qingxu98/gpt-academic/crazy_functions/\350\260\267\346\255\214\346\243\200\347\264\242\345\260\217\345\212\251\346\211\213.py" +++ /dev/null @@ -1,179 +0,0 @@ -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, promote_file_to_downloadzone -from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file -import logging -import requests -import time -import random - -ENABLE_ALL_VERSION_SEARCH = True - -def get_meta_information(url, chatbot, history): - import arxiv - import difflib - import re - from bs4 import BeautifulSoup - from toolbox import get_conf - from urllib.parse import urlparse - session = requests.session() - - proxies, = get_conf('proxies') - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7', - 'Cache-Control':'max-age=0', - 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', - 'Connection': 'keep-alive' - } - session.proxies.update(proxies) - session.headers.update(headers) - - response = session.get(url) - # 解析网页内容 - soup = BeautifulSoup(response.text, "html.parser") - - def string_similar(s1, s2): - return difflib.SequenceMatcher(None, s1, s2).quick_ratio() - - if ENABLE_ALL_VERSION_SEARCH: - def search_all_version(url): - time.sleep(random.randint(1,5)) # 睡一会防止触发google反爬虫 - response = session.get(url) - soup = BeautifulSoup(response.text, "html.parser") - - for result in soup.select(".gs_ri"): - try: - url = result.select_one(".gs_rt").a['href'] - except: - continue - arxiv_id = extract_arxiv_id(url) - if not arxiv_id: - continue - search = arxiv.Search( - id_list=[arxiv_id], - max_results=1, - sort_by=arxiv.SortCriterion.Relevance, - ) - try: paper = next(search.results()) - except: paper = None - return paper - - return None - - def extract_arxiv_id(url): - # 返回给定的url解析出的arxiv_id,如url未成功匹配返回None - pattern = r'arxiv.org/abs/([^/]+)' - match = re.search(pattern, url) - if match: - return 
match.group(1) - else: - return None - - profile = [] - # 获取所有文章的标题和作者 - for result in soup.select(".gs_ri"): - title = result.a.text.replace('\n', ' ').replace(' ', ' ') - author = result.select_one(".gs_a").text - try: - citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来 - except: - citation = 'cited by 0' - abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格 - - # 首先在arxiv上搜索,获取文章摘要 - search = arxiv.Search( - query = title, - max_results = 1, - sort_by = arxiv.SortCriterion.Relevance, - ) - try: paper = next(search.results()) - except: paper = None - - is_match = paper is not None and string_similar(title, paper.title) > 0.90 - - # 如果在Arxiv上匹配失败,检索文章的历史版本的题目 - if not is_match and ENABLE_ALL_VERSION_SEARCH: - other_versions_page_url = [tag['href'] for tag in result.select_one('.gs_flb').select('.gs_nph') if 'cluster' in tag['href']] - if len(other_versions_page_url) > 0: - other_versions_page_url = other_versions_page_url[0] - paper = search_all_version('http://' + urlparse(url).netloc + other_versions_page_url) - is_match = paper is not None and string_similar(title, paper.title) > 0.90 - - if is_match: - # same paper - abstract = paper.summary.replace('\n', ' ') - is_paper_in_arxiv = True - else: - # different paper - abstract = abstract - is_paper_in_arxiv = False - - logging.info('[title]:' + title) - logging.info('[author]:' + author) - logging.info('[citation]:' + citation) - - profile.append({ - 'title': title, - 'author': author, - 'citation': citation, - 'abstract': abstract, - 'is_paper_in_arxiv': is_paper_in_arxiv, - }) - - chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract] - yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面 - return profile - -@CatchException -def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - disable_auto_promotion(chatbot=chatbot) - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import arxiv - import math - from bs4 import BeautifulSoup - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - meta_paper_info_list = yield from get_meta_information(txt, chatbot, history) - if len(meta_paper_info_list) == 0: - yield from update_ui_lastest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0) - return - batchsize = 5 - for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)): - if len(meta_paper_info_list[:batchsize]) > 0: - i_say = "下面是一些学术文献的数据,提取出以下内容:" + \ - "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \ - f"以下是信息源:{str(meta_paper_info_list[:batchsize])}" - - inputs_show_user = f"请分析此页面中出现的所有文章:{txt},这是第{batch+1}批" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=inputs_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown表格。你必须逐个文献进行处理。" - ) - - history.extend([ f"第{batch+1}批", gpt_say ]) - meta_paper_info_list = meta_paper_info_list[batchsize:] - - chatbot.append(["状态?", - "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section 
about \"你搜索的研究领域\" for me."]) - msg = '正常' - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - path = write_history_to_file(history) - promote_file_to_downloadzone(path, chatbot=chatbot) - chatbot.append(("完成了吗?", path)); - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 diff --git a/spaces/qinzhu/diy-girlfriend-online/text/cantonese.py b/spaces/qinzhu/diy-girlfriend-online/text/cantonese.py deleted file mode 100644 index 32eae72ef7eb43d493da6d6f75dd46176d0e8808..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend-online/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('chinese_dialect_lexicons/jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Badlapur Hindi Movie Full Hd Free Download ((TOP)).md b/spaces/quidiaMuxgu/Expedit-SAM/Badlapur Hindi Movie Full Hd Free Download ((TOP)).md deleted file mode 100644 index 606fd791d925dc6c8bf13f5bb51827b7520d330d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Badlapur Hindi Movie Full Hd Free Download ((TOP)).md +++ /dev/null @@ -1,8 +0,0 @@ -

          badlapur hindi movie full hd free download


          Download Zip » https://geags.com/2uCq6F



          -
          -Badlapur (2015) (Hindi). 128 minutes DVDRip IMDB. Released: February 20, 2015 Category: Bollywood Favorites, Bollywood Movies 2015,. Genres: Action, Crime, Drama, . You searched online for Badlapur (2015) movie (Hindi) | Action, Crime, Drama | HD | 2016 |. If Movie Online Badlapur (2015) (Hindi) | Action, Crime, Drama | HD | 2016 | If you liked it, you can leave a comment or review below). -Watch movies online for free, new movies, movies online without registration, only the best movies, good quality, watch for free. -Watch Indian Movies 8a78ff9644
          -
          -
          -

          diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Blackmagic Design DaVinci Resolve Studio 14.3 Crack [BEST] Serial Key Keygenl.md b/spaces/quidiaMuxgu/Expedit-SAM/Blackmagic Design DaVinci Resolve Studio 14.3 Crack [BEST] Serial Key Keygenl.md deleted file mode 100644 index 58ccfd8955a5cd8aafc83721b5065d5a9fa814d6..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Blackmagic Design DaVinci Resolve Studio 14.3 Crack [BEST] Serial Key Keygenl.md +++ /dev/null @@ -1,29 +0,0 @@ -
          -

          How to Download and Install Blackmagic Design DaVinci Resolve Studio 14.3 Crack Serial Key Keygenl

          -

          If you are looking for a professional video editing software that can handle offline and online editing, color correction, audio post-production, and visual effects in one tool, then you might want to try Blackmagic Design DaVinci Resolve Studio 14.3. This software is used by Hollywood filmmakers, TV show creators, and YouTube vloggers to create stunning videos with high-quality results.

          -

          However, DaVinci Resolve Studio 14.3 is not a free software. You need to purchase a license key to activate it and enjoy its full features. But what if you don't have the budget to buy it? Is there a way to get it for free? The answer is yes, but you need to be careful. There are many websites that claim to offer DaVinci Resolve Studio 14.3 crack serial key keygenl, but most of them are fake or malicious. They might infect your computer with viruses, malware, or ransomware, or steal your personal information.

          -

          Blackmagic Design DaVinci Resolve Studio 14.3 Crack Serial Key Keygenl


          Download Zip ……… https://geags.com/2uCsNL



          -

          That's why we have created this guide to help you download and install DaVinci Resolve Studio 14.3 crack serial key keygenl safely and easily. We will show you how to find a reliable source of the crack file, how to install it on your PC, and how to activate the software without any problems. Follow these steps carefully and you will be able to enjoy DaVinci Resolve Studio 14.3 for free.

          -

          Step 1: Find a Reliable Source of DaVinci Resolve Studio 14.3 Crack Serial Key Keygenl

          -

          The first step is to find a website that offers DaVinci Resolve Studio 14.3 crack serial key keygenl for download. You need to be very careful when choosing a website because many of them are scams or contain viruses. Here are some tips to help you find a trustworthy website:

          -
            -
          • Check the reviews and ratings of the website from other users. If the website has positive feedback and high ratings, then it might be safe to use.
          • -
          • Check the date of the post or the update of the crack file. If the website has recently updated the crack file, then it might be working and compatible with the latest version of DaVinci Resolve Studio 14.3.
          • -
          • Check the size of the crack file. If the website offers a crack file that is too small or too large compared to the original size of DaVinci Resolve Studio 14.3, then it might be fake or corrupted.
          • -
          • Check the download link of the crack file. If the website requires you to complete surveys, enter your email address, or provide your credit card information before downloading the crack file, then it might be a scam or a phishing attempt.
          • -
          -

          One of the websites that we recommend for downloading DaVinci Resolve Studio 14.3 crack serial key keygenl is [^2^]. This website has positive reviews from other users, has recently updated the crack file, has a reasonable size of the crack file, and has a direct download link without any surveys or registration required.

          -

          Step 2: Download and Install DaVinci Resolve Studio 14.3 Crack Serial Key Keygenl

          -

          The second step is to download and install DaVinci Resolve Studio 14.3 crack serial key keygenl on your PC. Here are the steps to follow:

          -
            -
          1. Go to [^2^] and click on the download button.
          2. -
          3. Wait for the download to finish and then extract the zip file using WinRAR or any other extraction tool.
          4. -
          5. Open the extracted folder and run the setup.exe file as administrator.
          6. -
          7. Follow the installation wizard and choose your preferred language, destination folder, and components.
          8. -
          9. Wait for the installation to complete and then close the wizard.
          10. -
          -

          Step 3: Activate DaVinci Resolve Studio 14.3 Crack Serial Key Keygenl

          -

          The final step is to activate DaV

          -

          d5da3c52bf
          -
          -
          \ No newline at end of file diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/csvutil.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/csvutil.py deleted file mode 100644 index 8992d13ffc7497bf441232552fbe9cfb776e4919..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/csvutil.py +++ /dev/null @@ -1,33 +0,0 @@ - - -import csv - -# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") - - -def CSVutil(file, rw, type, *args): - if type == "formanting": - if rw == "r": - with open(file) as fileCSVread: - csv_reader = list(csv.reader(fileCSVread)) - return ( - (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) - if csv_reader is not None - else (lambda: exec('raise ValueError("No data")'))() - ) - else: - if args: - doformnt = args[0] - else: - doformnt = False - qfr = args[1] if len(args) > 1 else 1.0 - tmb = args[2] if len(args) > 2 else 1.0 - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([doformnt, qfr, tmb]) - elif type == "stop": - stop = args[0] if args else False - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([stop]) - diff --git a/spaces/rajesh1729/Text-analysis-with-spacy-and-streamlit/README.md b/spaces/rajesh1729/Text-analysis-with-spacy-and-streamlit/README.md deleted file mode 100644 index ea7e13bb6b88fc9fd2c6915ad7951dbc156e1c03..0000000000000000000000000000000000000000 --- a/spaces/rajesh1729/Text-analysis-with-spacy-and-streamlit/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Text Analysis With Spacy And Streamlit -emoji: 📊 -colorFrom: purple -colorTo: blue -sdk: streamlit -app_file: app.py -pinned: false -license: afl-3.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/ramiin2/AutoGPT/tests/integration/milvus_memory_tests.py b/spaces/ramiin2/AutoGPT/tests/integration/milvus_memory_tests.py deleted file mode 100644 index ec38bf2f72087b5da679d26594ebff97d8a09b19..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/tests/integration/milvus_memory_tests.py +++ /dev/null @@ -1,57 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for the MilvusMemory class.""" -import random -import string -import unittest - -from autogpt.config import Config -from autogpt.memory.milvus import MilvusMemory - -try: - - class TestMilvusMemory(unittest.TestCase): - """Tests for the MilvusMemory class.""" - - def random_string(self, length: int) -> str: - """Generate a random string of the given length.""" - return "".join(random.choice(string.ascii_letters) for _ in range(length)) - - def setUp(self) -> None: - """Set up the test environment.""" - cfg = Config() - cfg.milvus_addr = "localhost:19530" - self.memory = MilvusMemory(cfg) - self.memory.clear() - - # Add example texts to the cache - self.example_texts = [ - "The quick brown fox jumps over the lazy dog", - "I love machine learning and natural language processing", - "The cake is a lie, but the pie is always true", - "ChatGPT is an advanced AI model for conversation", - ] - - for text in self.example_texts: - self.memory.add(text) - - # Add some random strings to test noise - for _ in range(5): - self.memory.add(self.random_string(10)) - - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache.""" - query = "I'm interested in artificial intelligence and NLP" - num_relevant = 3 - relevant_texts = self.memory.get_relevant(query, num_relevant) - - print(f"Top {num_relevant} relevant texts for the query '{query}':") - for i, text in enumerate(relevant_texts, start=1): - print(f"{i}. {text}") - - self.assertEqual(len(relevant_texts), num_relevant) - self.assertIn(self.example_texts[1], relevant_texts) - -except: - print( - "Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
- ) diff --git a/spaces/rayan-saleh/whisper2notion/app.py b/spaces/rayan-saleh/whisper2notion/app.py deleted file mode 100644 index e4f1571c0a3f7cc47055678c001733099939d699..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/app.py +++ /dev/null @@ -1,446 +0,0 @@ -from datetime import datetime -import math -from typing import Iterator -import argparse - -from io import StringIO -import os -import pathlib -import tempfile -import zipfile -import numpy as np - -import torch -from src.modelCache import ModelCache -from src.source import get_audio_source_collection -from src.vadParallel import ParallelContext, ParallelTranscription - -# External programs -import ffmpeg - -# UI -import gradio as gr - -from src.download import ExceededMaximumDuration, download_url -from src.utils import slugify, write_srt, write_vtt -from src.vad import AbstractTranscription, NonSpeechStrategy, PeriodicTranscriptionConfig, TranscriptionConfig, VadPeriodicTranscription, VadSileroTranscription -from src.whisperContainer import WhisperContainer - -# Limitations (set to -1 to disable) -DEFAULT_INPUT_AUDIO_MAX_DURATION = 600 # seconds - -# Whether or not to automatically delete all uploaded files, to save disk space -DELETE_UPLOADED_FILES = True - -# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself -MAX_FILE_PREFIX_LENGTH = 17 - -# Limit auto_parallel to a certain number of CPUs (specify vad_cpu_cores to get a higher number) -MAX_AUTO_CPU_CORES = 8 - -LANGUAGES = [ - "English", "Chinese", "German", "Spanish", "Russian", "Korean", - "French", "Japanese", "Portuguese", "Turkish", "Polish", "Catalan", - "Dutch", "Arabic", "Swedish", "Italian", "Indonesian", "Hindi", - "Finnish", "Vietnamese", "Hebrew", "Ukrainian", "Greek", "Malay", - "Czech", "Romanian", "Danish", "Hungarian", "Tamil", "Norwegian", - "Thai", "Urdu", "Croatian", "Bulgarian", "Lithuanian", "Latin", - "Maori", "Malayalam", "Welsh", "Slovak", "Telugu", "Persian", - "Latvian", "Bengali", "Serbian", "Azerbaijani", "Slovenian", - "Kannada", "Estonian", "Macedonian", "Breton", "Basque", "Icelandic", - "Armenian", "Nepali", "Mongolian", "Bosnian", "Kazakh", "Albanian", - "Swahili", "Galician", "Marathi", "Punjabi", "Sinhala", "Khmer", - "Shona", "Yoruba", "Somali", "Afrikaans", "Occitan", "Georgian", - "Belarusian", "Tajik", "Sindhi", "Gujarati", "Amharic", "Yiddish", - "Lao", "Uzbek", "Faroese", "Haitian Creole", "Pashto", "Turkmen", - "Nynorsk", "Maltese", "Sanskrit", "Luxembourgish", "Myanmar", "Tibetan", - "Tagalog", "Malagasy", "Assamese", "Tatar", "Hawaiian", "Lingala", - "Hausa", "Bashkir", "Javanese", "Sundanese" -] - -WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"] - -class WhisperTranscriber: - def __init__(self, input_audio_max_duration: float = DEFAULT_INPUT_AUDIO_MAX_DURATION, vad_process_timeout: float = None, - vad_cpu_cores: int = 1, delete_uploaded_files: bool = DELETE_UPLOADED_FILES, output_dir: str = None): - self.model_cache = ModelCache() - self.parallel_device_list = None - self.gpu_parallel_context = None - self.cpu_parallel_context = None - self.vad_process_timeout = vad_process_timeout - self.vad_cpu_cores = vad_cpu_cores - - self.vad_model = None - self.inputAudioMaxDuration = input_audio_max_duration - self.deleteUploadedFiles = delete_uploaded_files - self.output_dir = output_dir - - def set_parallel_devices(self, vad_parallel_devices: str): - self.parallel_device_list = [ device.strip() for device in 
vad_parallel_devices.split(",") ] if vad_parallel_devices else None - - def set_auto_parallel(self, auto_parallel: bool): - if auto_parallel: - if torch.cuda.is_available(): - self.parallel_device_list = [ str(gpu_id) for gpu_id in range(torch.cuda.device_count())] - - self.vad_cpu_cores = min(os.cpu_count(), MAX_AUTO_CPU_CORES) - print("[Auto parallel] Using GPU devices " + str(self.parallel_device_list) + " and " + str(self.vad_cpu_cores) + " CPU cores for VAD/transcription.") - - # Entry function for the simple tab - def transcribe_webui_simple(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow): - return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow) - - # Entry function for the full tab - def transcribe_webui_full(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, - initial_prompt: str, temperature: float, best_of: int, beam_size: int, patience: float, length_penalty: float, suppress_tokens: str, - condition_on_previous_text: bool, fp16: bool, temperature_increment_on_fallback: float, - compression_ratio_threshold: float, logprob_threshold: float, no_speech_threshold: float): - - # Handle temperature_increment_on_fallback - if temperature_increment_on_fallback is not None: - temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback)) - else: - temperature = [temperature] - - return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, - initial_prompt=initial_prompt, temperature=temperature, best_of=best_of, beam_size=beam_size, patience=patience, length_penalty=length_penalty, suppress_tokens=suppress_tokens, - condition_on_previous_text=condition_on_previous_text, fp16=fp16, - compression_ratio_threshold=compression_ratio_threshold, logprob_threshold=logprob_threshold, no_speech_threshold=no_speech_threshold) - - def transcribe_webui(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, **decodeOptions: dict): - try: - sources = self.__get_source(urlData, multipleFiles, microphoneData) - - try: - selectedLanguage = languageName.lower() if len(languageName) > 0 else None - selectedModel = modelName if modelName is not None else "base" - - model = WhisperContainer(model_name=selectedModel, cache=self.model_cache) - - # Result - download = [] - zip_file_lookup = {} - text = "" - vtt = "" - - # Write result - downloadDirectory = tempfile.mkdtemp() - source_index = 0 - - outputDirectory = self.output_dir if self.output_dir is not None else downloadDirectory - - # Execute whisper - for source in sources: - source_prefix = "" - - if (len(sources) > 1): - # Prefix (minimum 2 digits) - source_index += 1 - source_prefix = str(source_index).zfill(2) + "_" - print("Transcribing ", source.source_path) - - # Transcribe - result = self.transcribe_file(model, source.source_path, selectedLanguage, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, **decodeOptions) - filePrefix = slugify(source_prefix + source.get_short_name(), allow_unicode=True) - - source_download, source_text, source_vtt = self.write_result(result, filePrefix, outputDirectory) - - if 
len(sources) > 1: - # Add new line separators - if (len(source_text) > 0): - source_text += os.linesep + os.linesep - if (len(source_vtt) > 0): - source_vtt += os.linesep + os.linesep - - # Append file name to source text too - source_text = source.get_full_name() + ":" + os.linesep + source_text - source_vtt = source.get_full_name() + ":" + os.linesep + source_vtt - - # Add to result - download.extend(source_download) - text += source_text - vtt += source_vtt - - if (len(sources) > 1): - # Zip files support at least 260 characters, but we'll play it safe and use 200 - zipFilePrefix = slugify(source_prefix + source.get_short_name(max_length=200), allow_unicode=True) - - # File names in ZIP file can be longer - for source_download_file in source_download: - # Get file postfix (after last -) - filePostfix = os.path.basename(source_download_file).split("-")[-1] - zip_file_name = zipFilePrefix + "-" + filePostfix - zip_file_lookup[source_download_file] = zip_file_name - - # Create zip file from all sources - if len(sources) > 1: - downloadAllPath = os.path.join(downloadDirectory, "All_Output-" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".zip") - - with zipfile.ZipFile(downloadAllPath, 'w', zipfile.ZIP_DEFLATED) as zip: - for download_file in download: - # Get file name from lookup - zip_file_name = zip_file_lookup.get(download_file, os.path.basename(download_file)) - zip.write(download_file, arcname=zip_file_name) - - download.insert(0, downloadAllPath) - - return download, text, vtt - - finally: - # Cleanup source - if self.deleteUploadedFiles: - for source in sources: - print("Deleting source file " + source.source_path) - - try: - os.remove(source.source_path) - except Exception as e: - # Ignore error - it's just a cleanup - print("Error deleting source file " + source.source_path + ": " + str(e)) - - except ExceededMaximumDuration as e: - return [], ("[ERROR]: Maximum remote video length is " + str(e.maxDuration) + "s, file was " + str(e.videoDuration) + "s"), "[ERROR]" - - def transcribe_file(self, model: WhisperContainer, audio_path: str, language: str, task: str = None, vad: str = None, - vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1, **decodeOptions: dict): - - initial_prompt = decodeOptions.pop('initial_prompt', None) - - if ('task' in decodeOptions): - task = decodeOptions.pop('task') - - # Callable for processing an audio file - whisperCallable = model.create_callback(language, task, initial_prompt, **decodeOptions) - - # The results - if (vad == 'silero-vad'): - # Silero VAD where non-speech gaps are transcribed - process_gaps = self._create_silero_config(NonSpeechStrategy.CREATE_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, process_gaps) - elif (vad == 'silero-vad-skip-gaps'): - # Silero VAD where non-speech gaps are simply ignored - skip_gaps = self._create_silero_config(NonSpeechStrategy.SKIP, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, skip_gaps) - elif (vad == 'silero-vad-expand-into-gaps'): - # Use Silero VAD where speech-segments are expanded into non-speech gaps - expand_gaps = self._create_silero_config(NonSpeechStrategy.EXPAND_SEGMENT, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, expand_gaps) - elif (vad == 'periodic-vad'): - # Very 
simple VAD - mark every 5 minutes as speech. This makes it less likely that Whisper enters an infinite loop, but - # it may create a break in the middle of a sentence, causing some artifacts. - periodic_vad = VadPeriodicTranscription() - period_config = PeriodicTranscriptionConfig(periodic_duration=vadMaxMergeSize, max_prompt_window=vadPromptWindow) - result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config) - - else: - if (self._has_parallel_devices()): - # Use a simple period transcription instead, as we need to use the parallel context - periodic_vad = VadPeriodicTranscription() - period_config = PeriodicTranscriptionConfig(periodic_duration=math.inf, max_prompt_window=1) - - result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config) - else: - # Default VAD - result = whisperCallable.invoke(audio_path, 0, None, None) - - return result - - def process_vad(self, audio_path, whisperCallable, vadModel: AbstractTranscription, vadConfig: TranscriptionConfig): - if (not self._has_parallel_devices()): - # No parallel devices, so just run the VAD and Whisper in sequence - return vadModel.transcribe(audio_path, whisperCallable, vadConfig) - - gpu_devices = self.parallel_device_list - - if (gpu_devices is None or len(gpu_devices) == 0): - # No GPU devices specified, pass the current environment variable to the first GPU process. This may be NULL. - gpu_devices = [os.environ.get("CUDA_VISIBLE_DEVICES", None)] - - # Create parallel context if needed - if (self.gpu_parallel_context is None): - # Create a context wih processes and automatically clear the pool after 1 hour of inactivity - self.gpu_parallel_context = ParallelContext(num_processes=len(gpu_devices), auto_cleanup_timeout_seconds=self.vad_process_timeout) - # We also need a CPU context for the VAD - if (self.cpu_parallel_context is None): - self.cpu_parallel_context = ParallelContext(num_processes=self.vad_cpu_cores, auto_cleanup_timeout_seconds=self.vad_process_timeout) - - parallel_vad = ParallelTranscription() - return parallel_vad.transcribe_parallel(transcription=vadModel, audio=audio_path, whisperCallable=whisperCallable, - config=vadConfig, cpu_device_count=self.vad_cpu_cores, gpu_devices=gpu_devices, - cpu_parallel_context=self.cpu_parallel_context, gpu_parallel_context=self.gpu_parallel_context) - - def _has_parallel_devices(self): - return (self.parallel_device_list is not None and len(self.parallel_device_list) > 0) or self.vad_cpu_cores > 1 - - def _concat_prompt(self, prompt1, prompt2): - if (prompt1 is None): - return prompt2 - elif (prompt2 is None): - return prompt1 - else: - return prompt1 + " " + prompt2 - - def _create_silero_config(self, non_speech_strategy: NonSpeechStrategy, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1): - # Use Silero VAD - if (self.vad_model is None): - self.vad_model = VadSileroTranscription() - - config = TranscriptionConfig(non_speech_strategy = non_speech_strategy, - max_silent_period=vadMergeWindow, max_merge_size=vadMaxMergeSize, - segment_padding_left=vadPadding, segment_padding_right=vadPadding, - max_prompt_window=vadPromptWindow) - - return config - - def write_result(self, result: dict, source_name: str, output_dir: str): - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - text = result["text"] - language = result["language"] - languageMaxLineWidth = self.__get_max_line_width(language) - - print("Max line width " + str(languageMaxLineWidth)) - vtt = 
self.__get_subs(result["segments"], "vtt", languageMaxLineWidth) - srt = self.__get_subs(result["segments"], "srt", languageMaxLineWidth) - - output_files = [] - output_files.append(self.__create_file(srt, output_dir, source_name + "-subs.srt")); - output_files.append(self.__create_file(vtt, output_dir, source_name + "-subs.vtt")); - output_files.append(self.__create_file(text, output_dir, source_name + "-transcript.txt")); - - return output_files, text, vtt - - def clear_cache(self): - self.model_cache.clear() - self.vad_model = None - - def __get_source(self, urlData, multipleFiles, microphoneData): - return get_audio_source_collection(urlData, multipleFiles, microphoneData, self.inputAudioMaxDuration) - - def __get_max_line_width(self, language: str) -> int: - if (language and language.lower() in ["japanese", "ja", "chinese", "zh"]): - # Chinese characters and kana are wider, so limit line length to 40 characters - return 40 - else: - # TODO: Add more languages - # 80 latin characters should fit on a 1080p/720p screen - return 80 - - def __get_subs(self, segments: Iterator[dict], format: str, maxLineWidth: int) -> str: - segmentStream = StringIO() - - if format == 'vtt': - write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - elif format == 'srt': - write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - else: - raise Exception("Unknown format " + format) - - segmentStream.seek(0) - return segmentStream.read() - - def __create_file(self, text: str, directory: str, fileName: str) -> str: - # Write the text to a file - with open(os.path.join(directory, fileName), 'w+', encoding="utf-8") as file: - file.write(text) - - return file.name - - def close(self): - print("Closing parallel contexts") - self.clear_cache() - - if (self.gpu_parallel_context is not None): - self.gpu_parallel_context.close() - if (self.cpu_parallel_context is not None): - self.cpu_parallel_context.close() - - -def create_ui(input_audio_max_duration, share=False, server_name: str = None, server_port: int = 7860, - default_model_name: str = "medium", default_vad: str = None, vad_parallel_devices: str = None, - vad_process_timeout: float = None, vad_cpu_cores: int = 1, auto_parallel: bool = False, - output_dir: str = None): - ui = WhisperTranscriber(input_audio_max_duration, vad_process_timeout, vad_cpu_cores, DELETE_UPLOADED_FILES, output_dir) - - # Specify a list of devices to use for parallel processing - ui.set_parallel_devices(vad_parallel_devices) - ui.set_auto_parallel(auto_parallel) - - ui_description = "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse " - ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition " - ui_description += " as well as speech translation and language identification. " - - ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option." 
- - if input_audio_max_duration > 0: - ui_description += "\n\n" + "Max audio file length: " + str(input_audio_max_duration) + " s" - - ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)" - - simple_inputs = lambda : [ - gr.Dropdown(choices=WHISPER_MODELS, value=default_model_name, label="Model"), - gr.Dropdown(choices=sorted(LANGUAGES), label="Language"), - gr.Text(label="URL (YouTube, etc.)"), - gr.File(label="Upload Files", file_count="multiple"), - gr.Audio(source="microphone", type="filepath", label="Microphone Input"), - gr.Dropdown(choices=["transcribe", "translate"], label="Task"), - gr.Dropdown(choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], value=default_vad, label="VAD"), - gr.Number(label="VAD - Merge Window (s)", precision=0, value=5), - gr.Number(label="VAD - Max Merge Size (s)", precision=0, value=30), - gr.Number(label="VAD - Padding (s)", precision=None, value=1), - gr.Number(label="VAD - Prompt Window (s)", precision=None, value=3) - ] - - simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple, description=ui_description, article=ui_article, inputs=simple_inputs(), outputs=[ - gr.File(label="Download"), - gr.Text(label="Transcription"), - gr.Text(label="Segments") - ]) - - full_description = ui_description + "\n\n\n\n" + "Be careful when changing some of the options in the full interface - this can cause the model to crash." - - full_transcribe = gr.Interface(fn=ui.transcribe_webui_full, description=full_description, article=ui_article, inputs=[ - *simple_inputs(), - gr.TextArea(label="Initial Prompt"), - gr.Number(label="Temperature", value=0), - gr.Number(label="Best Of - Non-zero temperature", value=5, precision=0), - gr.Number(label="Beam Size - Zero temperature", value=5, precision=0), - gr.Number(label="Patience - Zero temperature", value=None), - gr.Number(label="Length Penalty - Any temperature", value=None), - gr.Text(label="Suppress Tokens - Comma-separated list of token IDs", value="-1"), - gr.Checkbox(label="Condition on previous text", value=True), - gr.Checkbox(label="FP16", value=True), - gr.Number(label="Temperature increment on fallback", value=0.2), - gr.Number(label="Compression ratio threshold", value=2.4), - gr.Number(label="Logprob threshold", value=-1.0), - gr.Number(label="No speech threshold", value=0.6) - ], outputs=[ - gr.File(label="Download"), - gr.Text(label="Transcription"), - gr.Text(label="Segments") - ]) - - demo = gr.TabbedInterface([simple_transcribe, full_transcribe], tab_names=["Simple", "Full"]) - - demo.launch(share=share, server_name=server_name, server_port=server_port) - - # Clean up - ui.close() - -if __name__ == '__main__': - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--input_audio_max_duration", type=int, default=DEFAULT_INPUT_AUDIO_MAX_DURATION, help="Maximum audio file length in seconds, or -1 for no limit.") - parser.add_argument("--share", type=bool, default=False, help="True to share the app on HuggingFace.") - parser.add_argument("--server_name", type=str, default=None, help="The host or IP to bind to. 
If None, bind to localhost.") - parser.add_argument("--server_port", type=int, default=7860, help="The port to bind to.") - parser.add_argument("--default_model_name", type=str, choices=WHISPER_MODELS, default="medium", help="The default model name.") - parser.add_argument("--default_vad", type=str, default="silero-vad", help="The default VAD.") - parser.add_argument("--vad_parallel_devices", type=str, default="", help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") - parser.add_argument("--vad_cpu_cores", type=int, default=1, help="The number of CPU cores to use for VAD pre-processing.") - parser.add_argument("--vad_process_timeout", type=float, default="1800", help="The number of seconds before inactivate processes are terminated. Use 0 to close processes immediately, or None for no timeout.") - parser.add_argument("--auto_parallel", type=bool, default=False, help="True to use all available GPUs and CPU cores for processing. Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") - parser.add_argument("--output_dir", "-o", type=str, default=None, help="directory to save the outputs") - - args = parser.parse_args().__dict__ - create_ui(**args) \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardfightvanguardridetovictory3dsromdownload.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardfightvanguardridetovictory3dsromdownload.md deleted file mode 100644 index b487b2ebdac24c87b8e15f24998390f9372c7f1e..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardfightvanguardridetovictory3dsromdownload.md +++ /dev/null @@ -1,8 +0,0 @@ - -

          https://www.reddit.com/r/Android/comments/7q4dg6/cardfightvanguardridetovictory3dsromdownload/?st=j7wlky5i&sh=8db85ad8 Androiddrive for Cardfightvanguardridetovictory3dsromdownload. Cardfightvanguardridetovictory3dsromdownload - 16-02-2020 - 15:12:53 - Cardfight Vanguard: Ridetovich https://www.reddit.com/r/Android/comments/7q4dg6/cardfightvanguardridetovictory3dsromdownload/ CrackCardfightvanguardridetovictory3dsromdownload. michjaej

          -

          We are offering Cardfightvanguardridetovictory3dsromdownload Cardfightvanguardridetovictory3dsromdownload to you for free because it is the popular and mega. Piyo tzyh1 microcdf 2e67bb680f http://aeriehall.hu/unibrow-i-win-cardfightvanguardridetovictory3dsromdownload-ailei-C0C8CPOR5.

          -

          cardfightvanguardridetovictory3dsromdownload


          Download ››››› https://urlgoal.com/2uCL9p



          -

          CLICK FOR CARDFIGHTVANGUARDRIDETOVICTORY3DSROMDOWNLOAD. Cardfightvanguardridetovictory3dsromdownload Below is only a small selection of free cartoons for children. Download cardfightvanguardridetovictory3dsromdownload was just released, with over 6,000 downloads within the first month. Cardfightvanguardridetovictory3dsromdownload is an arcade game where you use colorful cards to battle. The app can be used for much more than just cardfightvanguardridetovictory3dsromdownload - you can easily create a lineup of recommended cartoon series and movies. But the biggest feature is the ability to record videos with your iPhone. Download cardfightvanguardridetovictory3dsromdownload for free and enjoy watching your videos - either as broadcast on your phone or as edited on the app.

          -

          Click for Cardfightvanguardridetovictory3dsromdownload carlstand-usercardfightvanguardridetovictory3dsromdownload.whmci http://graceyelec.yolasite.com/cardfightvanguardridetovictory3dsromdownload/

          -
          -
          \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Decoz.master.numerology.program.8.0.cracked-tsrh.zip Full Version.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Decoz.master.numerology.program.8.0.cracked-tsrh.zip Full Version.md deleted file mode 100644 index 1dc6693e09f42f7b526c7536e366ecb6f5bc84a1..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Decoz.master.numerology.program.8.0.cracked-tsrh.zip Full Version.md +++ /dev/null @@ -1,9 +0,0 @@ - -

          after you've installed the cracked file, simply reboot your computer. you're now on the decoz.master.numerology.program.8.0.cracked-tsrh.exe. this time, you will be asked for a password when it starts. simply use the one you chose when you installed decoz. this will be the same as the one you chose for decrypting the cracked file. enter the password in the red box. if you are asked, enter the one you chose in step 3 of the installation of decoz.exe .

          -

          Decoz.master.numerology.program.8.0.cracked-tsrh.zip Full Version


          Download File >>> https://urlgoal.com/2uCKx5



          -

          it was a test program for the re-release of tcot and i was using it to check the tcot/tcun interface..
          mostly saying how well it works and the features are implemented and tested.
          it also includes the tcot in case anyone wants to play with it too.
          please note that the latest version of the program is on my website decoz.net/program/ and is not on here yet.

          -

          this is my test program for the re-release of tcot and i was using it to check the tcot/tcun interface..
          mostly saying how well it works and the features are implemented and tested.
          it also includes the tcot in case anyone wants to play with it too.
          please note that the latest version of the program is on my website decoz.net/program/ and is not on here yet.

          -

          and the last of my test programs for the tcot release.
          this program will generate hundreds of lines with the ancient greek temple times and i have been trying it out with several different tries with the prime numbers and the twin numbers.
          if you are familiar with these numbers i have included them, if not just see if you can get it right.
          it uses the old tcot/tcun interface and a lot of tweaking to the golden mean and the equidistant times to the single numbers and the double numbers.

          -

          -
          -
          \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Der Untergang Downfall German With English Subs Torrent.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Der Untergang Downfall German With English Subs Torrent.md deleted file mode 100644 index a11d2b238763d5b2ff89fad5401c3e93736b67fc..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Der Untergang Downfall German With English Subs Torrent.md +++ /dev/null @@ -1,84 +0,0 @@ -
          -

          Der Untergang (Downfall) - The German Film with English Subs that Shook the World

          - -

          Der Untergang (Downfall) is a 2004 German film that depicts the last ten days of Adolf Hitler's life and the collapse of Nazi Germany in 1945. The film is based on the memoirs of Hitler's personal secretary Traudl Junge and other eyewitness accounts. It features an acclaimed performance by Bruno Ganz as Hitler, as well as other actors portraying prominent figures of the Third Reich.

          -

          der untergang downfall german with english subs torrent


          DOWNLOAD >>> https://urlgoal.com/2uCJW6



          - -

          The film was praised by critics and audiences for its realistic and unflinching portrayal of the horrors of war and the madness of Hitler. It was nominated for an Academy Award for Best Foreign Language Film and won several awards in Germany and Europe. It also sparked controversy and debate for humanizing Hitler and his followers, as well as for using actual historical footage and locations.

          - -

          How to Watch Der Untergang (Downfall) with English Subs

          - -

          If you want to watch this masterpiece of cinema, you might be wondering how to find it with English subtitles. The film is in German, with some scenes in Russian, French, and Hungarian. The original DVD release had English subtitles, but they were criticized for being inaccurate and poorly translated. Some fans have created their own subtitles that are more faithful to the original dialogue and context.

          - -

One way to watch Der Untergang (Downfall) with English subs is to download it from a torrent site. A torrent is a file that contains information about other files that are shared by users over the internet. You can use torrent client software to download the movie file and the subtitle file from other users who have them. However, this method has some risks, such as legal issues, viruses, malware, and low quality.

          - -

          Another way to watch Der Untergang (Downfall) with English subs is to stream it online from a reputable site. Streaming is a method of watching video content without downloading it to your device. You can use a web browser or an app to access the site and watch the movie with subtitles. However, this method also has some drawbacks, such as buffering, ads, pop-ups, and limited availability.

          - -

          Why You Should Watch Der Untergang (Downfall) with English Subs

          - -

          Der Untergang (Downfall) is not only a film, but also a history lesson and a psychological study. It shows you the events that led to the end of one of the most brutal regimes in history, as well as the mindset and emotions of the people involved. It also challenges you to think about the nature of evil, power, loyalty, and morality.

          - -

          By watching Der Untergang (Downfall) with English subs, you can appreciate the film better and understand its nuances and subtleties. You can also learn some German words and phrases that are relevant to the historical context. You can also compare and contrast the film with other sources of information, such as books, documentaries, and interviews.

          - -

          Der Untergang (Downfall) is a film that will stay with you long after you watch it. It will make you feel, think, and question. It will also inspire you to learn more about this fascinating and tragic period of history. If you are looking for a film that will challenge you intellectually and emotionally, Der Untergang (Downfall) is the one for you.

          -

          -

          What You Can Learn from Der Untergang (Downfall) with English Subs

          - -

          Der Untergang (Downfall) is not only a film, but also a source of valuable insights and lessons. By watching this film with English subs, you can learn more about the history, culture, and psychology of Nazi Germany and its leaders. You can also gain a deeper understanding of the human condition and the consequences of war.

          - -

          Some of the things you can learn from Der Untergang (Downfall) with English subs are:

          - -
            -
          • The complexity and diversity of the Nazi regime and its supporters. The film shows that not all Nazis were fanatical ideologues or ruthless killers. Some were loyal, brave, compassionate, or conflicted. Some were disillusioned, fearful, or desperate. Some were opportunistic, ambitious, or corrupt. The film also shows that not all Germans supported Hitler or agreed with his policies. Some resisted, rebelled, or defected.
          • -
          • The psychology and personality of Adolf Hitler and his inner circle. The film portrays Hitler as a charismatic, paranoid, delusional, and megalomaniacal leader who gradually lost touch with reality and isolated himself from his people. It also depicts his relationships with his subordinates, allies, enemies, and lover. The film reveals the motivations, emotions, conflicts, and dilemmas of his closest associates, such as Eva Braun, Joseph Goebbels, Heinrich Himmler, Albert Speer, Hermann Fegelein, and Traudl Junge.
          • -
          • The impact and aftermath of World War II on Germany and Europe. The film depicts the horrors and tragedies of war from different perspectives and angles. It shows the suffering and death of soldiers and civilians on both sides of the conflict. It shows the destruction and devastation of Berlin and other cities by bombing and shelling. It shows the atrocities and crimes committed by the Nazis and their opponents. It also shows the fate and legacy of the Nazi regime and its leaders after their defeat.
          • -
          - -
          How to Enjoy Der Untergang (Downfall) with English Subs
          - -

          Der Untergang (Downfall) is a film that deserves your attention and appreciation. It is a film that will make you think, feel, and remember. It is a film that will enrich your knowledge and perspective. It is a film that will entertain you and move you.

          - -

          To enjoy Der Untergang (Downfall) with English subs, you need to prepare yourself mentally and emotionally. You need to be ready to witness some disturbing and shocking scenes that might make you uncomfortable or upset. You need to be open-minded and respectful of the historical facts and artistic choices. You need to be attentive and curious about the details and nuances.

          - -

          You also need to choose a good time and place to watch Der Untergang (Downfall) with English subs. You need to find a quiet and comfortable environment where you can focus on the film without distractions or interruptions. You need to allocate enough time to watch the film in one sitting without skipping or fast-forwarding. You need to adjust your screen brightness, volume level, subtitle size, and language preference according to your preference.

          - -

Finally, you need to enjoy Der Untergang (Downfall) with English subs as a cinematic masterpiece that will enrich your life. You need to appreciate the film's production quality, direction, script, acting, music, cinematography, editing, sound design, costume design, set design, special effects, and historical accuracy. You need to immerse yourself in the film's story, characters, emotions, themes, messages, symbolism, humor, irony, suspense, drama, action, romance, horror, and tragedy. -

          Where to Find Der Untergang (Downfall) with English Subs
          - -

          Der Untergang (Downfall) is a film that is worth watching and owning. If you want to find this film with English subs, you have several options to choose from. You can buy or rent the DVD or Blu-ray from online or physical stores that sell or ship to your country. You can also download or stream the film from legal and reliable sites that offer high-quality video and audio.

          - -

          Some of the sites where you can find Der Untergang (Downfall) with English subs are:

          - -
            -
          • YTS - This is a popular torrent site that offers a large collection of movies in various resolutions and formats. You can download Der Untergang (Downfall) in 720p or 1080p Blu-ray quality with English subtitles. However, you need to use a VPN and a torrent client software to access this site and download the film safely and anonymously.
          • -
          • SolidTorrents - This is another torrent site that provides a variety of torrents for different types of content. You can download Der Untergang (Downfall) in German with English subs in two parts, each about 730 MB in size. You also need to use a VPN and a torrent client software to access this site and download the film securely and privately.
          • -
          • Internet Archive - This is a non-profit digital library that preserves and provides access to millions of free books, movies, music, and other media. You can watch Der Untergang (Downfall) online or download it in German with English subs from this site. The film is about 1.4 GB in size and has a resolution of 720x400 pixels. You do not need to use a VPN or a torrent client software to access this site and watch or download the film legally and freely.
          • -
          - -Conclusion - -

          Der Untergang (Downfall) is a film that you should not miss if you are interested in history, drama, or cinema. It is a film that will show you the final days of Hitler and Nazi Germany in a realistic and captivating way. It is a film that will teach you valuable lessons and insights about war, power, evil, and humanity.

          - -

          By watching Der Untergang (Downfall) with English subs, you can enjoy this film more and understand its meaning and message better. You can also learn some German language and culture along the way. You can also compare and contrast this film with other sources of information and opinions about this topic.

          - -

          Der Untergang (Downfall) is a film that will make you appreciate the art of filmmaking and storytelling. It is a film that will make you appreciate the courage and sacrifice of those who fought against tyranny and oppression. It is a film that will make you appreciate the gift of life and freedom.

          - -

          If you are looking for a film that will challenge you intellectually and emotionally, Der Untergang (Downfall) is the one for you.

          -FAQs - -

          Here are some frequently asked questions and answers about Der Untergang (Downfall) and its English subs:

          - -
            -
          1. Is Der Untergang (Downfall) based on a true story?
          2. -

            Yes, Der Untergang (Downfall) is based on a true story. The film is mainly based on the memoirs of Traudl Junge, Hitler's personal secretary, who witnessed his last days in the Führerbunker. The film also uses other historical sources, such as the books Inside Hitler's Bunker by Joachim Fest and Hitler's Last Days by Gerhard Boldt.

            -
          3. Who played Adolf Hitler in Der Untergang (Downfall)?
          4. -

            The actor who played Adolf Hitler in Der Untergang (Downfall) was Bruno Ganz, a Swiss actor who was widely regarded as one of the greatest German-speaking actors of his generation. He spent four months preparing for the role, studying Hitler's speeches, mannerisms, and movements. He also consulted with a historian and a voice coach to perfect his portrayal. He received critical acclaim and several awards for his performance.

            -
          5. What is the meaning of the title Der Untergang (Downfall)?
          6. -

            The title Der Untergang (Downfall) has multiple meanings and interpretations. It can refer to the downfall of Hitler, Nazi Germany, or the Third Reich. It can also refer to the downfall of humanity, morality, or civilization. It can also refer to the downfall of the characters in the film, who face their own personal and professional crises.

            -
          7. Where was Der Untergang (Downfall) filmed?
          8. -

            Der Untergang (Downfall) was filmed in various locations in Germany, Austria, and Russia. The scenes in the Führerbunker were filmed in a studio in Munich, where a replica of the bunker was built. The scenes in Berlin were filmed in St. Petersburg, where some buildings and streets resembled the war-torn city. The scenes in East Prussia were filmed in Saxony-Anhalt, where a forest was set on fire to create a realistic atmosphere.

            -
          9. How accurate is Der Untergang (Downfall)?
          10. -

            Der Untergang (Downfall) is considered to be one of the most accurate and authentic films about Hitler and Nazi Germany. The film follows the historical facts and events as closely as possible, using eyewitness accounts, documents, and archives. The film also recreates the costumes, props, sets, and special effects with great attention to detail. However, the film also takes some artistic liberties and dramatizes some scenes and dialogues for cinematic purposes.

            -

          -
          -
          \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Grease Full Movie For Free !!INSTALL!!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Grease Full Movie For Free !!INSTALL!!.md deleted file mode 100644 index 3467fd4efa375038453e3b6d82bc3733a6fe66f2..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Grease Full Movie For Free !!INSTALL!!.md +++ /dev/null @@ -1,46 +0,0 @@ - -

          How to Download Grease Full Movie For Free

          -

          If you are a fan of musicals, romance and nostalgia, you might want to download Grease full movie for free. Grease is a 1978 classic film that stars John Travolta and Olivia Newton-John as Danny Zuko and Sandy Olsson, two high school sweethearts who have a summer fling and then reunite at Rydell High. The film features iconic songs like "You're The One That I Want", "Summer Nights" and "Greased Lightning", as well as memorable characters like the T-Birds, the Pink Ladies and Rizzo.

          -

          Download Grease Full Movie For Free


Download https://urlgoal.com/2uCMOf



          -

          But how can you download Grease full movie for free? There are several options available online, but you need to be careful about the quality, legality and safety of the sources. In this article, we will show you some of the best ways to download Grease full movie for free, as well as some tips and tricks to enjoy the film to the fullest.

          - -

          Download Grease Full Movie For Free from Torrent Sites

          -

          One of the most popular ways to download Grease full movie for free is to use torrent sites. Torrent sites are platforms that allow users to share files with each other using a peer-to-peer network. You can find almost any movie or TV show on torrent sites, including Grease.

          -

To download Grease full movie for free from torrent sites, you need to have a torrent client installed on your device. A torrent client is software that enables you to download and upload files using torrent protocols. Some of the most common torrent clients are uTorrent, BitTorrent and Vuze.

          -

          Once you have a torrent client, you need to find a reliable torrent site that has Grease available for download. Some of the most popular torrent sites are YTS, The Pirate Bay and 1337x. You can search for Grease on these sites and find various versions of the film with different qualities, sizes and subtitles.

          -

          After you find the version of Grease that you want to download, you need to click on the download link or magnet link and open it with your torrent client. The torrent client will then start downloading the file from other users who have it on their devices. The download speed will depend on the number of seeders (users who have the complete file) and leechers (users who are downloading the file).

          -

          When the download is complete, you can open the file with your preferred media player and enjoy watching Grease full movie for free.

          -

          - -

          Download Grease Full Movie For Free from Streaming Sites

          -

          Another way to download Grease full movie for free is to use streaming sites. Streaming sites are platforms that allow users to watch movies and TV shows online without downloading them. You can find a variety of genres and categories on streaming sites, including musicals like Grease.

          -

To download Grease full movie for free from streaming sites, you need to have a web browser installed on your device. A web browser is software that enables you to access websites and online content. Some of the most common web browsers are Chrome, Firefox and Safari.

          -

          Once you have a web browser, you need to find a reliable streaming site that has Grease available for watching. Some of the most popular streaming sites are ABC.com, Freeform.com and IEVENN.com. You can search for Grease on these sites and find various versions of the film with different qualities, languages and subtitles.

          -

          After you find the version of Grease that you want to watch, you need to click on the play button and start streaming the film online. However, if you want to download Grease full movie for free from streaming sites, you need to use a third-party tool that can capture and save the video stream.

          -

          One of the most common tools that can help you download Grease full movie for free from streaming sites is Video DownloadHelper. Video DownloadHelper is a browser extension that detects any video or audio content on a web page and allows you to download it with one click. You can install Video DownloadHelper on your web browser from its official website or from your browser's extension store.

          -

          When you install Video DownloadHelper, you will see an icon on your browser's toolbar that indicates when there is a video or audio content available for download on a web page. When you are streaming Grease online, you can click on this icon and select the option to download the video stream.

          -

          The tool will then start downloading the file from the streaming site and save it on your device. The download speed will depend on your internet connection and the quality of the video stream.

          -

          When the download is complete, you can open the file with your preferred media player and enjoy watching Grease full movie for free.

          - -

          Tips and Tricks to Enjoy Grease Full Movie For Free

          -

          Now that you know how to download Grease full movie for free from torrent sites or streaming sites, here are some tips and tricks to enjoy the film to the fullest:

          -
            -
          • Make sure that you have enough space on your device before downloading or streaming Grease full movie for free. The file size of Grease can vary depending on the quality and format of the version that you choose. You can check the file size before downloading or streaming by looking at the details or information provided by the source.
          • -
          • Make sure that you have a good internet connection before downloading or streaming Grease full movie for free. The internet speed can affect the quality and stability of the download or stream. You can check your internet speed by using online tools like Speedtest.net or Fast.com.
          • -
• Make sure that you have a good VPN service before downloading or streaming Grease full movie for free. A VPN service is software that encrypts your online traffic and hides your IP address from prying eyes. This can help you avoid any legal issues or cyber threats that might arise from accessing torrent sites or streaming sites.
          • -
• Make sure that you have good antivirus software before downloading or streaming Grease full movie for free. Antivirus software protects your device from viruses, malware and other harmful programs that might infect your device when downloading or streaming files from unknown sources.
          • -
          • Make sure that you have some snacks and drinks ready before watching Grease full movie for free. Watching a musical like Grease can be more fun when you have something to munch on and sip on while enjoying the songs and scenes.
          • -
          - -

          Conclusion

          -

          Grease is one of the most iconic musicals of all time that features John Travolta and Olivia Newton-John as Danny Zuko and Sandy Olsson, two high school lovers who have a summer romance and then reunite at Rydell High. The film has catchy songs like "You're The One That I Want", "Summer Nights" and "Greased Lightning", as well as memorable characters like the T-Birds, the Pink Ladies and Rizzo.

          -

          If you want to watch or rewatch this classic film, you might want to know how to download Grease full movie for free. There are two main ways to do this: using torrent sites or using streaming sites. Both methods have their pros and cons, but they can help you get access to Grease full movie for free online.

          -

          However, before downloading or streaming Grease full movie for free, make sure that you follow some tips and tricks to ensure a safe and enjoyable experience. These include having enough space on your device, having a good internet connection, having a good VPN service, having a good antivirus software and having some snacks and drinks ready.

          -

          We hope that this article has helped you learn how to download Grease full movie for free from torrent sites or streaming sites. Now go ahead and enjoy this musical masterpiece!

          -


          -
          -
          \ No newline at end of file diff --git a/spaces/robin0307/MMOCR/configs/_base_/schedules/schedule_adadelta_5e.py b/spaces/robin0307/MMOCR/configs/_base_/schedules/schedule_adadelta_5e.py deleted file mode 100644 index ad996d65f8aca131023d34712e2d960bf6928cce..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/_base_/schedules/schedule_adadelta_5e.py +++ /dev/null @@ -1,8 +0,0 @@ -# optimizer -optimizer = dict(type='Adadelta', lr=1.0) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict(policy='step', step=[]) -# running settings -runner = dict(type='EpochBasedRunner', max_epochs=5) -checkpoint_config = dict(interval=1) diff --git a/spaces/rorallitri/biomedical-language-models/logs/3gp Hindi Mere Dad Ki Maruti The Ultimate Gift for His Sisters Wedding Goes Missing.md b/spaces/rorallitri/biomedical-language-models/logs/3gp Hindi Mere Dad Ki Maruti The Ultimate Gift for His Sisters Wedding Goes Missing.md deleted file mode 100644 index 27e68a024194378f28c1f88bc4d2a784a17d7f27..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/3gp Hindi Mere Dad Ki Maruti The Ultimate Gift for His Sisters Wedding Goes Missing.md +++ /dev/null @@ -1,9 +0,0 @@ - -


          Soniyaan saariyaan de vich

          Main ta lagdi haan tich

          Soniyaan saariyaan de vich

          Main ta lagdi haan tich

          Dus mere wargah aithe kaun hai

          Ho teri kurti hai tight

          Tenu karaan invite

          Main tah kardangi tenu tainvon ve

          Ho love tenu provide karaangi

          Kuch na tere toh hide karaangi

          Karega buraah buraah


          Jado hilegi meri hip karega

          Hip Hip Hurrah

          Jado hilegi meri hip karega

          Hip Hip Hurrah

          Mainu pike tu zip zip karega

          Hip Hip Hurrah

          Jado hilegi meri hip karega

          Hip Hip Hurrah



          Hot hot hain ke mahi mere armaan

          Main tah haan padaangi phir muchiyaan de maan

          Jawaani teh ticket karaan

          Tera chaddar thalle wait karaan

          Jawaani teh ticket karaan

          Tera chaddar thalle wait karaan

          Love da main record todh

          Bacche paida eight karaan

          Oye chakde..

          Tenu main excite karaangi

          Kalle utthe fight karaangi

          Karega buraah buraah

          Ha ha ha..


          Jado hilegi meri hip karega

          Hip Hip Hurrah

          Jado hilegi meri hip karega

          Hip Hip Hurrah

          Mainu pike tu zip zip karega

          Hip Hip Hurrah

          Jado hilegi meri hip karega

          Hip Hip Hurrah


          Oye tu hai mera sunny boy

          Honey boy honey boy

          Hoye tu hai mera sunny boy

          Honey boy honey boy

          Kaanu armaanan ji khoon kariye

          Marriage toh pehla ji honeymoon kariye

          Raunchy raunchy gal karaangi

          Lavaangi love therapy

          Raunchy raunchy gal karaangi

          Lavaangi love therapy

          Sexy episode banega

          Banegi phir TRP

          Oh soniya..

          Tere utte rule karaangi

          Phir main tenu cool karaangi

          Karega buraah buraah


          Jado hilegi meri hip karega

          Hip Hip Hurrah

          Hilegi meri hip karega

          Hip Hip Hurrah

          Pike tu zip zip karega

          Hip Hip Hurrah

          Jado hilegi meri hip karega

          Hip Hip Hurrah


          Hip Hip Hurrah

          Hip Hip Hurrah..

          -

          Soniyaan saariyaan de vich
          Main ta lagdi haan tich
          Soniyaan saariyaan de vich
          Main ta lagdi haan tich
          Dus mere wargah aithe kaun hai
          Ho teri kurti hai tight
          Tenu karaan invite
          Main tah kardangi tenu tainvon ve
          Ho love tenu provide karaangi
          Kuch na tere toh hide karaangi
          Karega buraah buraah

          -

          3gp Hindi Mere Dad Ki Maruti


Download File https://tinurll.com/2uzoHR



          -

          Hot hot hain ke mahi mere armaan
          Main tah haan padaangi phir muchiyaan de maan
          Jawaani teh ticket karaan
          Tera chaddar thalle wait karaan
          Jawaani teh ticket karaan
          Tera chaddar thalle wait karaan
          Love da main record todh
          Bacche paida eight karaan
          Oye chakde..
          Tenu main excite karaangi
          Kalle utthe fight karaangi
          Karega buraah buraah
          Ha ha ha..

          -

          Hot hot hain ke
          Mahi mere armaan
          Main tah haan padaangi
          Phir muchiyaan de maan
          Jawaani teh ticket karaan
          Tera chaddar thalle wait karaan
          Jawaani teh ticket karaan
          Tera chaddar thalle wait karaan
          Love da main record todh
          Bacche paida eight karaan
          Oye chakde

          -


          -
          -
          \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Aanch free download movie The story of a village torn by violence and corruption.md b/spaces/rorallitri/biomedical-language-models/logs/Aanch free download movie The story of a village torn by violence and corruption.md deleted file mode 100644 index cc782aa3a3dbc5b94d9380f6d7ae13d66624f6c9..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Aanch free download movie The story of a village torn by violence and corruption.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Sarabjit Full Movie English Subtitle Free Download architetto infiniti


          DOWNLOAD ->->->-> https://tinurll.com/2uznIj



- -
          -
          -
          -

          diff --git a/spaces/rorallitri/biomedical-language-models/logs/Downloadmoviesin720pNoOneKilledJessica1080p __HOT__.md b/spaces/rorallitri/biomedical-language-models/logs/Downloadmoviesin720pNoOneKilledJessica1080p __HOT__.md deleted file mode 100644 index 4b0dda6a01c76846a282a9ce83bcd200d37dd99d..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Downloadmoviesin720pNoOneKilledJessica1080p __HOT__.md +++ /dev/null @@ -1,6 +0,0 @@ -

          downloadmoviesin720pNoOneKilledJessica1080p


          DOWNLOAD ✺✺✺ https://tinurll.com/2uzo8n



          -
-
          -
          -
          -

          diff --git a/spaces/rzimmerdev/lenet_mnist/app.py b/spaces/rzimmerdev/lenet_mnist/app.py deleted file mode 100644 index 4d45438af9850502a34980e794a11bc4a6986835..0000000000000000000000000000000000000000 --- a/spaces/rzimmerdev/lenet_mnist/app.py +++ /dev/null @@ -1,3 +0,0 @@ -from src.demo import main - -main("cpu") diff --git a/spaces/sanchit-gandhi/enhanced_direct_s2st/README.md b/spaces/sanchit-gandhi/enhanced_direct_s2st/README.md deleted file mode 100644 index eca8beff9f6b55ecd0435070ac28e2c1913b67c0..0000000000000000000000000000000000000000 --- a/spaces/sanchit-gandhi/enhanced_direct_s2st/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Enhanced Direct S2ST -emoji: 🗣 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/scedlatioru/img-to-music/example/Adobe Cs6 Master Collection Crack Only.md b/spaces/scedlatioru/img-to-music/example/Adobe Cs6 Master Collection Crack Only.md deleted file mode 100644 index 7471df0da74871d97fb2ba87646ed91698c3570c..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Adobe Cs6 Master Collection Crack Only.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Adobe Cs6 Master Collection Crack Only


Download File https://gohhs.com/2uEzEb



- -Adobe CS6 Master Collection Torrent. The Adobe CS6 Master Collection torrent is a powerful trial-edition suite available only for Mac, belonging ...
          -
          -
          -

          diff --git a/spaces/scedlatioru/img-to-music/example/FULL Adobe Premiere Pro CC 2019 13.0.0 (x64) BEST Crack.md b/spaces/scedlatioru/img-to-music/example/FULL Adobe Premiere Pro CC 2019 13.0.0 (x64) BEST Crack.md deleted file mode 100644 index fb2f689fc4a717ae38add975a80c2acf239c5747..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/FULL Adobe Premiere Pro CC 2019 13.0.0 (x64) BEST Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

          FULL Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack


          Download File ✒ ✒ ✒ https://gohhs.com/2uEA83



- -Adobe Premiere Pro CC 2019 13.0.0 (x64) + Crack [CracksNow] torrent.
          -
          -
          -

          diff --git a/spaces/scedlatioru/img-to-music/example/Ml 1660 V34 Generator.md b/spaces/scedlatioru/img-to-music/example/Ml 1660 V34 Generator.md deleted file mode 100644 index a094a5e827beb538b8d2c66427a389aae52a7cec..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Ml 1660 V34 Generator.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Ml 1660 V34 Generator


Download https://gohhs.com/2uEAp6



          -
-chip-ml1660-v34 Firmware fix for the ML-1660 v3.4: just edit the file and enter your printer's serial number.
          -
          -
          -

          diff --git a/spaces/scedlatioru/img-to-music/example/Pink Floyd Meddle Full Album Mp3 Download [TOP].md b/spaces/scedlatioru/img-to-music/example/Pink Floyd Meddle Full Album Mp3 Download [TOP].md deleted file mode 100644 index 0a54035130abbf7b4157f4f42b8591fab6711e7d..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Pink Floyd Meddle Full Album Mp3 Download [TOP].md +++ /dev/null @@ -1,6 +0,0 @@ -

          Pink Floyd Meddle Full Album Mp3 Download


          DOWNLOAD ✵✵✵ https://gohhs.com/2uEzFU



          -
-Pink Floyd - Meddle download free mp3 flac. ... Meddle. Category: Rock. MP3 RAR album size: 1761 mb. FLAC RAR album size: 1691 mb. Formats: VOX MP2 ...
          -
          -
          -

          diff --git a/spaces/sd9972/autotune/Dockerfile b/spaces/sd9972/autotune/Dockerfile deleted file mode 100644 index 94ee76a4f45af463ab7f945633c9258172f9cc80..0000000000000000000000000000000000000000 --- a/spaces/sd9972/autotune/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM huggingface/autotrain-advanced:latest -CMD autotrain app --port 7860 diff --git a/spaces/segestic/HealthBlock/app.py b/spaces/segestic/HealthBlock/app.py deleted file mode 100644 index 1ed5cf5cc3e463b4e48bea45443c3a1424baea81..0000000000000000000000000000000000000000 --- a/spaces/segestic/HealthBlock/app.py +++ /dev/null @@ -1,187 +0,0 @@ -import streamlit as st -from pytezos import pytezos -import pandas as pd - -pytezos = pytezos.using(shell = 'https://rpc.tzkt.io/ghostnet', key='edsk3MrRkoidY2SjEgufvi44orvyjxgZoy4LhaJNTNcddWykW6SssL') -contract = pytezos.contract('KT1KvCVKiZhkPG8s9CCoxW3r135phk2HhZUV') - -def welcome(): - return "Welcome To Decentralised Medical Records" - -def addUser(): - name = st.text_input("Enter Full Name of the Patient") - email = st.text_input("Enter Email of the Patient") - number = st.number_input("Enter the Contact Number", step=1, min_value=1) - age = st.number_input("Enter Age", step=1, min_value=18) - gender = st.radio("Enter Gender", ('Male', 'Female')) - #Hid = st.text_input("Enter your Unique Hospital Id") - #hospital=st.text_input("Enter the Hospital details") - - - if st.button("Register Patient"): - a = pytezos.using(shell = 'https://rpc.tzkt.io/ghostnet', key='edsk3MrRkoidY2SjEgufvi44orvyjxgZoy4LhaJNTNcddWykW6SssL') - contract = a.contract('KT1KvCVKiZhkPG8s9CCoxW3r135phk2HhZUV') - - contract.addUser(email = email, name = name, age = age, gender = gender, number = number).with_amount(0).as_transaction().fill().sign().inject() - - -def ViewPatientRecord(): - Hid = st.text_input("Enter Unique Hospital Id of Patient") - if st.button("View Records"): - usds = pytezos.using(shell = 'https://rpc.tzkt.io/ghostnet').contract('KT1KvCVKiZhkPG8s9CCoxW3r135phk2HhZUV') - #print (usds.storage())#debug - #print(list(usds.storage().keys())[0]) - - #if email is in storage... 
print record - if Hid in list(usds.storage().keys()): - st.text(usds.storage()) - #print(usds.storage()) - #st.text(list(usds.storage().keys())[0]) - #st.text(list(usds.storage().values())) - else: - st.text('Not Found') - #st.text(usds.storage[email]['Record']()) - - -####################WIDGETS START ################################## - -def filters_widgets(df, columns=None, allow_single_value_widgets=False): - # Parse the df and get filter widgets based for provided columns - if not columns: #if columns not provided, use all columns to create widgets - columns=df.columns.tolist() - if allow_single_value_widgets: - threshold=0 - else: - threshold=1 - widget_dict = {} - filter_widgets = st.container() - filter_widgets.warning( - "After selecting filters press the 'Apply Filters' button at the bottom.") - if not allow_single_value_widgets: - filter_widgets.markdown("Only showing columns that contain more than 1 unique value.") - with filter_widgets.form(key="data_filters"): - not_showing = [] - for y in df[columns]: - if str(y) in st.session_state: #update value from session state if exists - selected_opts = st.session_state[str(y)] - else: #if doesnt exist use all values as defaults - selected_opts = df[y].unique().tolist() - if len(df[y].unique().tolist()) > threshold: #checks if above threshold - widget_dict[y] = st.multiselect( - label=str(y), - options=df[y].unique().tolist(), - default=selected_opts, - key=str(y), - ) - else:#if doesnt pass threshold - not_showing.append(y) - if not_showing:#if the list is not empty, show this warning - st.warning( - f"Not showing filters for {' '.join(not_showing)} since they only contain one unique value." - ) - submit_button = st.form_submit_button("Apply Filters") - #reset button to return all unselected values back - reset_button = filter_widgets.button( - "Reset All Filters", - key="reset_buttons", - on_click=reset_filter_widgets_to_default, - args=(df, columns), - ) - filter_widgets.warning( - "Dont forget to apply filters by pressing 'Apply Filters' at the bottom." - ) - -def reset_filter_widgets_to_default(df, columns): - for y in df[columns]: - if str(y) in st.session_state: - del st.session_state[y] - -####################WIDGETS END################################## - -def main(): - - st.set_page_config(page_title="Decentralised Health Vaccine Records") - - st.title("Blockchain Based Medical Records") - st.markdown( - """
          -

          - Vaccine Data


          """, - unsafe_allow_html=True, - ) - - - st.markdown( - """

- This project greatly decreases any chance of misuse or manipulation of the medical records

          """, - unsafe_allow_html=True, - ) - - st.sidebar.title("Choose your entry point") - st.sidebar.markdown("Select the entry point accordingly:") - - algo = st.sidebar.selectbox( - "Select the Option", options=[ - "Register Patient", - "View Patient Data" - ] - ) - - if algo == "Register Patient": - addUser() - if algo == "View Patient Data": - ViewPatientRecord() - - - st.write ('\n') - st.write ('\n') - st.write ('\n') - - - #ledger start - #get ledger data - - st.subheader("Blockchain Ledger") - st.write("Click to explore Blockchain ledger [link](https://ghostnet.tzkt.io/KT1KvCVKiZhkPG8s9CCoxW3r135phk2HhZUV/operations/)") - - - ledger_data = pytezos.using(shell = 'https://rpc.tzkt.io/ghostnet').contract('KT1KvCVKiZhkPG8s9CCoxW3r135phk2HhZUV').storage() #.values() - - for x in ledger_data: - ledger = ledger_data.values() - - try: - df = pd.DataFrame(ledger, index=[0]) - #filters_widgets(df) - except: - df = pd.DataFrame(ledger)#, index=[0]) - #filters_widgets(df) - # Display the dataframe as a table - st.write(df) - - - -if __name__ == "__main__": - main() #streamlit-start - import subprocess - import uvicorn - - subprocess.run("uvicorn api.main:app --host 0.0.0.0 --port 7860", shell=True) - - - - ############end table/ledger - -#if __name__ == "__main__": - #main() - - - -#comments - #ledger = {'age': 18, 'gender': 'Female', 'hospital': '', 'name': 'tesuser1', 'number': 41414, 'v1': False, 'v1Date': 0, 'v2': False, 'v2Date': 0} - -# data = [ -# {"Name": "Alice", "Age": 25, "City": "New York"}, -# {"Name": "Bob", "Age": 30, "City": "Paris"}, -# {"Name": "Charlie", "Age": 35, "City": "London"} -# ] diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/utils.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/utils.py deleted file mode 100644 index 1d91f963370321cf093c7fb9adefaa018463c8da..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm -import json - - -def load_data(file_name: str = "./uvr5_pack/name_params.json") -> dict: - with open(file_name, "r") as f: - data = json.load(f) - - return data - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def inference(X_spec, device, model, aggressiveness, data): - """ - data : dic configs - """ - - def _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True - ): - model.eval() - with torch.no_grad(): - preds = [] - - iterations = [n_window] - - total_iterations = sum(iterations) - for i in tqdm(range(n_window)): - start = i * roi_size - X_mag_window = X_mag_pad[ - None, :, :, start : start + data["window_size"] - ] - X_mag_window = torch.from_numpy(X_mag_window) - if is_half: - X_mag_window = X_mag_window.half() - X_mag_window = X_mag_window.to(device) - - pred = model.predict(X_mag_window, aggressiveness) - - pred = pred.detach().cpu().numpy() - preds.append(pred[0]) - - pred = np.concatenate(preds, axis=2) - return pred - - def preprocess(X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - X_mag, X_phase = preprocess(X_spec) - - coef = X_mag.max() - X_mag_pre = X_mag / coef - 
- n_frame = X_mag_pre.shape[2] - pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) - n_window = int(np.ceil(n_frame / roi_size)) - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - if list(model.state_dict().values())[0].dtype == torch.float16: - is_half = True - else: - is_half = False - pred = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred = pred[:, :, :n_frame] - - if data["tta"]: - pad_l += roi_size // 2 - pad_r += roi_size // 2 - n_window += 1 - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - pred_tta = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred_tta = pred_tta[:, :, roi_size // 2 :] - pred_tta = pred_tta[:, :, :n_frame] - - return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) - else: - return pred * coef, X_mag, np.exp(1.0j * X_phase) - - -def _get_name_params(model_path, model_hash): - data = load_data() - flag = False - ModelName = model_path - for type in list(data): - for model in list(data[type][0]): - for i in range(len(data[type][0][model])): - if str(data[type][0][model][i]["hash_name"]) == model_hash: - flag = True - elif str(data[type][0][model][i]["hash_name"]) in ModelName: - flag = True - - if flag: - model_params_auto = data[type][0][model][i]["model_params"] - param_name_auto = data[type][0][model][i]["param_name"] - if type == "equivalent": - return param_name_auto, model_params_auto - else: - flag = False - return param_name_auto, model_params_auto diff --git a/spaces/sherjilozair/meta-llama-Llama-2-70b-chat-hf/README.md b/spaces/sherjilozair/meta-llama-Llama-2-70b-chat-hf/README.md deleted file mode 100644 index 06530a04eae2e35dd274a7133a4afb9becb4ebab..0000000000000000000000000000000000000000 --- a/spaces/sherjilozair/meta-llama-Llama-2-70b-chat-hf/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Meta Llama Llama 2 70b Chat Hf -emoji: 📊 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/modeling/roi_heads/multi_dataset_fast_rcnn.py b/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/modeling/roi_heads/multi_dataset_fast_rcnn.py deleted file mode 100644 index 7b6763d12b7ece402ccb98fc2ceb9432a9f8a236..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/modeling/roi_heads/multi_dataset_fast_rcnn.py +++ /dev/null @@ -1,74 +0,0 @@ -import logging -import math -from typing import Dict, Union -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.layers import Linear, ShapeSpec, batched_nms, cat, nonzero_tuple -from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats -from .custom_fast_rcnn import CustomFastRCNNOutputLayers - -class MultiDatasetFastRCNNOutputLayers(CustomFastRCNNOutputLayers): - def __init__( - self, - cfg, - num_classes_list, - input_shape: ShapeSpec, - **kwargs - ): - super().__init__(cfg, input_shape, **kwargs) - del self.cls_score - input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) - prior_prob = cfg.MODEL.ROI_BOX_HEAD.PRIOR_PROB - if cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE: - bias_value = -math.log((1 - prior_prob) / prior_prob) - else: - bias_value = 
0 - self.openimage_index = cfg.MULTI_DATASET.DATASETS.index('oid') - self.num_datasets = len(num_classes_list) - self.cls_score = nn.ModuleList() - for num_classes in num_classes_list: - self.cls_score.append(nn.Linear(input_size, num_classes + 1)) - nn.init.normal_(self.cls_score[-1].weight, std=0.01) - nn.init.constant_(self.cls_score[-1].bias, bias_value) - - def forward(self, x, dataset_source=-1): - if x.dim() > 2: - x = torch.flatten(x, start_dim=1) - proposal_deltas = self.bbox_pred(x) - if dataset_source >= 0: - scores = self.cls_score[dataset_source](x) - else: - scores = [self.cls_score[d](x) for d in range(self.num_datasets)] - return scores, proposal_deltas - - def losses(self, predictions, proposals, dataset_source): - use_advanced_loss = (dataset_source == self.openimage_index) - scores, proposal_deltas = predictions - gt_classes = ( - cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0) - ) - _log_classification_stats(scores, gt_classes) - - if len(proposals): - proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4 - assert not proposal_boxes.requires_grad, "Proposals should not require gradients!" - gt_boxes = cat( - [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals], - dim=0, - ) - else: - proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) - - if self.use_sigmoid_ce: - loss_cls = self.sigmoid_cross_entropy_loss( - scores, gt_classes, use_advanced_loss) - else: - assert not use_advanced_loss - loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes) - return { - "loss_cls": loss_cls, - "loss_box_reg": self.box_reg_loss( - proposal_boxes, gt_boxes, proposal_deltas, gt_classes) - } \ No newline at end of file diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/img_util.py b/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/img_util.py deleted file mode 100644 index d409a132ff216e6943a276fb5d8cd5f410824883..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/img_util.py +++ /dev/null @@ -1,170 +0,0 @@ -import cv2 -import math -import numpy as np -import os -import torch -from torchvision.utils import make_grid - - -def img2tensor(imgs, bgr2rgb=True, float32=True): - """Numpy array to tensor. - - Args: - imgs (list[ndarray] | ndarray): Input images. - bgr2rgb (bool): Whether to change bgr to rgb. - float32 (bool): Whether to change to float32. - - Returns: - list[tensor] | tensor: Tensor images. If returned results only have - one element, just return tensor. - """ - - def _totensor(img, bgr2rgb, float32): - if img.shape[2] == 3 and bgr2rgb: - if img.dtype == 'float64': - img = img.astype('float32') - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = torch.from_numpy(img.transpose(2, 0, 1)) - if float32: - img = img.float() - return img - - if isinstance(imgs, list): - return [_totensor(img, bgr2rgb, float32) for img in imgs] - else: - return _totensor(imgs, bgr2rgb, float32) - - -def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): - """Convert torch Tensors into image numpy arrays. - - After clamping to [min, max], values will be normalized to [0, 1]. - - Args: - tensor (Tensor or list[Tensor]): Accept shapes: - 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); - 2) 3D Tensor of shape (3/1 x H x W); - 3) 2D Tensor of shape (H x W). - Tensor channel should be in RGB order. - rgb2bgr (bool): Whether to change rgb to bgr. - out_type (numpy type): output types. 
If ``np.uint8``, transform outputs - to uint8 type with range [0, 255]; otherwise, float type with - range [0, 1]. Default: ``np.uint8``. - min_max (tuple[int]): min and max values for clamp. - - Returns: - (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of - shape (H x W). The channel order is BGR. - """ - if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): - raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') - - if torch.is_tensor(tensor): - tensor = [tensor] - result = [] - for _tensor in tensor: - _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) - _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) - - n_dim = _tensor.dim() - if n_dim == 4: - img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() - img_np = img_np.transpose(1, 2, 0) - if rgb2bgr: - img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) - elif n_dim == 3: - img_np = _tensor.numpy() - img_np = img_np.transpose(1, 2, 0) - if img_np.shape[2] == 1: # gray image - img_np = np.squeeze(img_np, axis=2) - else: - if rgb2bgr: - img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) - elif n_dim == 2: - img_np = _tensor.numpy() - else: - raise TypeError('Only support 4D, 3D or 2D tensor. ' f'But received with dimension: {n_dim}') - if out_type == np.uint8: - # Unlike MATLAB, numpy.unit8() WILL NOT round by default. - img_np = (img_np * 255.0).round() - img_np = img_np.astype(out_type) - result.append(img_np) - if len(result) == 1: - result = result[0] - return result - - -def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)): - """This implementation is slightly faster than tensor2img. - It now only supports torch tensor with shape (1, c, h, w). - - Args: - tensor (Tensor): Now only support torch tensor with (1, c, h, w). - rgb2bgr (bool): Whether to change rgb to bgr. Default: True. - min_max (tuple[int]): min and max values for clamp. - """ - output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0) - output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255 - output = output.type(torch.uint8).cpu().numpy() - if rgb2bgr: - output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) - return output - - -def imfrombytes(content, flag='color', float32=False): - """Read an image from bytes. - - Args: - content (bytes): Image bytes got from files or other streams. - flag (str): Flags specifying the color type of a loaded image, - candidates are `color`, `grayscale` and `unchanged`. - float32 (bool): Whether to change to float32., If True, will also norm - to [0, 1]. Default: False. - - Returns: - ndarray: Loaded image array. - """ - img_np = np.frombuffer(content, np.uint8) - imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED} - img = cv2.imdecode(img_np, imread_flags[flag]) - if float32: - img = img.astype(np.float32) / 255. - return img - - -def imwrite(img, file_path, params=None, auto_mkdir=True): - """Write image to file. - - Args: - img (ndarray): Image array to be written. - file_path (str): Image file path. - params (None or list): Same as opencv's :func:`imwrite` interface. - auto_mkdir (bool): If the parent folder of `file_path` does not exist, - whether to create it automatically. - - Returns: - bool: Successful or not. 
- """ - if auto_mkdir: - dir_name = os.path.abspath(os.path.dirname(file_path)) - os.makedirs(dir_name, exist_ok=True) - return cv2.imwrite(file_path, img, params) - - -def crop_border(imgs, crop_border): - """Crop borders of images. - - Args: - imgs (list[ndarray] | ndarray): Images with shape (h, w, c). - crop_border (int): Crop border for each end of height and weight. - - Returns: - list[ndarray]: Cropped images. - """ - if crop_border == 0: - return imgs - else: - if isinstance(imgs, list): - return [v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs] - else: - return imgs[crop_border:-crop_border, crop_border:-crop_border, ...] diff --git a/spaces/shuhulhandoo/face-swap/face_swap.py b/spaces/shuhulhandoo/face-swap/face_swap.py deleted file mode 100644 index 2c2c25002d660166877b471e63a5fd0eba156324..0000000000000000000000000000000000000000 --- a/spaces/shuhulhandoo/face-swap/face_swap.py +++ /dev/null @@ -1,238 +0,0 @@ -#! /usr/bin/env python -import cv2 -import numpy as np -import scipy.spatial as spatial -import logging - - -## 3D Transform -def bilinear_interpolate(img, coords): - """ Interpolates over every image channel - http://en.wikipedia.org/wiki/Bilinear_interpolation - :param img: max 3 channel image - :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords - :returns: array of interpolated pixels with same shape as coords - """ - int_coords = np.int32(coords) - x0, y0 = int_coords - dx, dy = coords - int_coords - - # 4 Neighour pixels - q11 = img[y0, x0] - q21 = img[y0, x0 + 1] - q12 = img[y0 + 1, x0] - q22 = img[y0 + 1, x0 + 1] - - btm = q21.T * dx + q11.T * (1 - dx) - top = q22.T * dx + q12.T * (1 - dx) - inter_pixel = top * dy + btm * (1 - dy) - - return inter_pixel.T - -def grid_coordinates(points): - """ x,y grid coordinates within the ROI of supplied points - :param points: points to generate grid coordinates - :returns: array of (x, y) coordinates - """ - xmin = np.min(points[:, 0]) - xmax = np.max(points[:, 0]) + 1 - ymin = np.min(points[:, 1]) - ymax = np.max(points[:, 1]) + 1 - - return np.asarray([(x, y) for y in range(ymin, ymax) - for x in range(xmin, xmax)], np.uint32) - - -def process_warp(src_img, result_img, tri_affines, dst_points, delaunay): - """ - Warp each triangle from the src_image only within the - ROI of the destination image (points in dst_points). - """ - roi_coords = grid_coordinates(dst_points) - # indices to vertices. 
-1 if pixel is not in any triangle - roi_tri_indices = delaunay.find_simplex(roi_coords) - - for simplex_index in range(len(delaunay.simplices)): - coords = roi_coords[roi_tri_indices == simplex_index] - num_coords = len(coords) - out_coords = np.dot(tri_affines[simplex_index], - np.vstack((coords.T, np.ones(num_coords)))) - x, y = coords.T - result_img[y, x] = bilinear_interpolate(src_img, out_coords) - - return None - - -def triangular_affine_matrices(vertices, src_points, dst_points): - """ - Calculate the affine transformation matrix for each - triangle (x,y) vertex from dst_points to src_points - :param vertices: array of triplet indices to corners of triangle - :param src_points: array of [x, y] points to landmarks for source image - :param dst_points: array of [x, y] points to landmarks for destination image - :returns: 2 x 3 affine matrix transformation for a triangle - """ - ones = [1, 1, 1] - for tri_indices in vertices: - src_tri = np.vstack((src_points[tri_indices, :].T, ones)) - dst_tri = np.vstack((dst_points[tri_indices, :].T, ones)) - mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :] - yield mat - - -def warp_image_3d(src_img, src_points, dst_points, dst_shape, dtype=np.uint8): - rows, cols = dst_shape[:2] - result_img = np.zeros((rows, cols, 3), dtype=dtype) - - delaunay = spatial.Delaunay(dst_points) - tri_affines = np.asarray(list(triangular_affine_matrices( - delaunay.simplices, src_points, dst_points))) - - process_warp(src_img, result_img, tri_affines, dst_points, delaunay) - - return result_img - - -## 2D Transform -def transformation_from_points(points1, points2): - points1 = points1.astype(np.float64) - points2 = points2.astype(np.float64) - - c1 = np.mean(points1, axis=0) - c2 = np.mean(points2, axis=0) - points1 -= c1 - points2 -= c2 - - s1 = np.std(points1) - s2 = np.std(points2) - points1 /= s1 - points2 /= s2 - - U, S, Vt = np.linalg.svd(np.dot(points1.T, points2)) - R = (np.dot(U, Vt)).T - - return np.vstack([np.hstack([s2 / s1 * R, - (c2.T - np.dot(s2 / s1 * R, c1.T))[:, np.newaxis]]), - np.array([[0., 0., 1.]])]) - - -def warp_image_2d(im, M, dshape): - output_im = np.zeros(dshape, dtype=im.dtype) - cv2.warpAffine(im, - M[:2], - (dshape[1], dshape[0]), - dst=output_im, - borderMode=cv2.BORDER_TRANSPARENT, - flags=cv2.WARP_INVERSE_MAP) - - return output_im - - -## Generate Mask -def mask_from_points(size, points,erode_flag=1): - radius = 10 # kernel size - kernel = np.ones((radius, radius), np.uint8) - - mask = np.zeros(size, np.uint8) - cv2.fillConvexPoly(mask, cv2.convexHull(points), 255) - if erode_flag: - mask = cv2.erode(mask, kernel,iterations=1) - - return mask - - -## Color Correction -def correct_colours(im1, im2, landmarks1): - COLOUR_CORRECT_BLUR_FRAC = 0.75 - LEFT_EYE_POINTS = list(range(42, 48)) - RIGHT_EYE_POINTS = list(range(36, 42)) - - blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm( - np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) - - np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0)) - blur_amount = int(blur_amount) - if blur_amount % 2 == 0: - blur_amount += 1 - im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0) - im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0) - - # Avoid divide-by-zero errors. 
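- # Wherever the blurred destination is very dark (values <= 1), shift it up by 128 so the
- # im1_blur / im2_blur ratio used for the colour correction below stays bounded.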
- im2_blur = im2_blur.astype(int) - im2_blur += 128*(im2_blur <= 1) - - result = im2.astype(np.float64) * im1_blur.astype(np.float64) / im2_blur.astype(np.float64) - result = np.clip(result, 0, 255).astype(np.uint8) - - return result - - -## Copy-and-paste -def apply_mask(img, mask): - """ Apply mask to supplied image - :param img: max 3 channel image - :param mask: [0-255] values in mask - :returns: new image with mask applied - """ - masked_img=cv2.bitwise_and(img,img,mask=mask) - - return masked_img - - -## Alpha blending -def alpha_feathering(src_img, dest_img, img_mask, blur_radius=15): - mask = cv2.blur(img_mask, (blur_radius, blur_radius)) - mask = mask / 255.0 - - result_img = np.empty(src_img.shape, np.uint8) - for i in range(3): - result_img[..., i] = src_img[..., i] * mask + dest_img[..., i] * (1-mask) - - return result_img - - -def check_points(img,points): - # Todo: I just consider one situation. - if points[8,1]>img.shape[0]: - logging.error("Jaw part out of image") - else: - return True - return False - - -def face_swap(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, args, end=48): - h, w = dst_face.shape[:2] - - ## 3d warp - warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h, w)) - ## Mask for blending - mask = mask_from_points((h, w), dst_points) - mask_src = np.mean(warped_src_face, axis=2) > 0 - mask = np.asarray(mask * mask_src, dtype=np.uint8) - ## Correct color - if args == "correct color": - warped_src_face = apply_mask(warped_src_face, mask) - dst_face_masked = apply_mask(dst_face, mask) - warped_src_face = correct_colours(dst_face_masked, warped_src_face, dst_points) - ## 2d warp - if args == "warp_2d": - unwarped_src_face = warp_image_3d(warped_src_face, dst_points[:end], src_points[:end], src_face.shape[:2]) - warped_src_face = warp_image_2d(unwarped_src_face, transformation_from_points(dst_points, src_points), - (h, w, 3)) - - mask = mask_from_points((h, w), dst_points) - mask_src = np.mean(warped_src_face, axis=2) > 0 - mask = np.asarray(mask * mask_src, dtype=np.uint8) - - ## Shrink the mask - kernel = np.ones((10, 10), np.uint8) - mask = cv2.erode(mask, kernel, iterations=1) - ##Poisson Blending - r = cv2.boundingRect(mask) - center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))) - output = cv2.seamlessClone(warped_src_face, dst_face, mask, center, cv2.NORMAL_CLONE) - - x, y, w, h = dst_shape - dst_img_cp = dst_img.copy() - dst_img_cp[y:y + h, x:x + w] = output - - return dst_img_cp diff --git a/spaces/sidharthism/fashion-eye/netdissect/segmodel/resnext.py b/spaces/sidharthism/fashion-eye/netdissect/segmodel/resnext.py deleted file mode 100644 index cdbb7461a6c8eb126717967cdca5d5ce392aecea..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/netdissect/segmodel/resnext.py +++ /dev/null @@ -1,182 +0,0 @@ -import os -import sys -import torch -import torch.nn as nn -import math -try: - from lib.nn import SynchronizedBatchNorm2d -except ImportError: - from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -__all__ = ['ResNeXt', 'resnext101'] # support resnext 101 - - -model_urls = { - #'resnext50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext50-imagenet.pth', - 'resnext101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext101-imagenet.pth' -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return 
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class GroupBottleneck(nn.Module): - expansion = 2 - - def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None): - super(GroupBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, groups=groups, bias=False) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False) - self.bn3 = SynchronizedBatchNorm2d(planes * 2) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNeXt(nn.Module): - - def __init__(self, block, layers, groups=32, num_classes=1000): - self.inplanes = 128 - super(ResNeXt, self).__init__() - self.conv1 = conv3x3(3, 64, stride=2) - self.bn1 = SynchronizedBatchNorm2d(64) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = conv3x3(64, 64) - self.bn2 = SynchronizedBatchNorm2d(64) - self.relu2 = nn.ReLU(inplace=True) - self.conv3 = conv3x3(64, 128) - self.bn3 = SynchronizedBatchNorm2d(128) - self.relu3 = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, 128, layers[0], groups=groups) - self.layer2 = self._make_layer(block, 256, layers[1], stride=2, groups=groups) - self.layer3 = self._make_layer(block, 512, layers[2], stride=2, groups=groups) - self.layer4 = self._make_layer(block, 1024, layers[3], stride=2, groups=groups) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc = nn.Linear(1024 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, SynchronizedBatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1, groups=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - SynchronizedBatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, groups, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=groups)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - - -''' -def resnext50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNeXt(GroupBottleneck, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnext50']), strict=False) - return model -''' - - -def resnext101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNeXt(GroupBottleneck, [3, 4, 23, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnext101']), strict=False) - return model - - -# def resnext152(pretrained=False, **kwargs): -# """Constructs a ResNeXt-152 model. -# -# Args: -# pretrained (bool): If True, returns a model pre-trained on Places -# """ -# model = ResNeXt(GroupBottleneck, [3, 8, 36, 3], **kwargs) -# if pretrained: -# model.load_state_dict(load_url(model_urls['resnext152'])) -# return model - - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) diff --git a/spaces/sidharthism/fashion-eye/netdissect/server.py b/spaces/sidharthism/fashion-eye/netdissect/server.py deleted file mode 100644 index d8422a2bad5ac2a09d4582a98da4f962dac1a911..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/netdissect/server.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python - -import argparse, connexion, os, sys, yaml, json, socket -from netdissect.easydict import EasyDict -from flask import send_from_directory, redirect -from flask_cors import CORS - - -from netdissect.serverstate import DissectionProject - -__author__ = 'Hendrik Strobelt, David Bau' - -CONFIG_FILE_NAME = 'dissect.json' -projects = {} - -app = connexion.App(__name__, debug=False) - - -def get_all_projects(): - res = [] - for key, project in projects.items(): - # print key - res.append({ - 'project': key, - 'info': { - 'layers': [layer['layer'] for layer in project.get_layers()] - } - }) - return sorted(res, key=lambda x: x['project']) - -def get_layers(project): - return { - 'request': {'project': project}, - 'res': projects[project].get_layers() - } - -def get_units(project, layer): - return { - 'request': {'project': project, 'layer': layer}, - 'res': projects[project].get_units(layer) - } - -def get_rankings(project, layer): - return { - 'request': {'project': project, 'layer': layer}, - 'res': projects[project].get_rankings(layer) - } - -def get_levels(project, layer, quantiles): - return { - 'request': {'project': project, 'layer': layer, 'quantiles': quantiles}, - 'res': projects[project].get_levels(layer, quantiles) - } - -def get_channels(project, layer): - answer = dict(channels=projects[project].get_channels(layer)) - return { - 'request': {'project': project, 'layer': layer}, - 'res': answer - } - -def post_generate(gen_req): - project = gen_req['project'] - zs = gen_req.get('zs', None) - ids = gen_req.get('ids', None) - return_urls = gen_req.get('return_urls', False) - assert (zs is None) != (ids is None) # one or the other, not both - ablations = gen_req.get('ablations', []) - interventions = gen_req.get('interventions', None) - # no z avilable if ablations - generated = projects[project].generate_images(zs, ids, interventions, - 
return_urls=return_urls) - return { - 'request': gen_req, - 'res': generated - } - -def post_features(feat_req): - project = feat_req['project'] - ids = feat_req['ids'] - masks = feat_req.get('masks', None) - layers = feat_req.get('layers', None) - interventions = feat_req.get('interventions', None) - features = projects[project].get_features( - ids, masks, layers, interventions) - return { - 'request': feat_req, - 'res': features - } - -def post_featuremaps(feat_req): - project = feat_req['project'] - ids = feat_req['ids'] - layers = feat_req.get('layers', None) - interventions = feat_req.get('interventions', None) - featuremaps = projects[project].get_featuremaps( - ids, layers, interventions) - return { - 'request': feat_req, - 'res': featuremaps - } - -@app.route('/client/') -def send_static(path): - """ serves all files from ./client/ to ``/client/`` - - :param path: path from api call - """ - return send_from_directory(args.client, path) - -@app.route('/data/') -def send_data(path): - """ serves all files from the data dir to ``/dissect/`` - - :param path: path from api call - """ - print('Got the data route for', path) - return send_from_directory(args.data, path) - - -@app.route('/') -def redirect_home(): - return redirect('/client/index.html', code=302) - - -def load_projects(directory): - """ - searches for CONFIG_FILE_NAME in all subdirectories of directory - and creates data handlers for all of them - - :param directory: scan directory - :return: null - """ - project_dirs = [] - # Don't search more than 2 dirs deep. - search_depth = 2 + directory.count(os.path.sep) - for root, dirs, files in os.walk(directory): - if CONFIG_FILE_NAME in files: - project_dirs.append(root) - # Don't get subprojects under a project dir. - del dirs[:] - elif root.count(os.path.sep) >= search_depth: - del dirs[:] - for p_dir in project_dirs: - print('Loading %s' % os.path.join(p_dir, CONFIG_FILE_NAME)) - with open(os.path.join(p_dir, CONFIG_FILE_NAME), 'r') as jf: - config = EasyDict(json.load(jf)) - dh_id = os.path.split(p_dir)[1] - projects[dh_id] = DissectionProject( - config=config, - project_dir=p_dir, - path_url='data/' + os.path.relpath(p_dir, directory), - public_host=args.public_host) - -app.add_api('server.yaml') - -# add CORS support -CORS(app.app, headers='Content-Type') - -parser = argparse.ArgumentParser() -parser.add_argument("--nodebug", default=False) -parser.add_argument("--address", default="127.0.0.1") # 0.0.0.0 for nonlocal use -parser.add_argument("--port", default="5001") -parser.add_argument("--public_host", default=None) -parser.add_argument("--nocache", default=False) -parser.add_argument("--data", type=str, default='dissect') -parser.add_argument("--client", type=str, default='client_dist') - -if __name__ == '__main__': - args = parser.parse_args() - for d in [args.data, args.client]: - if not os.path.isdir(d): - print('No directory %s' % d) - sys.exit(1) - args.data = os.path.abspath(args.data) - args.client = os.path.abspath(args.client) - if args.public_host is None: - args.public_host = '%s:%d' % (socket.getfqdn(), int(args.port)) - app.run(port=int(args.port), debug=not args.nodebug, host=args.address, - use_reloader=False) -else: - args, _ = parser.parse_known_args() - if args.public_host is None: - args.public_host = '%s:%d' % (socket.getfqdn(), int(args.port)) - load_projects(args.data) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Room and a Half on Your PC or Mac with BlueStacks Emulator.md 
b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Room and a Half on Your PC or Mac with BlueStacks Emulator.md deleted file mode 100644 index 4caa3a2c8720494564c7bd0a98f16a1a89ca04a7..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Room and a Half on Your PC or Mac with BlueStacks Emulator.md +++ /dev/null @@ -1,143 +0,0 @@ -
          -

          How to Download Room and a Half, a Biographical Film About Joseph Brodsky

          -

          If you are looking for a captivating, creative, and inspiring film to watch, you might want to consider Room and a Half, a biographical film about Joseph Brodsky, one of the most influential poets of the 20th century. In this article, we will tell you what Room and a Half is about, why it is worth watching, where you can find it online, and how you can enjoy it to the fullest.

          -

          Introduction

          -

          Room and a Half is a 2009 Russian film directed by Andrey Khrzhanovsky, based on the life and poetry of Joseph Brodsky, who won the Nobel Prize in Literature in 1987. The film combines live-action, animation, documentary, and fantasy elements to create a unique portrait of Brodsky and his world, from his childhood in post-war Leningrad to his exile in America.

          -

          download room and a half


          Download Ziphttps://ssurll.com/2uNX7D



          -

          Joseph Brodsky was born in 1940 in Leningrad (now St. Petersburg), Russia. He was a self-taught poet who started writing at an early age. He was persecuted by the Soviet authorities for his dissident views and his refusal to conform to the official literary norms. He was arrested, tried, sentenced to hard labor, and eventually expelled from the country in 1972. He settled in New York, where he continued to write poetry, essays, lectures, and translations. He became an American citizen in 1977. He died in 1996 at the age of 55.

          -

Room and a Half explores some of the main themes and features of Brodsky's life and work, such as his love for his parents, his nostalgia for his homeland, his passion for language, his sense of humor, his loneliness, his courage, his spirituality, his curiosity, his generosity, his wisdom, and his legacy. The film also pays tribute to some of his influences and friends, such as Anna Akhmatova, W.H. Auden, Nadezhda Mandelstam, Marina Tsvetaeva, Mikhail Baryshnikov, Susan Sontag, and others. It also features some of Brodsky's poems, recited in Russian and English by himself and others.

          -

          If you are interested in learning more about this remarkable poet and his extraordinary film, read on to find out how you can download Room and a Half and enjoy it.

          -

          Where to Find Room and a Half Online

          -

          Room and a Half is not a very popular or widely available film, but there are still some options for you to find it online. Here are some of the most common ones:

          -

          YouTube

          -

          One of the easiest ways to watch Room and a Half online is to use YouTube. You can watch the trailer for free, or you can rent or buy the film for a small fee. To do so, you need to have a Google account and a valid payment method. You can also choose the quality and the language of the video, depending on your preferences and your internet connection.

          -

          The advantages of using YouTube are that it is fast, convenient, and reliable. You can watch the film on any device that supports YouTube, such as your computer, smartphone, tablet, or smart TV. You can also pause, rewind, or fast-forward the film as you wish. The disadvantages are that you need to have a good internet connection to stream the film smoothly, and that you might not be able to download the film for offline viewing.

          -

          Google Play

          -

          Another option to download Room and a Half online is to use Google Play. You can either download the film itself, or you can download a game based on the film, called Room and a Half: The Game. The game is an interactive adventure that lets you explore Brodsky's life and poetry in a fun and creative way. You can download the game for free, but you need to have an Android device and a Google account to play it.

          -

          The advantages of using Google Play are that you can download the film or the game for offline viewing or playing, and that you can enjoy the game's graphics, sounds, and features. The disadvantages are that you need to have enough storage space on your device to download the film or the game, and that you might not be able to watch the film on other devices or platforms.

          -

          download room and a half movie
          -download room and a half trailer
          -download room and a half subtitles
          -download room and a half film review
          -download room and a half biopic of Joseph Brodsky
          -download room and a half Russian movie
          -download room and a half 2009
          -download room and a half Nika Award winner
          -download room and a half Andrey Khrzhanovsky
          -download room and a half animation
          -download room and a half poetry
          -download room and a half full movie online
          -download room and a half DVD
          -download room and a half soundtrack
          -download room and a half rotten tomatoes
          -download room and a half imdb
          -download room and a half wikipedia
          -download room and a half english subtitles
          -download room and a half streaming
          -download room and a half watch online free
          -download room and a half torrent
          -download room and a half 1080p
          -download room and a half 720p
          -download room and a half bluray
          -download room and a half mp4
          -download room and a half avi
          -download room and a half mkv
          -download room and a half youtube
          -download room and a half vimeo
          -download room and a half dailymotion
          -download room and a half google drive
          -download room and a half netflix
          -download room and a half amazon prime video
          -download room and a half hulu
          -download room and a half disney plus
          -download room and a half apple tv plus
          -download room and a half hbo max
          -download room and a half peacock tv
          -download room and a half paramount plus
          -download room and a half discovery plus

          -

          Other Options

          -

          If you are not satisfied with YouTube or Google Play, you can also try to find other platforms or websites that offer Room and a Half online. Some of them might be legal, while others might be illegal or unsafe. You need to be careful and do your research before using any of them. Some of the possible options are:

          -
            -
          • Amazon Prime Video: You can watch Room and a Half on Amazon Prime Video if you have an Amazon Prime membership or if you buy or rent the film individually. You can also download the film for offline viewing on some devices.
          • -
          • iTunes: You can buy or rent Room and a Half on iTunes if you have an Apple device and an Apple ID. You can also download the film for offline viewing on some devices.
          • -
          • Vudu: You can buy or rent Room and a Half on Vudu if you have a Vudu account and a valid payment method. You can also download the film for offline viewing on some devices.
          • -
          • Pirate Bay: You can download Room and a Half for free on Pirate Bay if you have a torrent client and a VPN. However, this is an illegal and risky option that might expose you to viruses, malware, legal issues, or ethical dilemmas.
          • -
          -

          The advantages of using other options are that you might have more choices, lower prices, or better quality than YouTube or Google Play. The disadvantages are that you might encounter some problems, such as compatibility issues, technical errors, security threats, or legal troubles.

          How to Enjoy Room and a Half to the Fullest

          -

          Now that you have downloaded Room and a Half online, you might wonder how you can enjoy it to the fullest. Here are some tips and suggestions that might help you:

          -

          Watch it with Subtitles or Dubbing

          -

          Room and a Half is a film that uses multiple languages, such as Russian, English, French, and Italian. Depending on your level of proficiency and preference, you might want to watch it with subtitles or dubbing. Subtitles are text that appear on the screen that translate the dialogue or narration of the film. Dubbing is audio that replaces the original voice of the actors or narrators with another language.

          -

          To choose the language and subtitle options that suit you best, you need to check the settings of the platform or website that you are using to watch the film. Some of them might offer more options than others. For example, YouTube offers subtitles in English, Russian, Spanish, French, German, Italian, Portuguese, Turkish, Arabic, and Chinese. Google Play offers subtitles in English and Russian. Other platforms or websites might have different or fewer options.

          -

          Watching Room and a Half with subtitles or dubbing can enhance your understanding and enjoyment of the film for several reasons. First, it can help you follow the plot and the characters better. Second, it can help you appreciate the poetry and the language of Brodsky better. Third, it can help you learn some new words or phrases in different languages.

          -

          Learn More About Joseph Brodsky and His Poetry

          -

          Room and a Half is a film that is based on the life and poetry of Joseph Brodsky. If you want to enjoy it to the fullest, you might want to learn more about him and his work. You can find and read some of his poems and essays online or in print. You can also watch some of his interviews or lectures online or on DVD.

          -

          To find and read some of his poems and essays online, you can use some of these websites:

          -
            -
          • The Poetry Foundation: This website offers a biography, a bibliography, and a selection of poems by Brodsky in English translation. You can also listen to some of his poems read by himself or others.
          • -
          • The Nobel Prize: This website offers a biography, a bibliography, a speech, an interview, and a lecture by Brodsky when he received the Nobel Prize in Literature in 1987.
          • -
          • The New York Review of Books: This website offers a collection of essays by Brodsky on various topics, such as literature, politics, culture, history, and art.
          • -
          -

          To find and read some of his poems and essays in print, you can use some of these books:

          -
            -
          • Collected Poems in English: This book offers a comprehensive collection of poems by Brodsky in English translation, edited by Ann Kjellberg.
          • -
          • Less Than One: Selected Essays: This book offers a selection of essays by Brodsky on various topics, such as literature, politics, culture, history, and art.
          • -
          • On Grief and Reason: Essays: This book offers another selection of essays by Brodsky on various topics, such as literature, politics, culture, history, and art.
          • -
          -

          To watch some of his interviews or lectures online or on DVD, you can use some of these sources:

          -
            -
          • The Paris Review: This website offers an interview with Brodsky conducted by Peter Vail in 1979.
          • -
          • The New Yorker: This website offers an interview with Brodsky conducted by Cynthia Haven in 1995.
          • -
          • A Voice from Exile: Joseph Brodsky: This is a documentary film that features interviews with Brodsky and his friends and colleagues.
          • -
          -

          Learning more about Joseph Brodsky and his poetry can enrich your appreciation of Room and a Half and his life for several reasons. First, it can help you understand his background and his perspective better. Second, it can help you discover his style and his themes better. Third, it can help you admire his talent and his legacy better.

          Share Your Thoughts and Impressions with Others

          -

          Room and a Half is a film that can provoke many thoughts and impressions in the viewers. If you want to enjoy it to the fullest, you might want to share them with others. You can join online discussions or forums about Room and a Half or Joseph Brodsky, where you can express your opinions, ask questions, answer queries, exchange ideas, or make friends.

          -

          To join online discussions or forums about Room and a Half or Joseph Brodsky, you can use some of these websites:

          -
            -
          • IMDb: This website offers a page for Room and a Half, where you can rate the film, write reviews, read trivia, watch clips, and join the message board.
          • -
          • Reddit: This website offers a subreddit for Joseph Brodsky, where you can post links, texts, images, or videos related to him and his work, and comment on other posts.
          • -
          • Goodreads: This website offers a group for Joseph Brodsky, where you can discuss his books, poems, essays, and biography, and participate in polls, quizzes, and challenges.
          • -
          -

          Sharing your thoughts and impressions with others can deepen your insights and connections with others for several reasons. First, it can help you clarify your own thoughts and impressions by articulating them. Second, it can help you learn from other people's thoughts and impressions by listening to them. Third, it can help you create a sense of community and belonging by interacting with them.

          -

          Conclusion

          -

          In conclusion, Room and a Half is a biographical film about Joseph Brodsky, one of the most influential poets of the 20th century. The film combines live-action, animation, documentary, and fantasy elements to create a unique portrait of Brodsky and his world. The film explores some of the main themes and features of Brodsky's life and work, such as his love for his parents, his nostalgia for his homeland, his passion for language, his sense of humor, his loneliness, his courage, his spirituality , his curiosity, his generosity, his wisdom, and his legacy. If you are interested in watching this remarkable film, you can download it online from various platforms or websites, such as YouTube, Google Play, Amazon Prime Video, iTunes, Vudu, or Pirate Bay. However, you need to be careful and do your research before using any of them, as some of them might have some problems or risks. To enjoy Room and a Half to the fullest, you can also watch it with subtitles or dubbing, learn more about Joseph Brodsky and his poetry, and share your thoughts and impressions with others. You can use various websites or books to find and read some of his poems and essays, watch some of his interviews or lectures, and join some online discussions or forums about him and his work. We hope that this article has helped you learn more about Room and a Half and Joseph Brodsky, and that you will download the film and enjoy it. We also hope that you will discover the beauty and the power of Brodsky's poetry and prose, and that you will be inspired by his life and his legacy.

          FAQs

          -

          Here are some of the frequently asked questions about Room and a Half and Joseph Brodsky:

          -
            -
          1. What are some of the awards and nominations that Room and a Half received?
          2. -

            Room and a Half received several awards and nominations from various film festivals and organizations, such as:

            -
              -
            • The Nika Award for Best Film in 2010.
            • -
            • The Golden Eagle Award for Best Screenplay in 2010.
            • -
            • The Golden Ram Award for Best Film in 2010.
            • -
            • The Kinotavr Award for Best Director in 2009.
            • -
            • The Venice Film Festival Special Jury Prize in 2009.
            • -
            -
          3. Who are some of the actors and actresses who played in Room and a Half?
          4. -

            Room and a Half features a cast of talented actors and actresses who played various roles in the film, such as:

            -
              -
            • Grigoriy Dityatkovskiy as Joseph Brodsky.
            • -
            • Sergey Yurskiy as Joseph Brodsky's father.
            • -
            • Alisa Freyndlikh as Joseph Brodsky's mother.
            • -
            • Karen Shakhnazarov as W.H. Auden.
            • -
            • Mikhail Efremov as Mikhail Baryshnikov.
            • -
            -
          5. What are some of the techniques and styles that the director used in Room and a Half?
          6. -

            Room and a Half is a film that uses various techniques and styles to create a unique cinematic experience, such as:

            -
              -
            • Animation: The film uses animation to depict some of the scenes from Brodsky's childhood, dreams, fantasies, or poems.
            • -
            • Documentary: The film uses documentary footage to show some of the historical events or places that influenced Brodsky's life or work.
            • -
            • Fantasy: The film uses fantasy elements to show some of the imaginary scenarios or situations that Brodsky envisioned or experienced.
            • -
            • Collage: The film uses collage techniques to mix different media, such as photographs, drawings, paintings, newspapers, or books.
            • -
            -
          7. How accurate is Room and a Half in depicting Joseph Brodsky's life and personality?
          8. -

            Room and a Half is not a strictly factual or realistic film. It is a creative interpretation of Brodsky's life and personality, based on his poetry, his essays, his interviews, his memoirs, his friends' testimonies, and the director's imagination. The film does not aim to provide a complete or objective biography of Brodsky. It aims to capture his essence, his spirit, his voice, his vision, and his emotions. Therefore, some of the details or events in the film might be fictionalized, exaggerated, simplified , or rearranged for artistic purposes. Therefore, the film should not be taken as a literal or definitive account of Brodsky's life and personality. It should be taken as a poetic or symbolic tribute to his life and personality.

            -
          9. What are some of the challenges and difficulties that Joseph Brodsky faced in his life?
          10. -

            Joseph Brodsky faced many challenges and difficulties in his life, such as:

            -
              -
            • Poverty: He grew up in a poor family that lived in a small and crowded apartment in Leningrad. He had to work various jobs to support himself and his parents.
            • -
            • Persecution: He was harassed, arrested, tried, sentenced, and expelled by the Soviet authorities for his dissident views and his refusal to conform to the official literary norms. He was also denied access to education, travel, publication, and recognition.
            • -
            • Exile: He was forced to leave his homeland and his family and friends in 1972. He had to adapt to a new culture and a new language in America. He also suffered from homesickness, isolation, and depression.
            • -
            • Illness: He had a congenital heart condition that caused him chronic pain and fatigue. He also had asthma, diabetes, and kidney problems. He died of a heart attack in 1996 at the age of 55.
            • -
            -

            Despite these challenges and difficulties, Joseph Brodsky never gave up on his poetry and his passion for life. He overcame his hardships with courage, humor, grace, and dignity. He became one of the most respected and celebrated poets of his time. He left behind a rich and powerful legacy of poetry and prose that continues to inspire and enlighten people around the world.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Magic and Mischief of Little Krishna APK for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Magic and Mischief of Little Krishna APK for Android.md deleted file mode 100644 index eb161fe3ba32fb667322b59b3add24e2136f65e1..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Magic and Mischief of Little Krishna APK for Android.md +++ /dev/null @@ -1,119 +0,0 @@ - -

            Little Krishna APK Download: A Fun and Adventurous Game for Android

            -

            If you are looking for a fun and adventurous game for your Android device, you might want to try Little Krishna APK. This is a game based on the popular Indian animated series, Little Krishna, that follows the exploits of the mischievous and charming young god, Krishna. In this game, you can play as Little Krishna and help him defeat the evil Putana, who is trying to destroy Vrindavan and its people. You can also explore the vibrant land of Vrindavan, collect coins and power-ups, unlock different characters, and enjoy challenging boss fights. In this article, we will tell you more about what Little Krishna APK is, how to download and install it, how to play and enjoy it, and what are some alternatives and similar games to it.

            -

            What is Little Krishna APK?

            -

Little Krishna APK is an Android game developed by Zapak, a leading Indian gaming company. It is an official game based on the animated series, Little Krishna, which is produced by Reliance Animation and The Indian Heritage Foundation. The series is inspired by the childhood stories of Lord Krishna, one of the most revered Hindu deities. The game was released in 2017 and has received over 10 million downloads and a 4.7-star rating on the Google Play Store. It is a 3D endless runner that combines action, platformer, and arcade elements.

            -

            little krishna apk download


            Download Zip: https://ssurll.com/2uNWLL



            -

            The story and characters of Little Krishna

            -

            The game follows the story of Little Krishna, who is the darling of Vrindavan. He is a playful and naughty boy who loves to prank his friends and family. He also has a divine power that he uses to protect Vrindavan from evil forces. One of his enemies is Putana, a demoness who disguises herself as a beautiful woman and tries to kill him by feeding him poisoned milk. She also sends her minions to attack Vrindavan and its people. Little Krishna has to chase Putana across Vrindavan and bring her to justice for her evil deeds.

            -

            The game also features other characters from the series, such as Radha, Balram, Yashoda, Nanda, Kansa, Aghasura, Bakasura, Trinavarta, etc. Each character has a specific ability that can help Little Krishna in his quest. For example, Radha can heal him, Balram can smash obstacles, Yashoda can give him more coins, etc. You can unlock these characters by collecting tokens on the run or by purchasing them with real money.

            -

            The gameplay and features of Little Krishna

            -

            The gameplay of Little Krishna is simple but addictive. You have to swipe left or right to move Little Krishna on the screen, swipe up to jump over obstacles or enemies, swipe down to slide under them or attack them with your flute. You have to avoid raging bulls, angry elephants, hot lava streams, falling rocks, flying arrows, etc. You also have to collect coins that you can use to upgrade your power-ups or buy items in the shop. You can also gather rewards such as feathers, makhan (butter), peacock feathers, etc. that can give you extra points or bonuses.

            -

            The game also has various features that make it more fun and exciting. Some of them are:

            -
              -
            • The game has amazing HD graphics that capture the beauty and charm of Vrindavan.
            • -
            • The game has an original soundtrack that matches the mood and theme of the game.
            • -
            • The game has different modes such as Story Mode, Challenge Mode, Endless Mode, etc. that offer different levels of difficulty and objectives.
            • -
            • The game has challenging boss fights with Put ana, Aghasura, Bakasura, Trinavarta, etc. that require you to use your skills and strategies to defeat them.
            • -
            • The game has social features that allow you to share your scores and achievements with your friends on Facebook or Twitter.
            • -
            • The game has a table that shows the ranking of the top players in the world and in your country.
            • -
            -

            How to download and install Little Krishna APK?

            -

            If you want to download and install Little Krishna APK on your Android device, you need to follow some simple steps. But before that, you need to make sure that your device meets the following requirements:

            -
              -
            • Your device should have Android 4.2 or higher version.
            • -
            • Your device should have at least 1 GB of RAM and 100 MB of free storage space.
            • -
            • Your device should have a stable internet connection.
            • -
            -

            Once you have checked these requirements, you can proceed with the following steps:

            -
              -
            1. Go to the Google Play Store and search for Little Krishna APK or click on this link to go directly to the game page.
            2. -
            3. Tap on the Install button and wait for the game to download and install on your device.
            4. -
            5. Once the installation is complete, tap on the Open button to launch the game and enjoy playing it.
            6. -
            -

            The benefits and risks of downloading Little Krishna APK

            -

            Downloading Little Krishna APK has some benefits and risks that you should be aware of. Here are some of them:

            | Benefits | Risks |
            | --- | --- |
            | You can enjoy playing a fun and adventurous game based on a popular animated series. | You may encounter some bugs or glitches that may affect the game performance or functionality. |
            | You can experience the story and characters of Little Krishna in a new and interactive way. | You may face some compatibility issues with your device or operating system. |
            | You can challenge yourself with different modes, levels, and boss fights. | You may expose your device to malware or viruses that may harm your data or privacy. |
            | You can share your scores and achievements with your friends and compete with other players around the world. | You may spend too much time or money on the game and neglect other important aspects of your life. |
            -

            Therefore, you should download Little Krishna APK only from trusted sources such as the Google Play Store and avoid downloading it from unknown or unverified websites. You should also scan your device regularly with antivirus software and update the game whenever a new version is available. Finally, play the game responsibly and in moderation so that it does not affect your health or well-being.
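
            If you do end up with an APK file from anywhere other than the Play Store, one extra precaution is to compare the file's checksum against a value published by a source you trust before installing it. The short sketch below is only an illustration and is not tied to any official release of Little Krishna; the file name and expected hash are placeholders you would replace with real values.

```python
# Hypothetical sketch: compare a downloaded APK's SHA-256 hash with a
# checksum obtained from a trusted source. The file name and expected
# value are placeholders, not official values for Little Krishna.
import hashlib

APK_PATH = "little-krishna.apk"                    # assumed local file name
EXPECTED_SHA256 = "replace-with-trusted-checksum"  # assumed published hash

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # read the file in chunks so large APKs do not need to fit in memory
    for chunk in iter(lambda: f.read(8192), b""):
        sha256.update(chunk)

if sha256.hexdigest() == EXPECTED_SHA256:
    print("Checksum matches the trusted value.")
else:
    print("Checksum does NOT match - do not install this file.")
```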

            -

            little krishna game apk download
            -little krishna mod apk download
            -little krishna apk free download
            -little krishna runner apk download
            -little krishna android game download
            -little krishna apk latest version
            -little krishna apk download for pc
            -little krishna apk offline
            -little krishna apk hack
            -little krishna apk unlimited money
            -little krishna adventure game download
            -little krishna cartoon game download
            -little krishna 3d game download
            -little krishna action game download
            -little krishna fun game download
            -little krishna official game download
            -little krishna zapak game download
            -little krishna mobile game download
            -little krishna best game download
            -little krishna new game download
            -little krishna apk pure
            -little krishna apk mirror
            -little krishna apk uptodown
            -little krishna apk rexdl
            -little krishna apk revdl
            -little krishna apk mob.org
            -little krishna apk apkpure.com
            -little krishna apk apkmirror.com
            -little krishna apk uptodown.com
            -little krishna apk rexdl.com
            -little krishna app download
            -little krishna app free download
            -little krishna app for android
            -little krishna app store
            -little krishna app play store
            -little krishna app online
            -little krishna app offline
            -little krishna app hack
            -little krishna app mod
            -little krishna app update
            -how to download little krishna apk
            -where to download little krishna apk
            -why to download little krishna apk
            -what is little krishna apk
            -who made little krishna apk
            -when was little krishna apk released
            -which is the best version of little krishna apk

            -

            How to play and enjoy Little Krishna APK?

            Now that you have downloaded and installed Little Krishna APK, you might be wondering how to play and enjoy it. Well, don't worry, we have some tips and tricks for you that will help you master the game and have fun with it. Here are some of them:

            -

            The tips and tricks for playing Little Krishna APK

            -
              -
            • Learn the controls and gestures of the game. You can swipe left or right to move Little Krishna, swipe up to jump, swipe down to slide or attack, and tap to use power-ups. You can also change the sensitivity of the swipes in the settings menu.
            • -
            • Collect as many coins as you can on the run. You can use them to upgrade your power-ups such as speed boost, magnet, shield, etc. You can also buy items such as costumes, flutes, makhan, etc. in the shop.
            • -
            • Use your power-ups wisely. They can help you overcome obstacles, enemies, or boss fights. For example, you can use the speed boost to run faster, the magnet to attract coins, the shield to protect yourself from damage, etc.
            • -
            • Unlock different characters and use their abilities. You can unlock characters such as Radha, Balram, Yashoda, etc. by collecting tokens on the run or by purchasing them with real money. Each character has a special ability that can help you in the game. For example, Radha can heal you, Balram can smash obstacles, Yashoda can give you more coins, etc.
            • -
            • Explore different locations and environments in Vrindavan. The game has various scenes such as forests, rivers, mountains, temples, etc. that offer different challenges and rewards. You can also find hidden paths and secrets that can lead you to more coins or power-ups.
            • -
            • Complete missions and achievements to earn more rewards and bonuses. The game has different missions and achievements that you can complete by fulfilling certain criteria or objectives. For example, you can complete missions such as running a certain distance, collecting a certain number of coins or feathers, defeating a certain number of enemies or bosses, etc. You can also earn achievements such as unlocking all characters, upgrading all power-ups, completing all modes, etc.
            • -
            • Play different modes and levels to test your skills and challenge yourself. The game has different modes such as Story Mode, Challenge Mode, Endless Mode, etc. that offer different levels of difficulty and objectives. For example, in Story Mode, you have to follow the plot of the series and defeat Putana and her minions in various stages. In Challenge Mode, you have to complete specific tasks or goals within a limited time or distance. In Endless Mode, you have to run as far as you can without dying or stopping.
            • -
            -

            The alternatives and similar games to Little Krishna APK

            -

            If you like Little Krishna APK, you might also like some other games that are similar to it in terms of genre or theme. Here are some of them:

            -
              -
            • Temple Run 2: This is one of the most popular endless runner games in the world. You have to run away from a giant monkey while avoiding obstacles and collecting coins and gems. You can also unlock different characters and locations with unique features and challenges.
            • -
            • Subway Surfers: This is another famous endless runner game where you have to escape from the police while running on subway tracks and trains. You can also collect coins and items that can help you in your escape. You can also customize your character and hoverboard with different outfits and accessories.
            • -
            • Chhota Bheem Jungle Run: This is a game based on another popular Indian animated series, Chhota Bheem.
            • Q: Is Little Krishna APK free to download and play? -A: Yes, Little Krishna APK is free to download and play. However, it may contain some in-app purchases or ads that may require real money or an internet connection.
            • -
            • Q: How can I update Little Krishna APK to the latest version? -A: You can update Little Krishna APK to the latest version by going to the Google Play Store and tapping on the Update button on the game page. Alternatively, you can check for updates in the settings menu of the game.
            • -
            • Q: How can I contact the developer of Little Krishna APK for support or feedback? -A: You can contact the developer of Little Krishna APK by sending an email to support@zapak.com or by visiting their website at https://www.zapak.com/.
            • -
            • Q: How can I uninstall Little Krishna APK from my device? -A: You can uninstall Little Krishna APK from your device by going to the settings menu of your device and tapping on the Apps or Applications option. Then, find and select Little Krishna APK and tap on the Uninstall button. You can also uninstall it by long-pressing on the game icon on your home screen and dragging it to the Uninstall option.
            • -
            • Q: What are some other games that are similar to Little Krishna APK? -A: Some other games that are similar to Little Krishna APK are Temple Run 2, Subway Surfers, Chhota Bheem Jungle Run, Minion Rush, and Super Mario Run. You can find more details about these games in this article.
            • -

            401be4b1e0
            -
            -
            \ No newline at end of file diff --git a/spaces/simsantonioii/MusicGen-Continuation/tests/modules/test_rope.py b/spaces/simsantonioii/MusicGen-Continuation/tests/modules/test_rope.py deleted file mode 100644 index b9a54aec8b38a257ba28053afccf305a60691bfc..0000000000000000000000000000000000000000 --- a/spaces/simsantonioii/MusicGen-Continuation/tests/modules/test_rope.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.modules.rope import RotaryEmbedding -from audiocraft.modules.transformer import StreamingTransformer - - -def test_rope(): - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_rope_io_dtypes(): - B, T, H, C = 8, 75, 16, 128 - - rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32) - rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64) - - # Test bfloat16 inputs w/ both 32 and 64 precision rope. - xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - - # Test float32 inputs w/ both 32 and 64 precision rope. - xq_32 = torch.rand((B, T, H, C)).to(torch.float32) - xk_32 = torch.rand((B, T, H, C)).to(torch.float32) - xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - - -def test_transformer_with_rope(): - torch.manual_seed(1234) - for pos in ['rope', 'sin_rope']: - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding=pos) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - out = tr(x) - assert list(out.shape) == list(x.shape) - - -@torch.no_grad() -def test_rope_streaming(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, causal=True, dropout=0., - custom=True, positional_embedding='rope') - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -@torch.no_grad() -def test_rope_streaming_past_context(): - torch.manual_seed(1234) - - for context in [None, 10]: - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=True, - dropout=0., positional_embedding='rope') - tr.eval() - - steps = 20 - x = torch.randn(3, steps, 16) - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_rope_memory_efficient(): - 
torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - # Check at float precision b/c this is the rope default. - assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm() - - -def test_rope_with_xpos(): - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_positional_scale(): - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert torch.allclose(xq, xq_out) - assert torch.allclose(xk, xk_out) diff --git a/spaces/sklearn-docs/Gradient_Boosting_regression/app.py b/spaces/sklearn-docs/Gradient_Boosting_regression/app.py deleted file mode 100644 index 0fdbcb6a3795566eae67034515b1473886ac4ca6..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/Gradient_Boosting_regression/app.py +++ /dev/null @@ -1,288 +0,0 @@ -from sklearn import datasets, ensemble -from sklearn.inspection import permutation_importance -from sklearn.metrics import mean_squared_error -from sklearn.model_selection import train_test_split -import plotly.graph_objs as go -import numpy as np -import plotly.express as px -import pandas as pd - - -import gradio as gr - - -diabetes = datasets.load_diabetes(as_frame=True) -X, y = diabetes.data, diabetes.target - - -def display_table(row_number): - X, y = diabetes.data, diabetes.target - XX = pd.concat([X, y], axis=1) - temp_df = XX[row_number : row_number + 5] - Statement = f"Displaying rows from row {row_number} to {row_number+5}" - return Statement, temp_df - - -def train_model( - test_split, - learning_rate, - n_estimators, - max_depth, - min_samples_split, - loss, - duration, -): - X, y = diabetes.data, diabetes.target - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=test_split, random_state=42 - ) - params = { - "n_estimators": n_estimators, - "max_depth": max_depth, - "min_samples_split": min_samples_split, - "learning_rate": learning_rate, - "loss": loss, - } - global reg - reg = ensemble.GradientBoostingRegressor(**params) - reg.fit(X_train, y_train) - - mse = mean_squared_error(y_test, reg.predict(X_test)) - - x = np.arange(params["n_estimators"]) + 1 - train_score = reg.train_score_ - test_score = np.zeros((params["n_estimators"],), dtype=np.float64) - for i, y_pred in enumerate(reg.staged_predict(X_test)): - test_score[i] = mean_squared_error(y_test, y_pred) - - test_score = test_score - - fig = go.Figure() - - fig.add_trace( - go.Scatter( - x=x, - y=train_score, - mode="lines", - name="Training Set Deviance", - line=dict(color="blue"), - ) - ) - fig.add_trace( - go.Scatter( - x=x, - y=test_score, - mode="lines", - name="Test Set Deviance", - line=dict(color="red"), - ) - ) - - frames = [ - go.Frame( - data=[ - go.Scatter( - x=x[: k + 1], - y=train_score[: k + 1], - mode="lines", - line=dict(color="blue"), - ), - go.Scatter( - x=x[: k + 1], - y=test_score[: 
k + 1], - mode="lines", - line=dict(color="red"), - ), - ], - name=f"frame{k}", - ) - for k in range(1, len(x)) - ] - - fig.frames = frames - - fig.update_layout( - title="Deviance", - xaxis_title="Boosting Iterations", - yaxis_title="Deviance", - legend=dict(x=0, y=1), - updatemenus=[ - dict( - type="buttons", - showactive=False, - direction="right", - pad={"r": 10}, - buttons=[ - dict( - label="Play", - method="animate", - args=[ - None, - dict( - frame=dict(duration=duration, redraw=True), - fromcurrent=True, - transition=dict(duration=0), - ), - ], - ), - dict( - label="Pause", - method="animate", - args=[ - [None], - dict( - frame=dict(duration=0, redraw=False), - mode="immediate", - transition=dict(duration=0), - ), - ], - ), - ], - x=0.5, - y=-0.2, - ) - ], - ) - - return fig, mse - - -def Plot_featue_importance(test_split): - try: - feature_importance = reg.feature_importances_ - except: - # return blank figures - fig = go.Figure() - fig.update_layout(title="Train a model to see the plots") - return fig, fig - - sorted_idx = np.argsort(feature_importance) - - fig = px.bar( - pd.DataFrame( - { - "Importance": feature_importance[sorted_idx], - "Feature": np.array(diabetes.feature_names)[sorted_idx], - } - ), - x="Importance", - y="Feature", - orientation="h", - title="Feature Importance (MDI)", - ) - - X, y = diabetes.data, diabetes.target - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=test_split, random_state=42 - ) - result = permutation_importance( - reg, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2 - ) - - fig1 = px.box( - pd.DataFrame(result.importances.T, columns=diabetes.feature_names), - title="Permutation Importance (test set)", - ) - - return fig, fig1 - - -with gr.Blocks() as demo: - gr.Markdown("# Gradient Boosting regression") - gr.Markdown( - "This demo is based on [gradient boosting regression example of scikit-learn](https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_regression.html) Example.This example demonstrates gradient goosting to produce a predictive model from an ensemble of weak predictive models. Gradient boosting can be used for regression and classification problems. Here, we will train a model to tackle a diabetes regression task." - ) - - with gr.Tab("Train the model"): - gr.Markdown("### Below is the diabetes dataset used in this demo 👇 ") - gr.Markdown("### You can change the interval of rows to display.") - gr.Markdown( - "The diabetes dataset consists of ten baseline variables, age, sex, body mass index (BMI), average blood pressure (BP), and six blood serum measurements for 442 diabetes patients. The target variable is a quantitative measure of disease progression one year after baseline." - ) - total_rows = X.shape[0] - rows_number = gr.Slider( - 0, total_rows, label="Displaying Rows", value=5, step=5 - ) - - rows_number.change( - fn=display_table, - inputs=[rows_number], - outputs=[gr.Text(label="Row"), gr.DataFrame()], - ) - - gr.Markdown( - "# Play with the parameters to see how the model performance changes" - ) - - gr.Markdown( - """ - ### `Number of Estimators` : the number of boosting stages that will be performed. Later, we will plot deviance against boosting iterations. - - ### `Max Depth` : limits the number of nodes in the tree. The best value depends on the interaction of the input variables. - - ### `Min Samples Split` : the minimum number of samples required to split an internal node. - - ### `learning_rate` : how much the contribution of each tree will shrink. 
- - ### `loss` : loss function to optimize. - - ### `Test Split` : the percentage of the dataset to include in the test split. - - ### `Animation Speed for Deviance Plot` : the duration of the animation of Deviation Plot. - """ - ) - with gr.Row(): - test_split = gr.Slider(0.1, 0.9, label="Test Split", value=0.2, step=0.1) - learning_rate = gr.Slider( - 0.01, 0.5, label="Learning Rate", value=0.1, step=0.01 - ) - n_estimators = gr.Slider( - 10, 1000, label="Number of Estimators", value=100, step=10 - ) - max_depth = gr.Slider(1, 10, label="Max Depth", value=3, step=1) - min_samples_split = gr.Slider( - 2, 10, label="Min Samples Split", value=2, step=1 - ) - loss = gr.Dropdown( - ["squared_error", "absolute_error", "huber", "quantile"], - label="Loss", - value="squared_error", - ) - - duration = gr.Slider( - 0, 100, label="Animation Speed for Deviance Plot", value=25, step=10 - ) - - model_btn = gr.Button("Train Model") - gr.Markdown( - "### Finally, we will visualize the results. To do that, we will first compute the test set deviance and then plot it against boosting iterations." - ) - model_btn.click( - fn=train_model, - inputs=[ - test_split, - learning_rate, - n_estimators, - max_depth, - min_samples_split, - loss, - duration, - ], - outputs=[gr.Plot(), gr.Text(label="MSE")], - ) - - with gr.Tab("Feature Importance"): - gr.Markdown("## Feature Importance (MDI) and Permutation Importance (test set)") - gr.Markdown( - "For this example, the impurity-based and permutation methods identify the same 2 strongly predictive features but not in the same order. The third most predictive feature, “bp”, is also the same for the 2 methods. The remaining features are less predictive and the error bars of the permutation plot show that they overlap with 0." - ) - feat_imp_btn = gr.Button("Plot Feature Importance") - with gr.Row(): - feat_imp_btn.click( - fn=Plot_featue_importance, - inputs=[test_split], - outputs=[gr.Plot(), gr.Plot()], - ) - - -demo.launch() diff --git a/spaces/snowcoin/bing/README.md b/spaces/snowcoin/bing/README.md deleted file mode 100644 index 864d84303dca71b6653324b3f890387c523f35ca..0000000000000000000000000000000000000000 --- a/spaces/snowcoin/bing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Go Proxy Bingai -emoji: 📉 -colorFrom: gray -colorTo: red -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/songweig/rich-text-to-image/app.py b/spaces/songweig/rich-text-to-image/app.py deleted file mode 100644 index 1b353f2522e7d94158b8395e3f77e3984d832661..0000000000000000000000000000000000000000 --- a/spaces/songweig/rich-text-to-image/app.py +++ /dev/null @@ -1,514 +0,0 @@ -import math -import random -import os -import json -import time -import argparse -import torch -import numpy as np -from torchvision import transforms - -from models.region_diffusion_xl import RegionDiffusionXL -from utils.attention_utils import get_token_maps -from utils.richtext_utils import seed_everything, parse_json, get_region_diffusion_input,\ - get_attention_control_input, get_gradient_guidance_input - - -import gradio as gr -from PIL import Image, ImageOps -from share_btn import community_icon_html, loading_icon_html, share_js, css - - -help_text = """ -If you are encountering an error or not achieving your desired outcome, here are some potential reasons and recommendations to consider: -1. 
If you format only a portion of a word rather than the complete word, an error may occur. -2. If you use font color and get completely corrupted results, you may consider decrease the color weight lambda. -3. Consider using a different seed. -""" - - -canvas_html = """""" -get_js_data = """ -async (text_input, negative_prompt, num_segments, segment_threshold, inject_interval, inject_background, seed, color_guidance_weight, rich_text_input, height, width, steps, guidance_weights) => { - const richEl = document.getElementById("rich-text-root"); - const data = richEl? richEl.contentDocument.body._data : {}; - return [text_input, negative_prompt, num_segments, segment_threshold, inject_interval, inject_background, seed, color_guidance_weight, JSON.stringify(data), height, width, steps, guidance_weights]; -} -""" -set_js_data = """ -async (text_input) => { - const richEl = document.getElementById("rich-text-root"); - const data = text_input ? JSON.parse(text_input) : null; - if (richEl && data) richEl.contentDocument.body.setQuillContents(data); -} -""" - -get_window_url_params = """ -async (url_params) => { - const params = new URLSearchParams(window.location.search); - url_params = Object.fromEntries(params); - return [url_params]; -} -""" - - -def load_url_params(url_params): - if 'prompt' in url_params: - return gr.update(visible=True), url_params - else: - return gr.update(visible=False), url_params - - -def main(): - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - model = RegionDiffusionXL() - - def generate( - text_input: str, - negative_text: str, - num_segments: int, - segment_threshold: float, - inject_interval: float, - inject_background: float, - seed: int, - color_guidance_weight: float, - rich_text_input: str, - height: int, - width: int, - steps: int, - guidance_weight: float, - ): - run_dir = 'results/' - os.makedirs(run_dir, exist_ok=True) - # Load region diffusion model. - height = int(height) if height else 1024 - width = int(width) if width else 1024 - steps = 41 if not steps else steps - guidance_weight = 8.5 if not guidance_weight else guidance_weight - text_input = rich_text_input if rich_text_input != '' and rich_text_input != None else text_input - print('text_input', text_input, width, height, steps, guidance_weight, num_segments, segment_threshold, inject_interval, inject_background, color_guidance_weight, negative_text) - if (text_input == '' or rich_text_input == ''): - raise gr.Error("Please enter some text.") - # parse json to span attributes - base_text_prompt, style_text_prompts, footnote_text_prompts, footnote_target_tokens,\ - color_text_prompts, color_names, color_rgbs, size_text_prompts_and_sizes, use_grad_guidance = parse_json( - json.loads(text_input)) - - # create control input for region diffusion - region_text_prompts, region_target_token_ids, base_tokens = get_region_diffusion_input( - model, base_text_prompt, style_text_prompts, footnote_text_prompts, - footnote_target_tokens, color_text_prompts, color_names) - - # create control input for cross attention - text_format_dict = get_attention_control_input( - model, base_tokens, size_text_prompts_and_sizes) - - # create control input for region guidance - text_format_dict, color_target_token_ids = get_gradient_guidance_input( - model, base_tokens, color_text_prompts, color_rgbs, text_format_dict, color_guidance_weight=color_guidance_weight) - - seed_everything(seed) - - # get token maps from plain text to image generation. 
- begin_time = time.time() - if model.selfattn_maps is None and model.crossattn_maps is None: - model.remove_tokenmap_hooks() - model.register_tokenmap_hooks() - else: - model.remove_tokenmap_hooks() - model.remove_tokenmap_hooks() - plain_img = model.sample([base_text_prompt], negative_prompt=[negative_text], - height=height, width=width, num_inference_steps=steps, - guidance_scale=guidance_weight, run_rich_text=False) - print('time lapses to get attention maps: %.4f' % - (time.time()-begin_time)) - seed_everything(seed) - color_obj_masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir, - 1024//8, 1024//8, color_target_token_ids[:-1], seed, - base_tokens, segment_threshold=segment_threshold, num_segments=num_segments, - return_vis=True) - seed_everything(seed) - model.masks, segments_vis, token_maps = get_token_maps(model.selfattn_maps, model.crossattn_maps, model.n_maps, run_dir, - 1024//8, 1024//8, region_target_token_ids[:-1], seed, - base_tokens, segment_threshold=segment_threshold, num_segments=num_segments, - return_vis=True) - color_obj_atten_all = torch.zeros_like(color_obj_masks[-1]) - for obj_mask in color_obj_masks[:-1]: - color_obj_atten_all += obj_mask - color_obj_masks = [transforms.functional.resize(color_obj_mask, (height, width), - interpolation=transforms.InterpolationMode.BICUBIC, - antialias=True) - for color_obj_mask in color_obj_masks] - text_format_dict['color_obj_atten'] = color_obj_masks - text_format_dict['color_obj_atten_all'] = color_obj_atten_all - model.remove_tokenmap_hooks() - - # generate image from rich text - begin_time = time.time() - seed_everything(seed) - rich_img = model.sample(region_text_prompts, negative_prompt=[negative_text], - height=height, width=width, num_inference_steps=steps, - guidance_scale=guidance_weight, use_guidance=use_grad_guidance, - text_format_dict=text_format_dict, inject_selfattn=inject_interval, - inject_background=inject_background, run_rich_text=True) - print('time lapses to generate image from rich text: %.4f' % - (time.time()-begin_time)) - return [plain_img.images[0], rich_img.images[0], segments_vis, token_maps] - - with gr.Blocks(css=css) as demo: - url_params = gr.JSON({}, visible=False, label="URL Params") - gr.HTML("""

            Expressive Text-to-Image Generation with Rich Text

            -

            Songwei Ge, Taesung Park, Jun-Yan Zhu, Jia-Bin Huang

            -

            UMD, Adobe, CMU

            -

            ICCV, 2023

            -

            Duplicate Space | [Website] | [Code] | [Paper]

            -

            Our method is now using Stable Diffusion XL. For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.""") - with gr.Row(): - with gr.Column(): - rich_text_el = gr.HTML(canvas_html, elem_id="canvas_html") - rich_text_input = gr.Textbox(value="", visible=False) - text_input = gr.Textbox( - label='Rich-text JSON Input', - visible=False, - max_lines=1, - placeholder='Example: \'{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#b26b00"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background.\n"}]}\'', - elem_id="text_input" - ) - negative_prompt = gr.Textbox( - label='Negative Prompt', - max_lines=1, - placeholder='Example: poor quality, blurry, dark, low resolution, low quality, worst quality', - elem_id="negative_prompt" - ) - segment_threshold = gr.Slider(label='Token map threshold', - info='(See less area in token maps? Decrease this. See too much area? Increase this.)', - minimum=0, - maximum=1, - step=0.01, - value=0.25) - inject_interval = gr.Slider(label='Detail preservation', - info='(To preserve more structure from plain-text generation, increase this. To see more rich-text attributes, decrease this.)', - minimum=0, - maximum=1, - step=0.01, - value=0.) - inject_background = gr.Slider(label='Unformatted token preservation', - info='(To affect less the tokens without any rich-text attributes, increase this.)', - minimum=0, - maximum=1, - step=0.01, - value=0.3) - color_guidance_weight = gr.Slider(label='Color weight', - info='(To obtain more precise color, increase this, while too large value may cause artifacts.)', - minimum=0, - maximum=2, - step=0.1, - value=0.5) - num_segments = gr.Slider(label='Number of segments', - minimum=2, - maximum=20, - step=1, - value=9) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - step=1, - value=6, - elem_id="seed" - ) - with gr.Accordion('Other Parameters', open=False): - steps = gr.Slider(label='Number of Steps', - minimum=0, - maximum=500, - step=1, - value=41) - guidance_weight = gr.Slider(label='CFG weight', - minimum=0, - maximum=50, - step=0.1, - value=8.5) - width = gr.Dropdown(choices=[1024], - value=1024, - label='Width', - visible=True) - height = gr.Dropdown(choices=[1024], - value=1024, - label='height', - visible=True) - - with gr.Row(): - with gr.Column(scale=1, min_width=100): - generate_button = gr.Button("Generate") - load_params_button = gr.Button( - "Load from URL Params", visible=True) - with gr.Column(): - richtext_result = gr.Image( - label='Rich-text', elem_id="rich-text-image") - richtext_result.style(height=784) - with gr.Row(): - plaintext_result = gr.Image( - label='Plain-text', elem_id="plain-text-image") - segments = gr.Image(label='Segmentation') - with gr.Row(): - token_map = gr.Image(label='Token Maps') - with gr.Row(visible=False) as share_row: - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html) - loading_icon = gr.HTML(loading_icon_html) - share_button = gr.Button( - "Share to community", elem_id="share-btn") - share_button.click(None, [], [], _js=share_js) - # with gr.Row(): - # gr.Markdown(help_text) - - with gr.Row(): - footnote_examples = [ - [ - '{"ops":[{"insert":"A close-up 4k dslr photo of a "},{"attributes":{"link":"A cat wearing sunglasses and a bandana around its neck."},"insert":"cat"},{"insert":" riding a scooter. 
Palm trees in the background."}]}', - '', - 9, - 0.3, - 0.3, - 0.5, - 3, - 0, - None, - ], - [ - '{"ops":[{"insert":"A cozy "},{"attributes":{"link":"A charming wooden cabin with Christmas decoration, warm light coming out from the windows."},"insert":"cabin"},{"insert":" nestled in a "},{"attributes":{"link":"Towering evergreen trees covered in a thick layer of pristine snow."},"insert":"snowy forest"},{"insert":", and a "},{"attributes":{"link":"A cute snowman wearing a carrot nose, coal eyes, and a colorful scarf, welcoming visitors with a cheerful vibe."},"insert":"snowman"},{"insert":" stands in the yard."}]}', - '', - 12, - 0.4, - 0.3, - 0.5, - 3, - 0, - None, - ], - [ - '{"ops":[{"insert":"A "},{"attributes":{"link":"Happy Kung fu panda art, elder, asian art, volumetric lighting, dramatic scene, ultra detailed, realism, chinese"},"insert":"panda"},{"insert":" standing on a cliff by a waterfall, wildlife photography, photograph, high quality, wildlife, f 1.8, soft focus, 8k, national geographic, award - winning photograph by nick nichols"}]}', - '', - 5, - 0.3, - 0, - 0.1, - 4, - 0, - None, - ], - ] - - gr.Examples(examples=footnote_examples, - label='Footnote examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - inject_background, - seed, - color_guidance_weight, - rich_text_input, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - cache_examples=True, - examples_per_page=20) - with gr.Row(): - color_examples = [ - [ - '{"ops":[{"insert":"a beautifule girl with big eye, skin, and long "},{"attributes":{"color":"#04a704"},"insert":"hair"},{"insert":", t-shirt, bursting with vivid color, intricate, elegant, highly detailed, photorealistic, digital painting, artstation, illustration, concept art."}]}', - 'lowres, had anatomy, bad hands, cropped, worst quality', - 11, - 0.5, - 0.3, - 0.3, - 6, - 0.5, - None, - ], - [ - '{"ops":[{"insert":"a Gothic "},{"attributes":{"color":"#FD6C9E"},"insert":"church"},{"insert":" in a the sunset with a beautiful landscape in the background."}]}', - '', - 10, - 0.5, - 0.5, - 0.3, - 7, - 0.5, - None, - ], - ] - gr.Examples(examples=color_examples, - label='Font color examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - inject_background, - seed, - color_guidance_weight, - rich_text_input, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - cache_examples=True, - examples_per_page=20) - - with gr.Row(): - style_examples = [ - [ - '{"ops":[{"insert":"a beautiful"},{"attributes":{"font":"mirza"},"insert":" garden"},{"insert":" with a "},{"attributes":{"font":"roboto"},"insert":"snow mountain"},{"insert":" in the background"}]}', - '', - 10, - 0.6, - 0, - 0.4, - 5, - 0, - None, - ], - [ - '{"ops":[{"insert":"a night"},{"attributes":{"font":"slabo"},"insert":" sky"},{"insert":" filled with stars above a turbulent"},{"attributes":{"font":"roboto"},"insert":" sea"},{"insert":" with giant waves"}]}', - '', - 2, - 0.6, - 0, - 0, - 6, - 0.5, - None, - ], - ] - gr.Examples(examples=style_examples, - label='Font style examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - inject_background, - seed, - color_guidance_weight, - rich_text_input, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - cache_examples=True, - examples_per_page=20) - - 
with gr.Row(): - size_examples = [ - [ - '{"ops": [{"insert": "A pizza with "}, {"attributes": {"size": "60px"}, "insert": "pineapple"}, {"insert": " pepperoni, and mushroom on the top"}]}', - '', - 5, - 0.3, - 0, - 0, - 3, - 1, - None, - ], - [ - '{"ops": [{"insert": "A pizza with pineapple, "}, {"attributes": {"size": "60px"}, "insert": "pepperoni"}, {"insert": ", and mushroom on the top"}]}', - '', - 5, - 0.3, - 0, - 0, - 3, - 1, - None, - ], - [ - '{"ops": [{"insert": "A pizza with pineapple, pepperoni, and "}, {"attributes": {"size": "60px"}, "insert": "mushroom"}, {"insert": " on the top"}]}', - '', - 5, - 0.3, - 0, - 0, - 3, - 1, - None, - ], - ] - gr.Examples(examples=size_examples, - label='Font size examples', - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - inject_background, - seed, - color_guidance_weight, - rich_text_input, - ], - outputs=[ - plaintext_result, - richtext_result, - segments, - token_map, - ], - fn=generate, - cache_examples=True, - examples_per_page=20) - generate_button.click(fn=lambda: gr.update(visible=False), inputs=None, outputs=share_row, queue=False).then( - fn=generate, - inputs=[ - text_input, - negative_prompt, - num_segments, - segment_threshold, - inject_interval, - inject_background, - seed, - color_guidance_weight, - rich_text_input, - height, - width, - steps, - guidance_weight, - ], - outputs=[plaintext_result, richtext_result, segments, token_map], - _js=get_js_data - ).then( - fn=lambda: gr.update(visible=True), inputs=None, outputs=share_row, queue=False) - text_input.change( - fn=None, inputs=[text_input], outputs=None, _js=set_js_data, queue=False) - # load url param prompt to textinput - load_params_button.click(fn=lambda x: x['prompt'], inputs=[ - url_params], outputs=[text_input], queue=False) - demo.load( - fn=load_url_params, - inputs=[url_params], - outputs=[load_params_button, url_params], - _js=get_window_url_params - ) - demo.queue(concurrency_count=1) - demo.launch(share=False) - - -if __name__ == "__main__": - main() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Content Views Pro Nulled 16.md b/spaces/stomexserde/gpt4-ui/Examples/Content Views Pro Nulled 16.md deleted file mode 100644 index b13ac18a80da54f9cf85bbbe3406d2156c389642..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Content Views Pro Nulled 16.md +++ /dev/null @@ -1,46 +0,0 @@ - -

            How to Use Content Views Pro Nulled 16 to Create Stunning WordPress Grids

            -

            Content Views Pro Nulled 16 is a WordPress plugin that lets you filter and display any post type in grid and other layouts beautifully and easily. Whether you want to show posts, pages, images, products, events, or any custom post types, Content Views Pro Nulled 16 can help you create stunning grids with just a few clicks.

            -

            content views pro nulled 16


            Download Zip: https://urlgoal.com/2uI6du



            -

            In this article, we will show you how to use Content Views Pro Nulled 16 to create WordPress grids that will impress your visitors and boost your conversions.

            -

            Step 1: Install and Activate Content Views Pro Nulled 16

            -

            To use Content Views Pro Nulled 16, you need to install and activate it on your WordPress site. You can download the plugin from this link [^1^] or search for it on Google. After downloading the plugin, go to Plugins > Add New > Upload Plugin and upload the zip file. Then click on Install Now and Activate.

            -

            Step 2: Create a New View

            -

            A view is a collection of settings that defines how your grid will look and function. To create a new view, go to Content Views > Add New. You will see a screen like this:

            -Content Views Pro Add New View Screen -

            Here you can give your view a name, select the content type you want to display, filter and sort the content by various criteria, and choose the layout and style of your grid.

            -

            Step 3: Select the Content Type

            -

            The first thing you need to do is select the content type you want to display in your grid. You can choose from posts, pages, media, products, or any custom post types you have on your site. You can also select multiple content types at the same time.

            -

            -

            For example, if you want to display a grid of your latest blog posts and products, you can select both posts and products from the dropdown menu.

            -Content Views Pro Select Content Type -

            Step 4: Filter and Sort the Content

            -

            Next, you can filter and sort the content by various criteria, such as category, tag, custom taxonomy, custom field, date, author, status, keyword, id, etc. You can also set the number of items to show per page and enable pagination.

            -

            For example, if you want to display only posts from the category "News" that have the tag "Featured" and are published in the last month, you can set the filters like this:

            -Content Views Pro Filter Content -

            You can also sort the content by drag & drop, custom field, random order, title, date, etc. For example, if you want to display the most popular posts first based on the number of views stored in a custom field called "views_count", you can set the sort order like this:

            -Content Views Pro Sort Content -
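
            If you want to double-check that a filter like this really matches the posts you expect, you can run the same query against the standard WordPress REST API outside the plugin. The sketch below is only an illustration and is not part of Content Views Pro; it assumes your site exposes the default /wp-json/wp/v2/posts endpoint and that you already know the numeric IDs of the "News" category and the "Featured" tag, because the REST API filters by ID rather than by name.

```python
# Hypothetical sketch: reproduce the "News" + "Featured" + "last month"
# filter with the standard WordPress REST API. The site URL and term IDs
# are placeholders; this is not part of the Content Views Pro plugin.
import datetime
import requests

SITE = "https://example.com"      # assumed site URL
NEWS_CATEGORY_ID = 5              # assumed ID of the "News" category
FEATURED_TAG_ID = 12              # assumed ID of the "Featured" tag

one_month_ago = (datetime.datetime.utcnow() - datetime.timedelta(days=30)).isoformat()

params = {
    "categories": NEWS_CATEGORY_ID,  # filter by category ID
    "tags": FEATURED_TAG_ID,         # filter by tag ID
    "after": one_month_ago,          # only posts published after this date
    "orderby": "date",
    "order": "desc",
    "per_page": 10,                  # items per page, like the view setting
}

response = requests.get(f"{SITE}/wp-json/wp/v2/posts", params=params, timeout=10)
response.raise_for_status()

for post in response.json():
    print(post["date"], "-", post["title"]["rendered"])
```

            If the posts printed here match what the view preview shows, your filter settings are doing what you intend.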

            Step 5: Choose the Layout and Style

            -

            The last step is to choose the layout and style of your grid. Content Views Pro Nulled 16 offers many beautiful, responsive and flexible layouts for your grid, such as grid, list, masonry, timeline, glossary, one & others, etc. You can also customize columns, show/hide elements, change element positions, image sizes, colors, fonts, padding, margin, and many other settings to fit your website design.

            -

            For example, if you want to display a grid of posts with three columns, showing only the title, thumbnail, and read more button, you can choose the grid layout and set the style like this:

            - 81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Express Scribe Pro Full Crack Kid PORTABLE.md b/spaces/stomexserde/gpt4-ui/Examples/Express Scribe Pro Full Crack Kid PORTABLE.md deleted file mode 100644 index c1f64c45d23335ffad7cdf8fe09606427363dbc7..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Express Scribe Pro Full Crack Kid PORTABLE.md +++ /dev/null @@ -1,18 +0,0 @@ -
            -

            Why You Should Avoid Express Scribe Pro Full Crack Kid

            -

            Express Scribe Pro is professional audio player software designed to help you transcribe audio recordings. It supports various audio and video formats, encrypted dictation files, keyboard hotkeys, transcription foot pedals, speech recognition software, and more. It is a useful tool for typists, journalists, students, lawyers, and anyone who needs to transcribe audio files quickly and accurately.

            -

            Express Scribe Pro Full Crack Kid


            DOWNLOAD ✏ ✏ ✏ https://urlgoal.com/2uI8T0



            -

            However, some people may be tempted to download Express Scribe Pro Full Crack Kid, which is an illegal version of the software that claims to offer all the features of the original program for free. This is a risky and unethical move that can have serious consequences for your computer and your work. Here are some reasons why you should avoid Express Scribe Pro Full Crack Kid and use the official version instead.

            -

            Express Scribe Pro Full Crack Kid Is Not Safe

            -

            One of the main dangers of using Express Scribe Pro Full Crack Kid is that it may contain viruses, malware, spyware, or other harmful programs that can infect your computer and compromise your security. These malicious programs can steal your personal information, damage your files, slow down your system, or even lock you out of your own device. You may end up losing your important data or paying a ransom to get it back.

            -

            Moreover, Express Scribe Pro Full Crack Kid may not work properly or at all. It may crash frequently, cause errors, or corrupt your audio files. It may also lack some of the features or updates of the original software. You may end up wasting your time and effort trying to fix the problems or looking for another solution.

            -

            -

            Express Scribe Pro Full Crack Kid Is Not Legal

            -

            Another reason to avoid Express Scribe Pro Full Crack Kid is that it is illegal and violates the intellectual property rights of the software developer, NCH Software. By downloading and using Express Scribe Pro Full Crack Kid, you are breaking the law and exposing yourself to potential legal action. You may face fines, lawsuits, or even criminal charges for software piracy.

            -

            Furthermore, Express Scribe Pro Full Crack Kid is unethical and unfair to the software developer who invested time, money, and effort to create and maintain the software. By using Express Scribe Pro Full Crack Kid, you are depriving them of their rightful income and discouraging them from improving their product or creating new ones. You are also hurting other users who pay for the software and expect quality service and support.

            -

            Express Scribe Pro Full Crack Kid Is Not Worth It

            -

            In conclusion, Express Scribe Pro Full Crack Kid is not a good option for anyone who needs a reliable and professional audio player software for transcribing audio recordings. It is not safe, not legal, and not worth it. It can harm your computer, your work, and your reputation.

            -

            The best way to use Express Scribe Pro is to download it from the official website of NCH Software[^1^] [^2^] or from a trusted online source. You can try it for free for 14 days or purchase it for a reasonable price. You will get access to all the features and updates of the software as well as technical support and customer service. You will also support the software developer and respect their rights.

            -

            Express Scribe Pro is a great tool for transcribing audio recordings. Don't ruin your experience by using Express Scribe Pro Full Crack Kid. Use the official version instead and enjoy its benefits.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Ies Rp-8-14 Pdf Free.md b/spaces/stomexserde/gpt4-ui/Examples/Ies Rp-8-14 Pdf Free.md deleted file mode 100644 index d0e04181245a75f3f377d071e362b6749c642db0..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Ies Rp-8-14 Pdf Free.md +++ /dev/null @@ -1,42 +0,0 @@ -
            -

            How to Download Ies Rp-8-14 Pdf Free: A Guide for Roadway Lighting Designers

            - -

            If you are a roadway lighting designer, you may be interested in downloading Ies Rp-8-14 Pdf Free. This is a recommended practice for roadway lighting published by the Illuminating Engineering Society (IES) in 2014. It is an ANSI-approved standard that provides guidance on the design of fixed lighting for roadways, streets, adjacent bikeways, and pedestrian ways.

            -

            Ies Rp-8-14 Pdf Free


            Download Zip ❤❤❤ https://urlgoal.com/2uI73c



            - -

            According to the IES, Ies Rp-8-14 deals entirely with lighting design and does not give advice on construction. It covers topics such as lighting criteria, design methods, lighting equipment, maintenance, and environmental considerations. It also includes appendices with examples, calculations, and references.

            - -

            So how can you download Ies Rp-8-14 Pdf Free? Here are some steps you can follow:

            - -
              -
            1. Visit the official website of the IES at https://www.ies.org/.
            2. -
            3. Click on the "Standards" tab and select "IES Standards Cross-Reference" from the drop-down menu.
            4. -
            5. Scroll down to find the deprecated standard "DG-4-14 Design Guide for Roadway Lighting Maintenance" and look for its current standard "RP-8-21 Recommended Practice: Lighting Roadway and Parking Facilities".
            6. -
            7. Click on the current standard title to access its product page.
            8. -
            9. On the product page, you can choose to purchase the print or digital version of the standard. The print version costs $60 and the digital version costs $42 for IES members. Non-members have to pay $85 for either version.
            10. -
            11. If you want to download Ies Rp-8-14 Pdf Free, you can try to find it on other websites that offer free pdf downloads of standards. However, be aware that these websites may not be authorized by the IES and may not provide the latest or accurate version of the standard. You may also risk downloading viruses or malware that can harm your computer or device.
            12. -
            13. One of the websites that claims to offer Ies Rp-8-14 Pdf Free is https://civilnode.com/. On this website, you can search for "Ies Rp-8-14" and find a link to download the pdf file. However, we cannot guarantee the quality or safety of this website or file. Use it at your own risk.
            14. -
            - -

            We hope this guide has helped you learn how to download Ies Rp-8-14 Pdf Free. However, we recommend that you purchase the official version of the standard from the IES website to support their work and ensure that you get the most updated and reliable information on roadway lighting design.

            -

            - -

            Why is roadway lighting important? Roadway lighting can provide many benefits for motorists, pedestrians, cyclists, and the general public. Some of the benefits are:

            - -
              -
            • Roadway lighting can improve visibility and contrast at night, which can help drivers to see the road, traffic signs, signals, markings, and other road users more clearly. This can reduce the risk of collisions and injuries.
            • -
            • Roadway lighting can also enhance the perception of safety and security for road users and residents. People may feel more comfortable and confident to travel or walk at night if the roads are well-lit. This can encourage more active and sustainable modes of transportation such as walking and cycling.
            • -
            • Roadway lighting can also support the economic and social development of communities. Well-designed and maintained roadway lighting can create a more attractive and inviting environment for businesses, tourism, recreation, and cultural activities. This can boost the local economy and quality of life.
            • -
            - -

            However, roadway lighting also has drawbacks and challenges. Some of them are:

            • Roadway lighting can consume a significant amount of energy and generate greenhouse gas emissions. According to the U.S. Department of Energy, roadway lighting accounted for about 1% of total U.S. electricity consumption in 2010, which has negative implications for the environment and climate change.

            • Roadway lighting can also cause light pollution and glare. Light pollution is the excessive or inappropriate use of artificial light, which can disturb the natural cycles of wildlife, plants, and humans. Glare is discomfort or impairment of vision caused by excessive brightness or contrast. Both can reduce the visibility and safety of road users and affect the health and well-being of people and animals.

            • Roadway lighting can also be costly to install, operate, and maintain. It requires a large initial investment in infrastructure and equipment, plus ongoing costs for energy, maintenance, repair, and replacement, which can be a burden for public authorities and taxpayers.

            Therefore, roadway lighting should be designed, implemented, and managed in a way that balances these benefits and drawbacks. IES RP-8-14 is a standard that provides guidance on how to achieve this balance by applying best practices and principles for roadway lighting design.
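            To give a flavor of the kind of calculation such design guidance informs, here is a minimal, illustrative Python sketch. It uses the classic lumen method to estimate the maintained average illuminance over one pole spacing, plus a simple annual-energy figure for a single fixture. Every number in it (lumen output, wattage, pole spacing, road width, utilization and light-loss factors, burning hours) is an assumed example value, not a requirement taken from IES RP-8-14.

```python
# Rough, illustrative roadway-lighting estimate; all values are assumptions,
# not requirements from IES RP-8-14.

def average_illuminance_lux(lamp_lumens, utilization, light_loss, spacing_m, road_width_m):
    """Classic lumen method: maintained average illuminance over one pole spacing."""
    area_m2 = spacing_m * road_width_m
    return lamp_lumens * utilization * light_loss / area_m2

def annual_energy_kwh(fixture_watts, hours_per_night, nights_per_year=365):
    """Approximate yearly energy use of a single fixture."""
    return fixture_watts * hours_per_night * nights_per_year / 1000.0

if __name__ == "__main__":
    # Assumed example: 18,000 lm / 150 W LED fixture, 30 m pole spacing, 10 m wide roadway.
    e_avg = average_illuminance_lux(lamp_lumens=18_000, utilization=0.45,
                                    light_loss=0.8, spacing_m=30, road_width_m=10)
    energy = annual_energy_kwh(fixture_watts=150, hours_per_night=11)
    print(f"Maintained average illuminance: {e_avg:.1f} lux")
    print(f"Annual energy per fixture: {energy:.0f} kWh")
```

            Swapping in real fixture data and comparing the result against the illuminance or luminance criteria of the standard is, in essence, what a roadway lighting design tool automates.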

            -
            -
            \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/actions/search_and_summarize.py b/spaces/sub314xxl/MetaGPT/metagpt/actions/search_and_summarize.py deleted file mode 100644 index 5c7577e171de712bdc20946c8e97d509db6ee040..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/actions/search_and_summarize.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/23 17:26 -@Author : alexanderwu -@File : search_google.py -@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation. -""" -import pydantic - -from metagpt.actions import Action -from metagpt.config import CONFIG -from metagpt.logs import logger -from metagpt.schema import Message -from metagpt.tools.search_engine import SearchEngine - -SEARCH_AND_SUMMARIZE_SYSTEM = """### Requirements -1. Please summarize the latest dialogue based on the reference information (secondary) and dialogue history (primary). Do not include text that is irrelevant to the conversation. -- The context is for reference only. If it is irrelevant to the user's search request history, please reduce its reference and usage. -2. If there are citable links in the context, annotate them in the main text in the format [main text](citation link). If there are none in the context, do not write links. -3. The reply should be graceful, clear, non-repetitive, smoothly written, and of moderate length, in {LANG}. - -### Dialogue History (For example) -A: MLOps competitors - -### Current Question (For example) -A: MLOps competitors - -### Current Reply (For example) -1. Alteryx Designer: etc. if any -2. Matlab: ditto -3. IBM SPSS Statistics -4. RapidMiner Studio -5. DataRobot AI Platform -6. Databricks Lakehouse Platform -7. Amazon SageMaker -8. Dataiku -""" - -SEARCH_AND_SUMMARIZE_SYSTEM_EN_US = SEARCH_AND_SUMMARIZE_SYSTEM.format(LANG="en-us") - -SEARCH_AND_SUMMARIZE_PROMPT = """ -### Reference Information -{CONTEXT} - -### Dialogue History -{QUERY_HISTORY} -{QUERY} - -### Current Question -{QUERY} - -### Current Reply: Based on the information, please write the reply to the Question - - -""" - - -SEARCH_AND_SUMMARIZE_SALES_SYSTEM = """## Requirements -1. Please summarize the latest dialogue based on the reference information (secondary) and dialogue history (primary). Do not include text that is irrelevant to the conversation. -- The context is for reference only. If it is irrelevant to the user's search request history, please reduce its reference and usage. -2. If there are citable links in the context, annotate them in the main text in the format [main text](citation link). If there are none in the context, do not write links. -3. The reply should be graceful, clear, non-repetitive, smoothly written, and of moderate length, in Simplified Chinese. - -# Example -## Reference Information -... - -## Dialogue History -user: Which facial cleanser is good for oily skin? -Salesperson: Hello, for oily skin, it is suggested to choose a product that can deeply cleanse, control oil, and is gentle and skin-friendly. According to customer feedback and market reputation, the following facial cleansers are recommended:... -user: Do you have any by L'Oreal? -> Salesperson: ... - -## Ideal Answer -Yes, I've selected the following for you: -1. L'Oreal Men's Facial Cleanser: Oil control, anti-acne, balance of water and oil, pore purification, effectively against blackheads, deep exfoliation, refuse oil shine. 
Dense foam, not tight after washing. -2. L'Oreal Age Perfect Hydrating Cleanser: Added with sodium cocoyl glycinate and Centella Asiatica, two effective ingredients, it can deeply cleanse, tighten the skin, gentle and not tight. -""" - -SEARCH_AND_SUMMARIZE_SALES_PROMPT = """ -## Reference Information -{CONTEXT} - -## Dialogue History -{QUERY_HISTORY} -{QUERY} -> {ROLE}: - -""" - -SEARCH_FOOD = """ -# User Search Request -What are some delicious foods in Xiamen? - -# Requirements -You are a member of a professional butler team and will provide helpful suggestions: -1. Please summarize the user's search request based on the context and avoid including unrelated text. -2. Use [main text](reference link) in markdown format to **naturally annotate** 3-5 textual elements (such as product words or similar text sections) within the main text for easy navigation. -3. The response should be elegant, clear, **without any repetition of text**, smoothly written, and of moderate length. -""" - - -class SearchAndSummarize(Action): - def __init__(self, name="", context=None, llm=None, engine=None, search_func=None): - self.engine = engine or CONFIG.search_engine - - try: - self.search_engine = SearchEngine(self.engine, run_func=search_func) - except pydantic.ValidationError: - self.search_engine = None - - self.result = "" - super().__init__(name, context, llm) - - async def run(self, context: list[Message], system_text=SEARCH_AND_SUMMARIZE_SYSTEM) -> str: - if self.search_engine is None: - logger.warning("Configure one of SERPAPI_API_KEY, SERPER_API_KEY, GOOGLE_API_KEY to unlock full feature") - return "" - - query = context[-1].content - # logger.debug(query) - rsp = await self.search_engine.run(query) - self.result = rsp - if not rsp: - logger.error("empty rsp...") - return "" - # logger.info(rsp) - - system_prompt = [system_text] - - prompt = SEARCH_AND_SUMMARIZE_PROMPT.format( - # PREFIX = self.prefix, - ROLE=self.profile, - CONTEXT=rsp, - QUERY_HISTORY="\n".join([str(i) for i in context[:-1]]), - QUERY=str(context[-1]), - ) - result = await self._aask(prompt, system_prompt) - logger.debug(prompt) - logger.debug(result) - return result diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/extra_networks_hypernet.py b/spaces/supertori/files/stable-diffusion-webui/modules/extra_networks_hypernet.py deleted file mode 100644 index 207343daa673c14a362d4bd2399982d9ad86fe22..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/extra_networks_hypernet.py +++ /dev/null @@ -1,27 +0,0 @@ -from modules import extra_networks, shared, extra_networks -from modules.hypernetworks import hypernetwork - - -class ExtraNetworkHypernet(extra_networks.ExtraNetwork): - def __init__(self): - super().__init__('hypernet') - - def activate(self, p, params_list): - additional = shared.opts.sd_hypernetwork - - if additional != "" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: - p.all_prompts = [x + f"" for x in p.all_prompts] - params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) - - names = [] - multipliers = [] - for params in params_list: - assert len(params.items) > 0 - - names.append(params.items[0]) - multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) - - hypernetwork.load_hypernetworks(names, multipliers) - - def deactivate(self, p): - pass diff --git 
a/spaces/supertori/files/stable-diffusion-webui/modules/processing.py b/spaces/supertori/files/stable-diffusion-webui/modules/processing.py deleted file mode 100644 index a5eeff368b534e478ea48d9a9e5a4fc48a146d9b..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/processing.py +++ /dev/null @@ -1,1061 +0,0 @@ -import json -import math -import os -import sys -import warnings - -import torch -import numpy as np -from PIL import Image, ImageFilter, ImageOps -import random -import cv2 -from skimage import exposure -from typing import Any, Dict, List, Optional - -import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts -from modules.sd_hijack import model_hijack -from modules.shared import opts, cmd_opts, state -import modules.shared as shared -import modules.paths as paths -import modules.face_restoration -import modules.images as images -import modules.styles -import modules.sd_models as sd_models -import modules.sd_vae as sd_vae -import logging -from ldm.data.util import AddMiDaS -from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion - -from einops import repeat, rearrange -from blendmodes.blend import blendLayers, BlendType - -# some of those options should not be changed at all because they would break the model, so I removed them from options. -opt_C = 4 -opt_f = 8 - - -def setup_color_correction(image): - logging.info("Calibrating color correction.") - correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB) - return correction_target - - -def apply_color_correction(correction, original_image): - logging.info("Applying color correction.") - image = Image.fromarray(cv2.cvtColor(exposure.match_histograms( - cv2.cvtColor( - np.asarray(original_image), - cv2.COLOR_RGB2LAB - ), - correction, - channel_axis=2 - ), cv2.COLOR_LAB2RGB).astype("uint8")) - - image = blendLayers(image, original_image, BlendType.LUMINOSITY) - - return image - - -def apply_overlay(image, paste_loc, index, overlays): - if overlays is None or index >= len(overlays): - return image - - overlay = overlays[index] - - if paste_loc is not None: - x, y, w, h = paste_loc - base_image = Image.new('RGBA', (overlay.width, overlay.height)) - image = images.resize_image(1, image, w, h) - base_image.paste(image, (x, y)) - image = base_image - - image = image.convert('RGBA') - image.alpha_composite(overlay) - image = image.convert('RGB') - - return image - - -def txt2img_image_conditioning(sd_model, x, width, height): - if sd_model.model.conditioning_key not in {'hybrid', 'concat'}: - # Dummy zero conditioning if we're not using inpainting model. - # Still takes up a bit of memory, but no encoder call. - # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. - return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) - - # The "masked-image" in this case will just be all zeros since the entire image is masked. - image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) - - # Add the fake full 1s mask to the first dimension. 
- image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) - - return image_conditioning - - -class StableDiffusionProcessing: - """ - The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing - """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): - if sampler_index is not None: - print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) - - self.outpath_samples: str = outpath_samples - self.outpath_grids: str = outpath_grids - self.prompt: str = prompt - self.prompt_for_display: str = None - self.negative_prompt: str = (negative_prompt or "") - self.styles: list = styles or [] - self.seed: int = seed - self.subseed: int = subseed - self.subseed_strength: float = subseed_strength - self.seed_resize_from_h: int = seed_resize_from_h - self.seed_resize_from_w: int = seed_resize_from_w - self.sampler_name: str = sampler_name - self.batch_size: int = batch_size - self.n_iter: int = n_iter - self.steps: int = steps - self.cfg_scale: float = cfg_scale - self.width: int = width - self.height: int = height - self.restore_faces: bool = restore_faces - self.tiling: bool = tiling - self.do_not_save_samples: bool = do_not_save_samples - self.do_not_save_grid: bool = do_not_save_grid - self.extra_generation_params: dict = extra_generation_params or {} - self.overlay_images = overlay_images - self.eta = eta - self.do_not_reload_embeddings = do_not_reload_embeddings - self.paste_to = None - self.color_corrections = None - self.denoising_strength: float = denoising_strength - self.sampler_noise_scheduler_override = None - self.ddim_discretize = ddim_discretize or opts.ddim_discretize - self.s_churn = s_churn or opts.s_churn - self.s_tmin = s_tmin or opts.s_tmin - self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option - self.s_noise = s_noise or opts.s_noise - self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts} - self.override_settings_restore_afterwards = override_settings_restore_afterwards - self.is_using_inpainting_conditioning = False - self.disable_extra_networks = False - - if not seed_enable_extras: - self.subseed = -1 - self.subseed_strength = 0 - self.seed_resize_from_h = 0 - self.seed_resize_from_w = 0 - - self.scripts = None - self.script_args = script_args - self.all_prompts = None - self.all_negative_prompts = None - self.all_seeds = None - 
self.all_subseeds = None - self.iteration = 0 - - @property - def sd_model(self): - return shared.sd_model - - def txt2img_image_conditioning(self, x, width=None, height=None): - self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'} - - return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height) - - def depth2img_image_conditioning(self, source_image): - # Use the AddMiDaS helper to Format our source image to suit the MiDaS model - transformer = AddMiDaS(model_type="dpt_hybrid") - transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")}) - midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device) - midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size) - - conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image)) - conditioning = torch.nn.functional.interpolate( - self.sd_model.depth_model(midas_in), - size=conditioning_image.shape[2:], - mode="bicubic", - align_corners=False, - ) - - (depth_min, depth_max) = torch.aminmax(conditioning) - conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1. - return conditioning - - def edit_image_conditioning(self, source_image): - conditioning_image = self.sd_model.encode_first_stage(source_image).mode() - - return conditioning_image - - def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None): - self.is_using_inpainting_conditioning = True - - # Handle the different mask inputs - if image_mask is not None: - if torch.is_tensor(image_mask): - conditioning_mask = image_mask - else: - conditioning_mask = np.array(image_mask.convert("L")) - conditioning_mask = conditioning_mask.astype(np.float32) / 255.0 - conditioning_mask = torch.from_numpy(conditioning_mask[None, None]) - - # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0 - conditioning_mask = torch.round(conditioning_mask) - else: - conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:]) - - # Create another latent image, this time with a masked version of the original input. - # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter. - conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype) - conditioning_image = torch.lerp( - source_image, - source_image * (1.0 - conditioning_mask), - getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) - ) - - # Encode the new masked image using first stage of network. - conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image)) - - # Create the concatenated conditioning tensor to be fed to `c_concat` - conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:]) - conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1) - image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1) - image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype) - - return image_conditioning - - def img2img_image_conditioning(self, source_image, latent_image, image_mask=None): - source_image = devices.cond_cast_float(source_image) - - # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely - # identify itself with a field common to all models. The conditioning_key is also hybrid. 
- if isinstance(self.sd_model, LatentDepth2ImageDiffusion): - return self.depth2img_image_conditioning(source_image) - - if self.sd_model.cond_stage_key == "edit": - return self.edit_image_conditioning(source_image) - - if self.sampler.conditioning_key in {'hybrid', 'concat'}: - return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) - - # Dummy zero conditioning if we're not using inpainting or depth model. - return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) - - def init(self, all_prompts, all_seeds, all_subseeds): - pass - - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): - raise NotImplementedError() - - def close(self): - self.sampler = None - - -class Processed: - def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): - self.images = images_list - self.prompt = p.prompt - self.negative_prompt = p.negative_prompt - self.seed = seed - self.subseed = subseed - self.subseed_strength = p.subseed_strength - self.info = info - self.comments = comments - self.width = p.width - self.height = p.height - self.sampler_name = p.sampler_name - self.cfg_scale = p.cfg_scale - self.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.steps = p.steps - self.batch_size = p.batch_size - self.restore_faces = p.restore_faces - self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None - self.sd_model_hash = shared.sd_model.sd_model_hash - self.seed_resize_from_w = p.seed_resize_from_w - self.seed_resize_from_h = p.seed_resize_from_h - self.denoising_strength = getattr(p, 'denoising_strength', None) - self.extra_generation_params = p.extra_generation_params - self.index_of_first_image = index_of_first_image - self.styles = p.styles - self.job_timestamp = state.job_timestamp - self.clip_skip = opts.CLIP_stop_at_last_layers - - self.eta = p.eta - self.ddim_discretize = p.ddim_discretize - self.s_churn = p.s_churn - self.s_tmin = p.s_tmin - self.s_tmax = p.s_tmax - self.s_noise = p.s_noise - self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override - self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0] - self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0] - self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1 - self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1 - self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning - - self.all_prompts = all_prompts or p.all_prompts or [self.prompt] - self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt] - self.all_seeds = all_seeds or p.all_seeds or [self.seed] - self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed] - self.infotexts = infotexts or [info] - - def js(self): - obj = { - "prompt": self.all_prompts[0], - "all_prompts": self.all_prompts, - "negative_prompt": self.all_negative_prompts[0], - "all_negative_prompts": self.all_negative_prompts, - "seed": self.seed, - "all_seeds": self.all_seeds, - "subseed": self.subseed, - "all_subseeds": self.all_subseeds, - "subseed_strength": self.subseed_strength, - "width": self.width, - "height": self.height, - 
"sampler_name": self.sampler_name, - "cfg_scale": self.cfg_scale, - "steps": self.steps, - "batch_size": self.batch_size, - "restore_faces": self.restore_faces, - "face_restoration_model": self.face_restoration_model, - "sd_model_hash": self.sd_model_hash, - "seed_resize_from_w": self.seed_resize_from_w, - "seed_resize_from_h": self.seed_resize_from_h, - "denoising_strength": self.denoising_strength, - "extra_generation_params": self.extra_generation_params, - "index_of_first_image": self.index_of_first_image, - "infotexts": self.infotexts, - "styles": self.styles, - "job_timestamp": self.job_timestamp, - "clip_skip": self.clip_skip, - "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning, - } - - return json.dumps(obj) - - def infotext(self, p: StableDiffusionProcessing, index): - return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size) - - -# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 -def slerp(val, low, high): - low_norm = low/torch.norm(low, dim=1, keepdim=True) - high_norm = high/torch.norm(high, dim=1, keepdim=True) - dot = (low_norm*high_norm).sum(1) - - if dot.mean() > 0.9995: - return low * val + high * (1 - val) - - omega = torch.acos(dot) - so = torch.sin(omega) - res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high - return res - - -def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): - eta_noise_seed_delta = opts.eta_noise_seed_delta or 0 - xs = [] - - # if we have multiple seeds, this means we are working with batch size>1; this then - # enables the generation of additional tensors with noise that the sampler will use during its processing. - # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to - # produce the same images as with two batches [100], [101]. - if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0): - sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] - else: - sampler_noises = None - - for i, seed in enumerate(seeds): - noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8) - - subnoise = None - if subseeds is not None: - subseed = 0 if i >= len(subseeds) else subseeds[i] - - subnoise = devices.randn(subseed, noise_shape) - - # randn results depend on device; gpu and cpu get different results for same seed; - # the way I see it, it's better to do this on CPU, so that everyone gets same result; - # but the original script had it like this, so I do not dare change it for now because - # it will break everyone's seeds. 
- noise = devices.randn(seed, noise_shape) - - if subnoise is not None: - noise = slerp(subseed_strength, noise, subnoise) - - if noise_shape != shape: - x = devices.randn(seed, shape) - dx = (shape[2] - noise_shape[2]) // 2 - dy = (shape[1] - noise_shape[1]) // 2 - w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx - h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy - tx = 0 if dx < 0 else dx - ty = 0 if dy < 0 else dy - dx = max(-dx, 0) - dy = max(-dy, 0) - - x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w] - noise = x - - if sampler_noises is not None: - cnt = p.sampler.number_of_needed_noises(p) - - if eta_noise_seed_delta > 0: - torch.manual_seed(seed + eta_noise_seed_delta) - - for j in range(cnt): - sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape))) - - xs.append(noise) - - if sampler_noises is not None: - p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises] - - x = torch.stack(xs).to(shared.device) - return x - - -def decode_first_stage(model, x): - with devices.autocast(disable=x.dtype == devices.dtype_vae): - x = model.decode_first_stage(x) - - return x - - -def get_fixed_seed(seed): - if seed is None or seed == '' or seed == -1: - return int(random.randrange(4294967294)) - - return seed - - -def fix_seed(p): - p.seed = get_fixed_seed(p.seed) - p.subseed = get_fixed_seed(p.subseed) - - -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): - index = position_in_batch + iteration * p.batch_size - - clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) - - generation_params = { - "Steps": p.steps, - "Sampler": p.sampler_name, - "CFG scale": p.cfg_scale, - "Image CFG scale": getattr(p, 'image_cfg_scale', None), - "Seed": all_seeds[index], - "Face restoration": (opts.face_restoration_model if p.restore_faces else None), - "Size": f"{p.width}x{p.height}", - "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), - "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), - "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), - "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), - "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), - "Denoising strength": getattr(p, 'denoising_strength', None), - "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, - "Clip skip": None if clip_skip <= 1 else clip_skip, - "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - } - - generation_params.update(p.extra_generation_params) - - generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) - - negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else "" - - return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() - - -def process_images(p: StableDiffusionProcessing) -> Processed: - stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} - - try: - for k, v in 
p.override_settings.items(): - setattr(opts, k, v) - - if k == 'sd_model_checkpoint': - sd_models.reload_model_weights() - - if k == 'sd_vae': - sd_vae.reload_vae_weights() - - res = process_images_inner(p) - - finally: - # restore opts to original state - if p.override_settings_restore_afterwards: - for k, v in stored_opts.items(): - setattr(opts, k, v) - if k == 'sd_model_checkpoint': - sd_models.reload_model_weights() - - if k == 'sd_vae': - sd_vae.reload_vae_weights() - - return res - - -def process_images_inner(p: StableDiffusionProcessing) -> Processed: - """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch""" - - if type(p.prompt) == list: - assert(len(p.prompt) > 0) - else: - assert p.prompt is not None - - devices.torch_gc() - - seed = get_fixed_seed(p.seed) - subseed = get_fixed_seed(p.subseed) - - modules.sd_hijack.model_hijack.apply_circular(p.tiling) - modules.sd_hijack.model_hijack.clear_comments() - - comments = {} - - if type(p.prompt) == list: - p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt] - else: - p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)] - - if type(p.negative_prompt) == list: - p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt] - else: - p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)] - - if type(seed) == list: - p.all_seeds = seed - else: - p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))] - - if type(subseed) == list: - p.all_subseeds = subseed - else: - p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))] - - def infotext(iteration=0, position_in_batch=0): - return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch) - - if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: - model_hijack.embedding_db.load_textual_inversion_embeddings() - - if p.scripts is not None: - p.scripts.process(p) - - infotexts = [] - output_images = [] - - cached_uc = [None, None] - cached_c = [None, None] - - def get_conds_with_caching(function, required_prompts, steps, cache): - """ - Returns the result of calling function(shared.sd_model, required_prompts, steps) - using a cache to store the result if the same arguments have been used before. - - cache is an array containing two elements. The first element is a tuple - representing the previously used arguments, or None if no arguments - have been used before. The second element is where the previously - computed result is stored. 
- """ - - if cache[0] is not None and (required_prompts, steps) == cache[0]: - return cache[1] - - with devices.autocast(): - cache[1] = function(shared.sd_model, required_prompts, steps) - - cache[0] = (required_prompts, steps) - return cache[1] - - with torch.no_grad(), p.sd_model.ema_scope(): - with devices.autocast(): - p.init(p.all_prompts, p.all_seeds, p.all_subseeds) - - # for OSX, loading the model during sampling changes the generated picture, so it is loaded here - if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": - sd_vae_approx.model() - - if state.job_count == -1: - state.job_count = p.n_iter - - for n in range(p.n_iter): - p.iteration = n - - if state.skipped: - state.skipped = False - - if state.interrupted: - break - - prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size] - negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] - seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] - subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] - - if p.scripts is not None: - p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) - - if len(prompts) == 0: - break - - prompts, extra_network_data = extra_networks.parse_prompts(prompts) - - if not p.disable_extra_networks: - with devices.autocast(): - extra_networks.activate(p, extra_network_data) - - if p.scripts is not None: - p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) - - # params.txt should be saved after scripts.process_batch, since the - # infotext could be modified by that callback - # Example: a wildcard processed by process_batch sets an extra model - # strength, which is saved as "Model Strength: 1.0" in the infotext - if n == 0: - with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: - processed = Processed(p, [], p.seed, "") - file.write(processed.infotext(p, 0)) - - uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) - c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) - - if len(model_hijack.comments) > 0: - for comment in model_hijack.comments: - comments[comment] = 1 - - if p.n_iter > 1: - shared.state.job = f"Batch {n+1} out of {p.n_iter}" - - with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) - - x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] - for x in x_samples_ddim: - devices.test_for_nans(x, "vae") - - x_samples_ddim = torch.stack(x_samples_ddim).float() - x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - - del samples_ddim - - if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: - lowvram.send_everything_to_cpu() - - devices.torch_gc() - - if p.scripts is not None: - p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) - - for i, x_sample in enumerate(x_samples_ddim): - x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) - x_sample = x_sample.astype(np.uint8) - - if p.restore_faces: - if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration: - images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration") - - devices.torch_gc() - - x_sample = modules.face_restoration.restore_faces(x_sample) - devices.torch_gc() - - image = Image.fromarray(x_sample) - - if p.scripts is not None: - pp = scripts.PostprocessImageArgs(image) - p.scripts.postprocess_image(p, pp) - image = pp.image - - if p.color_corrections is not None and i < len(p.color_corrections): - if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: - image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) - images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") - image = apply_color_correction(p.color_corrections[i], image) - - image = apply_overlay(image, p.paste_to, i, p.overlay_images) - - if opts.samples_save and not p.do_not_save_samples: - images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p) - - text = infotext(n, i) - infotexts.append(text) - if opts.enable_pnginfo: - image.info["parameters"] = text - output_images.append(image) - - del x_samples_ddim - - devices.torch_gc() - - state.nextjob() - - p.color_corrections = None - - index_of_first_image = 0 - unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple - if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count: - grid = images.image_grid(output_images, p.batch_size) - - if opts.return_grid: - text = infotext() - infotexts.insert(0, text) - if opts.enable_pnginfo: - grid.info["parameters"] = text - output_images.insert(0, grid) - index_of_first_image = 1 - - if opts.grid_save: - images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) - - if not p.disable_extra_networks: - extra_networks.deactivate(p, extra_network_data) - - devices.torch_gc() - - res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) - - if p.scripts is not None: - p.scripts.postprocess(p, res) - - return res - - -def old_hires_fix_first_pass_dimensions(width, height): - """old algorithm for auto-calculating first pass size""" - - desired_pixel_count = 512 * 512 - actual_pixel_count = width * height - scale = math.sqrt(desired_pixel_count / actual_pixel_count) - width = math.ceil(scale * width / 64) * 64 - height = math.ceil(scale * height / 64) * 64 - - return width, height - - -class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): - sampler = None - - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs): - super().__init__(**kwargs) - self.enable_hr = enable_hr - self.denoising_strength = denoising_strength - self.hr_scale = hr_scale - 
self.hr_upscaler = hr_upscaler - self.hr_second_pass_steps = hr_second_pass_steps - self.hr_resize_x = hr_resize_x - self.hr_resize_y = hr_resize_y - self.hr_upscale_to_x = hr_resize_x - self.hr_upscale_to_y = hr_resize_y - - if firstphase_width != 0 or firstphase_height != 0: - self.hr_upscale_to_x = self.width - self.hr_upscale_to_y = self.height - self.width = firstphase_width - self.height = firstphase_height - - self.truncate_x = 0 - self.truncate_y = 0 - self.applied_old_hires_behavior_to = None - - def init(self, all_prompts, all_seeds, all_subseeds): - if self.enable_hr: - if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height): - self.hr_resize_x = self.width - self.hr_resize_y = self.height - self.hr_upscale_to_x = self.width - self.hr_upscale_to_y = self.height - - self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height) - self.applied_old_hires_behavior_to = (self.width, self.height) - - if self.hr_resize_x == 0 and self.hr_resize_y == 0: - self.extra_generation_params["Hires upscale"] = self.hr_scale - self.hr_upscale_to_x = int(self.width * self.hr_scale) - self.hr_upscale_to_y = int(self.height * self.hr_scale) - else: - self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" - - if self.hr_resize_y == 0: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - elif self.hr_resize_x == 0: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - else: - target_w = self.hr_resize_x - target_h = self.hr_resize_y - src_ratio = self.width / self.height - dst_ratio = self.hr_resize_x / self.hr_resize_y - - if src_ratio < dst_ratio: - self.hr_upscale_to_x = self.hr_resize_x - self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width - else: - self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height - self.hr_upscale_to_y = self.hr_resize_y - - self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f - self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f - - # special case: the user has chosen to do nothing - if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height: - self.enable_hr = False - self.denoising_strength = None - self.extra_generation_params.pop("Hires upscale", None) - self.extra_generation_params.pop("Hires resize", None) - return - - if not state.processing_has_refined_job_count: - if state.job_count == -1: - state.job_count = self.n_iter - - shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count) - state.job_count = state.job_count * 2 - state.processing_has_refined_job_count = True - - if self.hr_second_pass_steps: - self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps - - if self.hr_upscaler is not None: - self.extra_generation_params["Hires upscaler"] = self.hr_upscaler - - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): - self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - - latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") - if self.enable_hr and latent_scale_mode is None: - assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named 
{self.hr_upscaler}" - - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) - - if not self.enable_hr: - return samples - - target_width = self.hr_upscale_to_x - target_height = self.hr_upscale_to_y - - def save_intermediate(image, index): - """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" - - if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: - return - - if not isinstance(image, Image.Image): - image = sd_samplers.sample_to_image(image, index, approximation=0) - - info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index) - images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix") - - if latent_scale_mode is not None: - for i in range(samples.shape[0]): - save_intermediate(samples, i) - - samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) - - # Avoid making the inpainting conditioning unless necessary as - # this does need some extra compute to decode / encode the image again. - if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0: - image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples) - else: - image_conditioning = self.txt2img_image_conditioning(samples) - else: - decoded_samples = decode_first_stage(self.sd_model, samples) - lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0) - - batch_images = [] - for i, x_sample in enumerate(lowres_samples): - x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) - x_sample = x_sample.astype(np.uint8) - image = Image.fromarray(x_sample) - - save_intermediate(image, i) - - image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler) - image = np.array(image).astype(np.float32) / 255.0 - image = np.moveaxis(image, 2, 0) - batch_images.append(image) - - decoded_samples = torch.from_numpy(np.array(batch_images)) - decoded_samples = decoded_samples.to(shared.device) - decoded_samples = 2. * decoded_samples - 1. 
- - samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples)) - - image_conditioning = self.img2img_image_conditioning(decoded_samples, samples) - - shared.state.nextjob() - - img2img_sampler_name = self.sampler_name - if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img so we just silently switch to DDIM - img2img_sampler_name = 'DDIM' - self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) - - samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] - - noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) - - # GC now before running the next img2img to prevent running out of memory - x = None - devices.torch_gc() - - samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) - - return samples - - -class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): - sampler = None - - def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): - super().__init__(**kwargs) - - self.init_images = init_images - self.resize_mode: int = resize_mode - self.denoising_strength: float = denoising_strength - self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None - self.init_latent = None - self.image_mask = mask - self.latent_mask = None - self.mask_for_overlay = None - self.mask_blur = mask_blur - self.inpainting_fill = inpainting_fill - self.inpaint_full_res = inpaint_full_res - self.inpaint_full_res_padding = inpaint_full_res_padding - self.inpainting_mask_invert = inpainting_mask_invert - self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier - self.mask = None - self.nmask = None - self.image_conditioning = None - - def init(self, all_prompts, all_seeds, all_subseeds): - self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - crop_region = None - - image_mask = self.image_mask - - if image_mask is not None: - image_mask = image_mask.convert('L') - - if self.inpainting_mask_invert: - image_mask = ImageOps.invert(image_mask) - - if self.mask_blur > 0: - image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur)) - - if self.inpaint_full_res: - self.mask_for_overlay = image_mask - mask = image_mask.convert('L') - crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding) - crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height) - x1, y1, x2, y2 = crop_region - - mask = mask.crop(crop_region) - image_mask = images.resize_image(2, mask, self.width, self.height) - self.paste_to = (x1, y1, x2-x1, y2-y1) - else: - image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) - np_mask = np.array(image_mask) - np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8) - self.mask_for_overlay = Image.fromarray(np_mask) - - self.overlay_images = [] - - latent_mask = self.latent_mask 
if self.latent_mask is not None else image_mask - - add_color_corrections = opts.img2img_color_correction and self.color_corrections is None - if add_color_corrections: - self.color_corrections = [] - imgs = [] - for img in self.init_images: - image = images.flatten(img, opts.img2img_background_color) - - if crop_region is None and self.resize_mode != 3: - image = images.resize_image(self.resize_mode, image, self.width, self.height) - - if image_mask is not None: - image_masked = Image.new('RGBa', (image.width, image.height)) - image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L'))) - - self.overlay_images.append(image_masked.convert('RGBA')) - - # crop_region is not None if we are doing inpaint full res - if crop_region is not None: - image = image.crop(crop_region) - image = images.resize_image(2, image, self.width, self.height) - - if image_mask is not None: - if self.inpainting_fill != 1: - image = masking.fill(image, latent_mask) - - if add_color_corrections: - self.color_corrections.append(setup_color_correction(image)) - - image = np.array(image).astype(np.float32) / 255.0 - image = np.moveaxis(image, 2, 0) - - imgs.append(image) - - if len(imgs) == 1: - batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0) - if self.overlay_images is not None: - self.overlay_images = self.overlay_images * self.batch_size - - if self.color_corrections is not None and len(self.color_corrections) == 1: - self.color_corrections = self.color_corrections * self.batch_size - - elif len(imgs) <= self.batch_size: - self.batch_size = len(imgs) - batch_images = np.array(imgs) - else: - raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less") - - image = torch.from_numpy(batch_images) - image = 2. * image - 1. 
- image = image.to(shared.device) - - self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image)) - - if self.resize_mode == 3: - self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") - - if image_mask is not None: - init_mask = latent_mask - latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2])) - latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255 - latmask = latmask[0] - latmask = np.around(latmask) - latmask = np.tile(latmask[None], (4, 1, 1)) - - self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype) - self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype) - - # this needs to be fixed to be done in sample() using actual seeds for batches - if self.inpainting_fill == 2: - self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask - elif self.inpainting_fill == 3: - self.init_latent = self.init_latent * self.mask - - self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask) - - def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - - if self.initial_noise_multiplier != 1.0: - self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier - x *= self.initial_noise_multiplier - - samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) - - if self.mask is not None: - samples = samples * self.nmask + self.init_latent * self.mask - - del x - devices.torch_gc() - - return samples diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ceo Film Crna Macka Beli Macor Download.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ceo Film Crna Macka Beli Macor Download.md deleted file mode 100644 index cd691be75c671ca017f27b718a3b6cf2d25b9924..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ceo Film Crna Macka Beli Macor Download.md +++ /dev/null @@ -1,58 +0,0 @@ -
            -

            Crna macka, beli macor full movie download: How to watch this cult film online

            - -

            If you are a fan of domestic cinema, you have probably heard of the 1998 film Crna mačka, beli mačor (Black Cat, White Cat). It is one of Emir Kusturica's best-known and most awarded films, winning the Silver Lion at the Venice Film Festival along with numerous other honors.

            -

            ceo film crna macka beli macor download


            DOWNLOAD 🗸 https://cinurl.com/2uEYHi



            - -

            The film follows the adventures of Matko and his son Zare, who live on the banks of the Danube and get by on petty scams and smuggling. To pay off a debt to the local gangster Dadan, Matko must marry Zare off to Dadan's sister Afrodita, who is of dwarfish stature. Things get complicated, however, when Zare falls in love with Ida, the daughter of a wealthy Turk.

            - -

            Crna mačka, beli mačor is a film full of humor, colorful characters, and music. It was shot on authentic locations on the Danube and in Roma settlements, and the cast is a mix of professionals and amateurs. The film earned praise from critics and audiences around the world and is considered one of the finest achievements of Balkan cinema.

            - -

            Where you can download the full film Crna mačka, beli mačor

            - -

            If you want to watch the full film Crna mačka, beli mačor online, you have several options. One of them is to download the film from one of the sites that offer free movie streaming. Be careful, though: such sites often carry viruses and intrusive ads, and the picture and sound quality tend to be poor.

            - -

            The second option is to buy or rent the film on one of the legal streaming platforms. For example, you can find it on Amazon Prime Video, iTunes, or Google Play. That way you can be sure you are watching the film in high resolution and with subtitles in your language.

            -

            - -

            The third option is to look for the film on YouTube. On this popular site you can find the full film Crna mačka, beli mačor with subtitles in various languages. Keep in mind, however, that YouTube often removes films for copyright infringement, so we cannot guarantee the film will always be available.

            - -

            Why you should watch the full film Crna mačka, beli mačor

            - -

            The full film Crna mačka, beli mačor is a real cinematic gem that will make you laugh and win you over with its originality and energy. It is full of witty dialogue, hilarious situations, and unforgettable scenes. The film also shows the beauty and diversity of the Balkans, its culture and traditions.

            - -

            Crna mačka, beli mačor is more than a comedy. It is a film about love, family, and friendship. It is a film about facing life's challenges and finding happiness in small things. It is a film that celebrates life in all its colors.

            - -

            So don't hesitate: download the full film Crna mačka, beli mačor as soon as possible. You will enjoy this Emir Kusturica masterpiece and will certainly want to watch it again.

            -

            Who are the main actors in Crna mačka, beli mačor

            - -

            Crna mačka, beli mačor has a great cast made up of professional and amateur actors. The lead roles are played by Bajram Severdžan as Matko, Srđan Todorović as Zare, Branka Katić as Ida, Florijan Ajdini as Dadan, and Ljubica Adžović as Afrodita.

            - -

            Besides them, the film features many other interesting characters, such as Grga Pitić, a wealthy friend of Matko's family; Žamila, Dadan's mother, who is obsessed with death; Zarije, Zare's grandfather, who supposedly died but woke up in his coffin; and many others.

            - -

            The actors capture the spirit and atmosphere of the film with their natural, spontaneous performances. Their on-screen chemistry is obvious and infectious. They make the audience laugh with their comic situations and lines, but also move it with their emotions and fates.

            - -
            What the music in Crna mačka, beli mačor is like
            - -

            One of the most important elements of Crna mačka, beli mačor is its music. The film is packed with songs that follow the plot and the characters' moods. The music is mostly Roma and Balkan, with elements of jazz, rock, and funk.

            - -

            The music for the film was composed by Goran Bregović, the well-known Serbian musician and composer. He worked with various singers and bands who performed songs in the film, such as Šaban Bajramović, Vaska Jankovska, the No Smoking Orchestra, and others.

            - -

            The music in Crna mačka, beli mačor is catchy and energetic. It gets viewers singing, dancing, and enjoying the film. Several songs from the film have become hits and classics of Balkan music, such as Bubamara, Pit Bull Terrier, Duj Sandale, and others.

            - -
            Conclusion
            - -

            The full film Crna mačka, beli mačor is a unique cinematic experience you must not miss. It is a film that will make you laugh to the point of tears, but also touch you with its warmth and humanity. It is a film that will show you the beauty and richness of the Balkans, its people, and its culture.

            - -

If you want to download the full film Crna mačka, beli mačor online, you have several options. You can download it from one of the free movie sites, but be careful about possible viruses and ads. You can buy or rent it on one of the legal streaming platforms, such as Amazon Prime Video, iTunes, or Google Play. Or you can look for it on YouTube, where you can find the full film with subtitles in various languages.

            - -

Whichever option you choose, we are sure you will enjoy this masterpiece by Emir Kusturica. Crna mačka, beli mačor is a film you have to see at least once in your life.

            -


            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dil Dhadakne Do Movie Download Mp4 BETTER.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dil Dhadakne Do Movie Download Mp4 BETTER.md deleted file mode 100644 index d044732f7c2f00fe727d8d0d29014b1b3e983fa5..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Dil Dhadakne Do Movie Download Mp4 BETTER.md +++ /dev/null @@ -1,44 +0,0 @@ -

            Dil Dhadakne Do movie download mp4


            DOWNLOAD 🗸 https://cinurl.com/2uEYJt



            -
            -Link for mp3 song: - -No download and install. you will find direct link to movie and you can play any time. - -movie duration is 90 minutes. - -Watch all the song and movie trailer here: - -Download - 3GP - MP3 Song - Mp4 - Download - -Videos like Dil Dhadakne Do videos always give you a lot of freedom. You don't have to go anywhere or buy anything. This video does not violate any of the YouTube content policies. You can see or listen to this video without any censorship or limitation. - -Q: - -¿Cómo obtener datos desde un tabla desde otra tabla? - -Tengo una tabla llamada cliente en la cual guardo datos de esta manera: - -query($SQL); - - $idClient = $mysqli->insert_id; - - $SQL2 = "SELECT * FROM `clientes`"; - - $resultado = mysqli_query($mysqli, $SQL2); - - while($datos = mysqli_fetch_array($resultado)) - - echo $datos['nombre']; - - - -?> - -y en otra otra tabla llamada formulario_clientes tengo el siguiente código: - - -
            -
            -

            diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/IES VE 6.4.0.5 CRACK.rarl REPACK.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/IES VE 6.4.0.5 CRACK.rarl REPACK.md deleted file mode 100644 index c70fccab96f5e0ff44a241e5bfcae69dd6ab823f..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/IES VE 6.4.0.5 CRACK.rarl REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

            IES VE 6.4.0.5 CRACK.rarl


            Download ————— https://cinurl.com/2uEXy2



            - - d5da3c52bf
            -
            -
            -

            diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Alawar Games Unwrapper V 1.3.3 Keygen.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Alawar Games Unwrapper V 1.3.3 Keygen.md deleted file mode 100644 index 54d3afa94e7892654c9017ca046aaa93930ee9f1..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Alawar Games Unwrapper V 1.3.3 Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Alawar Games Unwrapper V 1.3.3 Keygen


            DOWNLOAD ————— https://urluss.com/2uCH7R



            - -Download Kitab Senjata Mukmin Pdf ->>> http://cinurl.com/ ... 7e8245da16. Alawar Games Unwrapper V 1.3.3 Keygen.epub · Visualizador de ... 1fdad05405
            -
            -
            -

            diff --git a/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/README.md b/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/README.md deleted file mode 100644 index f985f09f026f1ab3cc4da18708cd7a6bf74a69db..0000000000000000000000000000000000000000 --- a/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Digital Financial Advisory For Mutual Funds -emoji: 🐢 -colorFrom: gray -colorTo: purple -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/run_lgb.py b/spaces/szukevin/VISOR-GPT/train/scripts/run_lgb.py deleted file mode 100644 index 3a0871f4909da5614aa09904374de6883c404b7c..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/run_lgb.py +++ /dev/null @@ -1,87 +0,0 @@ -import lightgbm as lgb -import numpy as np -import json -import argparse -from run_lgb_cv_bayesopt import read_labels - - -def main(): - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - # Path options. - parser.add_argument("--train_path", type=str, required=True, - help="Path of the trainset.") - parser.add_argument("--test_path", type=str, required=True, - help="Path of the testset.") - parser.add_argument("--train_features_path", type=str, required=True, - help="Path of the train features for stacking.") - parser.add_argument("--test_features_path", type=str, required=True, - help="Path of the test features for stacking.") - - # Model options. - parser.add_argument("--models_num", type=int, default=64, - help="Number of models for ensemble.") - parser.add_argument("--labels_num", type=int, default=6, - help="Number of label.") - - args = parser.parse_args() - - train_features = [] - for i in range(args.models_num): - train_features.append(np.load(args.train_features_path + "train_features_" + str(i) + ".npy")) - train_features = np.concatenate(train_features, axis=-1) - train_labels = read_labels(args.train_path) - - test_features = [] - for i in range(args.models_num): - test_features.append(np.load(args.test_features_path + "test_features_" + str(i) + ".npy")) - test_features = np.concatenate(test_features, axis=-1) - test_labels = read_labels(args.test_path) - - params = { - "task": "train", - "objective": "multiclass", - "num_class": args.labels_num, - "metric": "multi_error", - "feature_fraction": 0.25, - "lambda_l1": 5.0, - "lambda_l2": 5.0, - - "learning_rate": 0.02, - "max_depth": 100, - "min_data_in_leaf": 50, - "num_leaves": 10 - } - - lgb_train = lgb.Dataset(train_features, train_labels) - lgb_eval = lgb.Dataset(test_features, test_labels, reference=lgb_train) - - model = lgb.train(params, lgb_train, valid_sets=lgb_eval, verbose_eval=50) - - test_pred = model.predict(test_features) - test_pred = np.argmax(test_pred, axis=1) - - confusion = np.zeros((args.labels_num, args.labels_num)) - - for i in range(len(test_pred)): - confusion[test_pred[i], test_labels[i]] += 1 - correct = np.sum(test_pred == test_labels) - - macro_f1 = [] - print("Confusion matrix:") - print(confusion) - print("Report precision, recall, and f1:") - eps = 1e-9 - for i in range(args.labels_num): - p = confusion[i, i].item() / (confusion[i, :].sum().item() + eps) - r = confusion[i, i].item() / (confusion[:, i].sum().item() + eps) - f1 = 2 * p * r / (p + r + eps) - print("Label {}: {:.3f}, {:.3f}, 
{:.3f}".format(i, p, r, f1)) - macro_f1.append(f1) - - print("Macro F1: {:.4f}".format(np.mean(macro_f1))) - print("Acc. (Correct/Total): {:.4f} ({}/{})".format(correct/len(test_pred), correct, len(test_pred))) - - -if __name__ == "__main__": - main() diff --git a/spaces/talhaty/Faceswapper/roop/processors/frame/face_enhancer.py b/spaces/talhaty/Faceswapper/roop/processors/frame/face_enhancer.py deleted file mode 100644 index e4c2dec05f834f7732ac62f0db6dcde416ed0b30..0000000000000000000000000000000000000000 --- a/spaces/talhaty/Faceswapper/roop/processors/frame/face_enhancer.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import threading -import gfpgan - -import roop.globals -import roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face -from roop.typing import Frame, Face -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video - -FACE_ENHANCER = None -THREAD_SEMAPHORE = threading.Semaphore() -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-ENHANCER' - - -def get_face_enhancer() -> Any: - global FACE_ENHANCER - - with THREAD_LOCK: - if FACE_ENHANCER is None: - model_path = resolve_relative_path('../models/GFPGANv1.4.pth') - # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399 - FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined] - return FACE_ENHANCER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_ENHANCER - - FACE_ENHANCER = None - - -def enhance_face(temp_frame: Frame) -> Frame: - with THREAD_SEMAPHORE: - _, _, temp_frame = get_face_enhancer().enhance( - temp_frame, - paste_back=True - ) - return temp_frame - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = enhance_face(temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(None, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> None: - target_frame = cv2.imread(target_path) - result = process_frame(None, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/spaces/terfces0erbo/CollegeProjectV2/Honestech Tvr 3.0 Keygen Free Download [VERIFIED].md b/spaces/terfces0erbo/CollegeProjectV2/Honestech Tvr 3.0 Keygen Free Download [VERIFIED].md deleted file mode 100644 index 659f5d6d4dbabfbf7403356f61f9e5fa901f267c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Honestech Tvr 3.0 Keygen Free Download [VERIFIED].md +++ /dev/null @@ -1,17 +0,0 @@ -

            honestech tvr 3.0 keygen free download


            Download File ››› https://bytlly.com/2uGlQ5



            -
            -Photodex Proshow Producer 5.0.3222 Meegen Music here you can download the game for free or get the key to the game. -Title: Photodex Proshow Manufacturer 5.0222 Moken Music Released: 2009 Genre: Soft Developer: PhotoDex Corporation Publisher: New Disc Interface: Russian Platform Only: • PC System Requirements Microsoft® Windows® XP / Vista • Pentium Processor® 2 GHz • 512 MB RAM • 1 GB Free hard disk space • 128 MB DirectX® 9.0 compatible graphics card -Or 10 • DirectX® 9.0 compatible audio device. • DirectX® 9.0C (included on the software disc) • DVD drive Description: PhotoShow - Unleash the power to create amazing movies from photos and music. -Create unforgettable slideshows on any topic: travel, holidays, weddings, photos, love story, etc. There are hundreds of combinations of animation effects to bring any photo to life. -You won't get bored for sure... Photos are in grayscale, the idea is to store them in grayscale down to the lowest value. -Photosow has powerful editing tools, revolutionary effects, and an intuitive interface that makes your movie easier than ever. -Use the full power of your digital cameras to create amazing high quality photo movies. -Create movies with animation effects to make your movie more impressive.・ -Import photos and music ・ Edit photo and video slideshows ・ Export slideshows for viewing on any device ・ Show your movie on YouTube, Facebook, Picasa and Google+ ・ Save to local hard drive or upload to the web ・ Customize duration and themes ・Screen capture ・ Turn photos into videos ・ Share on mobile devices ・ Save your work to disk Description: The archive contains a file with instructions for -working with the program. -Screenshots: -Download 8a78ff9644
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Intergraph Pds 8 0 Crack [UPDATED].md b/spaces/terfces0erbo/CollegeProjectV2/Intergraph Pds 8 0 Crack [UPDATED].md deleted file mode 100644 index eb88118db5cd6093b5146593fed2b864bb7cac46..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Intergraph Pds 8 0 Crack [UPDATED].md +++ /dev/null @@ -1,9 +0,0 @@ -
            -

It is simple to view ongoing repairs, add a special section for your regular maintenance, and schedule tasks as you go. It also lets you check the condition of each nut or bolt and decide whether it should be replaced. You can download the full version of the Intergraph Watertight crack. The application gives you full information on the condition and performance of a ship hull and lets you perform a complete, detailed analysis of it. With the help of this program you will learn about any cracks present in the ship hull. You can also download FileBot.

            -

            intergraph pds 8 0 crack


            Download File 🆗 https://bytlly.com/2uGiUk



            -

Intergraph PV Elite 2019 is a comprehensive utility for the design, analysis, and evaluation of vessel and heat exchanger components. It meets international code requirements for steel tanks and provides region-specific content. The package delivers workable plans for creating detailed 3D models and 2D drawings. You may also like the Coating Manager 2009 crack. The product has been fully tested and certified for use with the International Association of Pipeline and Gas Traders (IAPGT) core specification.

            -

A new version of Shade 3D Professional has been released. This version is compatible with Windows 8.1 / Windows 7 and above. This crack of Shade 3D Professional will help you create, edit, compare, and print 3D models and drawings.

            -

Intergraph CAESAR II 2002 is comprehensive, user-friendly environmental design software for the structural design of buildings. It can import and export both 2D and 3D geometry. It includes features for the analysis of complex structures, as well as a number of other products that can be applied to every part of the structure. It is also used for the design of industrial and transport structures. A new feature is the digital elevation model (DEM), used to visualize topographies for site analysis. It also includes several publications on the subject.

            -

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/thapasushil/Multiverse/header.html b/spaces/thapasushil/Multiverse/header.html deleted file mode 100644 index f058ac2d3d9c4cc6f3b16983d720a9cd43f505f6..0000000000000000000000000000000000000000 --- a/spaces/thapasushil/Multiverse/header.html +++ /dev/null @@ -1,18 +0,0 @@ -
            -
            -

            - Multiverse of Movies -

            -
            -
            -

            - Uses Face detection & semantic segmentation to get the mask, and Stable Diffusion Inpainting to replace the mask with a different actor/actress. -

            -
            -
            \ No newline at end of file diff --git a/spaces/theaster/RVC-New-Arknights/vc_infer_pipeline.py b/spaces/theaster/RVC-New-Arknights/vc_infer_pipeline.py deleted file mode 100644 index 7ff98b2c812f4e74afe92048fb26009fb008479d..0000000000000000000000000000000000000000 --- a/spaces/theaster/RVC-New-Arknights/vc_infer_pipeline.py +++ /dev/null @@ -1,320 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - 
"output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, 
device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/thelou1s/chat_gpt_space/README.md b/spaces/thelou1s/chat_gpt_space/README.md deleted file mode 100644 index 8ed8734bb23856b01d3fb01ffbabe21c5475ba58..0000000000000000000000000000000000000000 --- a/spaces/thelou1s/chat_gpt_space/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat Gpt Space -emoji: 📚 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false -duplicated_from: SourcezZ/chat_gpt_space ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Crack A cold one Iron Throne shirt A cool and comfortable way to celebrate the final season.md b/spaces/tialenAdioni/chat-gpt-api/logs/Crack A cold one Iron Throne shirt A cool and comfortable way to celebrate the final season.md deleted file mode 100644 index 5f99aafd5bd9d4f38798d3b99f26d7df92cbbbe5..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Crack A cold one Iron Throne shirt A cool and comfortable way to celebrate the final season.md +++ /dev/null @@ -1,15 +0,0 @@ - -

            Crack A Cold One Iron Throne Shirt: The Perfect Gift for Game of Thrones Fans

            -

            If you are looking for a unique and funny gift for a Game of Thrones fan, look no further than the Crack A Cold One Iron Throne shirt. This shirt features a hilarious design of a man sitting on the Iron Throne with a can of beer in his hand, ready to crack a cold one with his friends. The shirt is made of high-quality cotton and comes in various sizes and colors. Whether you want to show your love for the show, make a joke, or simply enjoy a cold drink, this shirt is perfect for you.

            -

            Crack A cold one Iron Throne shirt


            Download ->->->-> https://urlcod.com/2uK81S



            -

            The Crack A Cold One Iron Throne shirt is not only a great gift idea, but also a way to express your personality and style. You can wear it casually with jeans or shorts, or dress it up with a jacket or blazer. You can also pair it with other Game of Thrones accessories, such as hats, mugs, or posters. No matter how you wear it, you will surely get compliments and laughs from fellow fans and friends.

            -

            The Crack A Cold One Iron Throne shirt is available online at a reasonable price and with fast shipping. You can order it from the official website or from other trusted platforms. You can also check out the customer reviews and ratings to see what others think of the product. You will not regret buying this shirt, as it is one of the best ways to celebrate the end of an epic saga.

            -

            So what are you waiting for? Order your Crack A Cold One Iron Throne shirt today and get ready to crack a cold one with the king!

            - -

            The Crack A Cold One Iron Throne shirt is not only a fun and witty gift, but also a tribute to one of the most popular and influential TV shows of all time. Game of Thrones has captivated millions of viewers around the world with its complex characters, thrilling plot twists, and stunning visuals. The show has also inspired countless memes, parodies, and fan theories. The Crack A Cold One Iron Throne shirt is a way to join the conversation and show your appreciation for the show.

            -

            The Crack A Cold One Iron Throne shirt is also a great conversation starter and ice breaker. You can wear it to parties, events, or gatherings and instantly connect with other fans. You can share your opinions, predictions, and favorite moments from the show. You can also make jokes and references that only true fans will understand. The shirt will make you stand out from the crowd and attract attention.

            -

            The Crack A Cold One Iron Throne shirt is more than just a shirt. It is a statement of your fandom, humor, and personality. It is a way to express yourself and have fun. It is a way to crack a cold one with the king.

            - -

            The Crack A Cold One Iron Throne shirt is also a perfect gift for any occasion. You can give it to your friends, family, or colleagues for their birthday, anniversary, or holiday. You can also give it to yourself as a treat or a reward. The shirt is suitable for anyone who loves Game of Thrones, beer, or both. It is a gift that will make them smile and laugh.

            e753bf7129
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Eclipse Avn4405d Map Discrar Everything You Need to Know About This Car Radio Software.md b/spaces/tialenAdioni/chat-gpt-api/logs/Eclipse Avn4405d Map Discrar Everything You Need to Know About This Car Radio Software.md deleted file mode 100644 index 7fe59bba52a6160a4e36156d18890abbd548d8a8..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Eclipse Avn4405d Map Discrar Everything You Need to Know About This Car Radio Software.md +++ /dev/null @@ -1,65 +0,0 @@ - -

            How to Download Eclipse Map Disk for Japanese Radio

            -

            If you have a Japanese car with an Eclipse navigation system, you may need to update the map disk to get the latest features and maps. However, finding and downloading the right map disk can be tricky, especially if you don't speak Japanese. In this article, we will show you how to download Eclipse map disk for your Japanese radio.

            -

            Eclipse is a brand of Fujitsu Ten, a Japanese company that produces car audio and navigation systems. Eclipse navigation systems are installed in many Japanese cars, such as Toyota, Nissan, Honda, Mitsubishi, Subaru, and Mazda. Eclipse navigation systems use map disks to store the map data and software for the system. The map disks are usually updated every year or two, and they contain new maps, points of interest, routes, and features.

            -

            Eclipse Avn4405d Map Discrar


            Download Ziphttps://urlcod.com/2uKb0m



            -

            However, if you have an Eclipse navigation system in your car, you may face some problems when trying to update the map disk. First of all, the map disks are region-specific, meaning that they only work for certain countries or areas. For example, if you have a map disk for Japan, it will not work for Europe or North America. Secondly, the map disks are language-specific, meaning that they only display the information in one language. For example, if you have a map disk in Japanese, it will not show any English text or voice guidance. Thirdly, the map disks are model-specific, meaning that they only work for certain models of Eclipse navigation systems. For example, if you have a map disk for AVN4405D, it will not work for AVN5504D.

            -

            So how can you find and download the right map disk for your Eclipse navigation system? Here are some steps that you can follow:

            -
              -
            1. Identify your Eclipse navigation system model. You can find the model number on the front panel of the system or on a sticker on the back of the system. Some common models are AVN1106D ,AVN1106DmkII ,AVN2204D ,AVN2205D ,AVN4404D ,AVN4405D ,AVN4406D ,AVN5504D ,AVN5504DM ,AVN5505D ,AVN7701D ,AVN7702D ,AVN7703D ,UCNV884 ,UCNV884RE ,UCNV884mkII ,AVN-20D ,AVN30D ,AVN-50D ,AVN-51D ,AVN-52D ,AVN-62D ,AVN2454 ,AVN5435 ,AVN-5500 ,AVN-5510 ,AVN-6600 ,AVN-6610 , and AVN-6620.
            2. -
            3. Identify your region and language. You need to know which region and language you want your map disk to cover. For example, if you live in Europe and want English voice guidance, you need to look for a map disk that supports Europe and English.
            4. -
            5. Search online for a reliable source of Eclipse map disk download. You can use Google or other search engines to find websites that offer Eclipse map disk download for your model, region, and language. However, be careful of scams and viruses that may harm your computer or your navigation system. Some websites that claim to offer free or cheap downloads may actually contain malware or corrupted files that can damage your system or steal your personal information. Therefore, always check the reputation and reviews of the website before downloading anything from it.
            6. -
7. Download the map disk file to your computer. Once you find a trustworthy website that offers an Eclipse map disk download for your model, region, and language, you can download the file to your computer. The file may come in a compressed format such as ZIP or RAR, so you may need to extract it with a program such as WinRAR or 7-Zip (a short script sketch after this list shows one way to check and unpack a ZIP archive).
            8. -
            9. Burn the map disk file to a blank DVD-R or CD-R. After extracting the file, you need to burn it to a blank DVD-R or CD-R using a software such as Nero or ImgBurn. Make sure that you use the lowest possible speed when burning the disk to avoid errors or glitches.
            10. -
11. Insert the newly burned map disk into your Eclipse navigation system and turn it on, then follow the instructions on the screen. The system should automatically detect the new map disk and begin loading the updated maps and software.

              -
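If you would rather script the extraction step than click through WinRAR or 7-Zip, a minimal Python sketch could look like the one below. This assumes the download is a ZIP file; the archive and folder names are placeholders, not names from any official Eclipse download.

```python
# Minimal sketch: verify and unpack the downloaded map-disk archive before burning.
# The file and folder names below are placeholders.
import zipfile
from pathlib import Path

archive = Path("eclipse_map_disk.zip")
target = Path("map_disk_extracted")

with zipfile.ZipFile(archive) as zf:
    bad = zf.testzip()                    # first corrupt member, or None if the archive is intact
    if bad is not None:
        raise RuntimeError(f"Corrupt entry in archive: {bad}")
    zf.extractall(target)

print("Files ready to burn:", sorted(p.name for p in target.iterdir()))
```

You would still burn the extracted files with Nero or ImgBurn at the lowest speed, as described in the steps above.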

              Eclipse Avn4405d Map Disc download
              -Eclipse Avn4405d Map Disc iso
              -Eclipse Avn4405d Map Disc update
              -Eclipse Avn4405d Map Disc free
              -Eclipse Avn4405d Map Disc software
              -Eclipse Avn4405d Map Disc installation
              -Eclipse Avn4405d Map Disc rar file
              -Eclipse Avn4405d Map Disc zip file
              -Eclipse Avn4405d Map Disc torrent
              -Eclipse Avn4405d Map Disc crack
              -Eclipse Avn4405d Map Disc serial number
              -Eclipse Avn4405d Map Disc keygen
              -Eclipse Avn4405d Map Disc activation code
              -Eclipse Avn4405d Map Disc license key
              -Eclipse Avn4405d Map Disc product key
              -Eclipse Avn4405d Map Disc manual
              -Eclipse Avn4405d Map Disc instructions
              -Eclipse Avn4405d Map Disc guide
              -Eclipse Avn4405d Map Disc troubleshooting
              -Eclipse Avn4405d Map Disc error codes
              -Eclipse Avn4405d Map Disc compatibility
              -Eclipse Avn4405d Map Disc features
              -Eclipse Avn4405d Map Disc specifications
              -Eclipse Avn4405d Map Disc reviews
              -Eclipse Avn4405d Map Disc ratings
              -Eclipse Avn4405d Map Disc price
              -Eclipse Avn4405d Map Disc sale
              -Eclipse Avn4405d Map Disc discount
              -Eclipse Avn4405d Map Disc coupon code
              -Eclipse Avn4405d Map Disc best deal
              -Eclipse Avn4405d Map Disc online purchase
              -Eclipse Avn4405d Map Disc delivery
              -Eclipse Avn4405d Map Disc warranty
              -Eclipse Avn4405d Map Disc support
              -Eclipse Avn4405d Map Disc customer service
              -Eclipse Avn4405d Map Disc contact number
              -Eclipse Avn4405d Map Disc email address
              -Eclipse Avn4405d Map Disc website
              -Eclipse Avn4405d Map Disc forum
              -Eclipse Avn4405d Map Disc blog
              -Eclipse Avn4405d Map Disc video tutorial
              -Eclipse Avn4405d Map Disc youtube channel
              -Eclipse Avn4405d Map Disc facebook page
              -Eclipse Avn4405d Map Disc twitter account
              -Eclipse Avn4405d Map Disc instagram profile
              -Eclipse Avn4405d Map Disc pinterest board
              -Eclipse Avn4405d Map Disc reddit thread
              -Eclipse Avn4405d Map Disc quora question
              -Eclipse Avn4405d Map Disc medium article

              e753bf7129
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Firmware Nokia X6-00 RM-559 Where to Find Reliable Download Links and Support.md b/spaces/tialenAdioni/chat-gpt-api/logs/Firmware Nokia X6-00 RM-559 Where to Find Reliable Download Links and Support.md deleted file mode 100644 index 2eeb8cee10f57154e6890ed9c824dff8c7644800..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Firmware Nokia X6-00 RM-559 Where to Find Reliable Download Links and Support.md +++ /dev/null @@ -1,75 +0,0 @@ -
              -

              How to Download and Install Firmware for Nokia X6-00 RM-559

              -

              If you own a Nokia X6-00 RM-559 smartphone and want to update its software or fix any issues, you can download and install the official firmware file from this guide. Firmware is the operating system that runs on your device and controls its functions. By flashing or installing the firmware, you can upgrade, downgrade, or re-install the stock firmware (OS) on your mobile device.

              -

              In this article, we will show you how to download and install the firmware for Nokia X6-00 RM-559 using the OST Tool. OST Tool is a flash tool that allows you to flash or install the firmware on Nokia devices. You will also need a USB driver, a USB cable, and a computer to perform this process.

              -

              firmware-nokia-x6-00-rm-559


              Downloadhttps://urlcod.com/2uK9cj



              -

              Download Firmware for Nokia X6-00 RM-559

              -

              The first step is to download the firmware file for your Nokia X6-00 RM-559 device. You can find the official link to download the firmware file from [^1^]. The firmware file comes in a zip package containing the flash file, flash tool, USB driver, and how-to flash manual.

              -

              The file name is Nokia_X6_B2N-354H-0-00WW-B01_OST.zip and the file size is 2 GB. You can also check the firmware version and release date from [^2^] or [^3^]. The latest firmware version is v40.0.002 and it was released on 2011 Nov 8.
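If you want to sanity-check the download before flashing, a few lines of Python are enough. This is only an illustrative sketch: the archive name is the one quoted above, while the path and the size check are assumptions rather than anything specified by the OST Tool package.

```python
# Illustrative check of the downloaded firmware archive (path and checks are assumptions).
import os
import zipfile

archive = "Nokia_X6_B2N-354H-0-00WW-B01_OST.zip"  # file name quoted in this guide

size_gb = os.path.getsize(archive) / (1024 ** 3)
print(f"Archive size: {size_gb:.2f} GB")          # the guide quotes roughly 2 GB

with zipfile.ZipFile(archive) as zf:
    if zf.testzip() is not None:                  # returns the first corrupt member, or None
        raise RuntimeError("Archive looks damaged - download it again")
    print("First entries:", zf.namelist()[:5])    # flash file, flash tool, driver, manual
```

If the size is far off or the archive fails the test, re-download it before you touch the phone.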

              -

              how to flash firmware nokia x6 00 rm 559
              -firmware nokia x6 00 rm 559 latest version download
              -firmware nokia x6 00 rm 559 original file
              -firmware nokia x6 00 rm 559 free download
              -firmware nokia x6 00 rm 559 update
              -firmware nokia x6 00 rm 559 custom rom
              -firmware nokia x6 00 rm 559 android
              -firmware nokia x6 00 rm 559 bi only
              -firmware nokia x6 00 rm 559 arabic
              -firmware nokia x6 00 rm 559 flash tool
              -firmware nokia x6 00 rm 559 usb driver
              -firmware nokia x6 00 rm 559 error
              -firmware nokia x6 00 rm 559 backup
              -firmware nokia x6 00 rm 559 recovery mode
              -firmware nokia x6 00 rm 559 hard reset
              -firmware nokia x6 00 rm 559 unlock code
              -firmware nokia x6 00 rm 559 imei repair
              -firmware nokia x6 00 rm 559 root
              -firmware nokia x6 00 rm 559 security code reset
              -firmware nokia x6 00 rm 559 sim lock remove
              -firmware nokia x6 00 rm 559 wifi problem
              -firmware nokia x6 00 rm 559 bluetooth fix
              -firmware nokia x6 00 rm 559 camera solution
              -firmware nokia x6 00 rm 559 touch screen not working
              -firmware nokia x6 00 rm 559 dead boot repair
              -firmware nokia x6 00 rm 559 hang on logo
              -firmware nokia x6 00 rm 559 restart problem
              -firmware nokia x6 00 rm 559 network issue
              -firmware nokia x6 00 rm 559 battery drain
              -firmware nokia x6 00 rm 559 speaker problem
              -firmware nokia x6 00 rm 559 microphone not working
              -firmware nokia x6 00 rm 559 headphone jack problem
              -firmware nokia x6 00 rm 559 charging port problem
              -firmware nokia x6 00 rm 559 lcd display problem
              -firmware nokia x6 00 rm 559 keypad problem
              -firmware nokia x6 00 rm 559 vibration problem
              -firmware nokia x6 00 rm

              -

              Install Firmware for Nokia X6-00 RM-559

              -

              After downloading the firmware file, you need to extract it on your computer. You can use any zip extractor software like WinRAR or 7-Zip to do this. After extracting the package, you will be able to get the following files:

              -
                -
              • Firmware File
              • -
              • Flash Tool
              • -
              • USB Driver
              • -
              • How-to Flash Guide
              • -
              -

              Before you proceed to install the firmware, you need to take some precautions:

              -
                -
              • Backup your data: Flashing or installing the firmware will erase all your data on your device. Therefore, it is recommended to backup your important data such as contacts, messages, photos, videos, etc. before you start.
              • -
              • Charge your battery: Make sure your device has at least 50% battery level to avoid any interruption during the process.
              • -
              • Use a good USB cable: Use a good quality USB cable that can connect your device and computer properly.
              • -
              • Disable antivirus: Disable any antivirus or firewall software on your computer that may interfere with the flash tool.
              • -
              -

              Now follow these steps to install the firmware on your Nokia X6-00 RM-559 device:

              -
                -
              1. Install the USB driver on your computer. You can find the driver file in the extracted package. If the USB driver is already installed, then skip this step.
              2. -
              3. Run the OST Tool as administrator. You can find the tool file in the extracted package.
              4. -
              5. Click on File > Open File Agent and select B2N-354H-0-00WW-B01.nb0 from the extracted folder.
              6. -
              7. Click on Edit Phone Information and enter your device information such as IMEI number, product code, etc.
              8. -
              9. Click on Next and connect your device to the computer using a USB cable while holding down the Volume Down button.
              10. -
              11. The tool will detect your device and show its information on the screen.
              12. -
              13. Click on Next and wait for the tool to flash or install the firmware on your device.
              14. -
              15. Once the process is completed, you will see a green tick mark on the screen.
              16. -
              17. Disconnect your device from the computer and reboot it.
              18. -
              -

              Congratulations! You have successfully installed the firmware on your Nokia X6-00 RM-559 device. You can now enjoy the new features and improvements of the latest software version.

              e753bf7129
              -
              -
              \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle Classic Mod APK A Fun and Interactive Adventure with Unlimited Money.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle Classic Mod APK A Fun and Interactive Adventure with Unlimited Money.md deleted file mode 100644 index 64e6fbbedb45a52cf1f303126ae0542d289e16f6..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Dark Riddle Classic Mod APK A Fun and Interactive Adventure with Unlimited Money.md +++ /dev/null @@ -1,111 +0,0 @@ - -

              Dark Riddle Classic Mod APK Unlimited Money: A Fun and Thrilling Adventure Game

              -

              If you are looking for a fun and thrilling adventure game that will keep you on the edge of your seat, then you should try Dark Riddle Classic. This game is a remake of the popular Dark Riddle game, which has been downloaded by millions of players around the world. In this game, you will play as a curious neighbor who wants to find out what is going on in the house across the street. However, you will soon discover that the house is not as normal as it seems, and that there is a dark and mysterious secret hidden inside.

              -

              What is Dark Riddle Classic?

              -

              Dark Riddle Classic is an adventure game that combines elements of stealth, puzzle, and horror. You will have to sneak into the neighbor's house, avoid his traps and cameras, and explore his rooms and basement. Along the way, you will find clues and items that will help you solve puzzles and unlock new areas. You will also encounter some strange and creepy characters, such as a talking cat, a giant spider, and a mysterious woman. You will have to use your wits and skills to survive and uncover the truth behind the neighbor's secret.

              -

              dark riddle classic mod apk unlimited money


              Download Filehttps://bltlly.com/2uOmz6



              -

              The gameplay of Dark Riddle Classic

              -

              The gameplay of Dark Riddle Classic is simple and intuitive. You can control your character using the virtual joystick on the left side of the screen, and interact with objects using the buttons on the right side. You can also switch between first-person and third-person views by tapping on the camera icon. You can use items from your inventory by dragging them to the desired location or target. You can also hide in closets, under beds, or behind furniture to avoid being seen by the neighbor or his minions.

              -

              The features of Dark Riddle Classic

              -

              Dark Riddle Classic has many features that make it an enjoyable and immersive game. Some of these features are:

              -
                -
              • High-quality graphics and sound effects that create a realistic and atmospheric environment.
              • -
              • A large and diverse map that includes various locations, such as the neighbor's house, the street, the park, the supermarket, and more.
              • -
              • Many interactive objects that you can use for your advantage or amusement, such as cars, bikes, drones, fireworks, weapons, tools, food, animals, etc.
              • -
              • A dynamic day-night cycle that affects the gameplay and difficulty level.
              • -
              • A captivating storyline that unfolds as you progress through the game.
              • -
              • A variety of endings that depend on your choices and actions.
              • -
              -

              Why download Dark Riddle Classic Mod APK Unlimited Money?

              -

              Dark Riddle Classic is a free-to-play game that you can download from Google Play Store or App Store. However, if you want to enjoy some extra benefits and features, then you should download Dark Riddle Classic Mod APK Unlimited Money. This is a modified version of the game that gives you access to unlimited money and other resources. With this mod apk, you can:

              -

              The benefits of Dark Riddle Classic Mod APK Unlimited Money

              -
                -
              • Buy any item or upgrade from the shop without worrying about the cost.
              • -
              • Unlock all the skins and costumes for your character.
              • -
              • Get unlimited hints and tips for solving puzzles.
              • -
              • Remove all the ads and pop-ups from the game.
              • -
              • Enjoy faster loading times and smoother performance.
              • -
              -

              How to download and install Dark Riddle Classic Mod APK Unlimited Money

              -

              If you want to download and install Dark Riddle Classic Mod APK Unlimited Money , you need to follow these steps:

              -
                -
1. Download the Dark Riddle Classic Mod APK Unlimited Money file from a reliable source. Make sure you have enough storage space on your device.
              2. -
              3. Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
              4. -
              5. Locate the downloaded file on your device using a file manager app, such as ES File Explorer or ZArchiver. Tap on the file and select Install.
              6. -
7. If the game requires an OBB file, which is a data file that contains additional content, you need to extract it from the zip file and copy it to the Android/OBB folder on your device. You can use a file manager app or a zip extractor app to do this (see the command-line sketch after this list for one way to handle this step).
              8. -
              9. Wait for the installation to finish and launch the game from your app drawer or home screen. Enjoy playing Dark Riddle Classic with unlimited money and resources.
              10. -
              -
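For the OBB step above, here is a rough sketch of how the copy could be done from a computer with ADB instead of a file manager app. The bundle name and the package folder are made-up placeholders, since the article does not give the game's real package name.

```python
# Rough sketch: extract any .obb files from the downloaded bundle and push them
# to the phone over ADB. All names below are placeholders.
import subprocess
import zipfile

bundle = "dark_riddle_classic_mod.zip"
package = "com.example.darkriddleclassic"   # placeholder package name

with zipfile.ZipFile(bundle) as zf:
    obb_files = [name for name in zf.namelist() if name.endswith(".obb")]
    zf.extractall(".", members=obb_files)

# Android expects OBB data under Android/obb/<package name>/ on shared storage.
subprocess.run(["adb", "shell", "mkdir", "-p", f"/sdcard/Android/obb/{package}"], check=True)
for obb in obb_files:
    subprocess.run(["adb", "push", obb, f"/sdcard/Android/obb/{package}/"], check=True)
```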

              Tips and tricks for playing Dark Riddle Classic

              -

              Dark Riddle Classic is a challenging and fun game that requires some skills and strategies to complete. Here are some tips and tricks that can help you play better and have more fun:

              -

              dark riddle classic hack mod apk free money
              -download dark riddle classic mod apk unlimited coins
              -dark riddle classic mod apk latest version unlimited cash
              -how to install dark riddle classic mod apk with unlimited money
              -dark riddle classic mod apk no root unlimited money
              -dark riddle classic cheat mod apk unlimited money and gems
              -dark riddle classic mod apk offline unlimited money
              -dark riddle classic mod apk android 1 unlimited money
              -dark riddle classic mod apk revdl unlimited money
              -dark riddle classic mod apk rexdl unlimited money
              -dark riddle classic mod apk happymod unlimited money
              -dark riddle classic mod apk 2023 unlimited money
              -dark riddle classic premium mod apk unlimited money
              -dark riddle classic pro mod apk unlimited money
              -dark riddle classic vip mod apk unlimited money
              -dark riddle classic unlocked mod apk unlimited money
              -dark riddle classic full mod apk unlimited money
              -dark riddle classic mega mod apk unlimited money
              -dark riddle classic super mod apk unlimited money
              -dark riddle classic ultra mod apk unlimited money
              -dark riddle classic cracked mod apk unlimited money
              -dark riddle classic patched mod apk unlimited money
              -dark riddle classic updated mod apk unlimited money
              -dark riddle classic new mod apk unlimited money
              -dark riddle classic old mod apk unlimited money
              -dark riddle classic original mod apk unlimited money
              -dark riddle classic best mod apk unlimited money
              -dark riddle classic top mod apk unlimited money
              -dark riddle classic awesome mod apk unlimited money
              -dark riddle classic cool mod apk unlimited money
              -dark riddle classic fun mod apk unlimited money
              -dark riddle classic amazing mod apk unlimited money
              -dark riddle classic fantastic mod apk unlimited money
              -dark riddle classic incredible mod apk unlimited money
              -dark riddle classic wonderful mod apk unlimited money
              -dark riddle classic extreme mod apk unlimited money
              -dark riddle classic ultimate mod apk unlimited money
              -dark riddle classic deluxe mod apk unlimited money
              -dark riddle classic plus mod apk unlimited money
              -dark riddle classic gold mod apk unlimited money
              -dark riddle classic diamond mod apk unlimited money
              -dark riddle classic platinum mod apk unlimited money
              -dark riddle classic silver mod apk unlimited money
              -dark riddle classic bronze mod apk unlimited money
              -dark riddle classic iron mod apk unlimited money
              -dark riddle classic steel mod apk unlimited money
              -dark riddle classic copper mod apk unlimited money
              -dark riddle classic tin mod apk unlimited money
              -dark riddle classic lead mod apk unlimited money

              -

              Explore the environment and interact with objects

              -

              One of the best things about Dark Riddle Classic is that you can explore a large and diverse map that has many secrets and surprises. You can interact with many objects in the game, such as cars, bikes, drones, fireworks, weapons, tools, food, animals, etc. Some of these objects can help you distract the neighbor or his minions, while others can help you access new areas or find clues. You can also use some objects for your amusement, such as throwing eggs at the neighbor's window or riding a bike around the street. Be creative and curious and you will discover many things in the game.

              -

              Use stealth and strategy to avoid detection

              -

              Dark Riddle Classic is not a game where you can just run and gun your way through. You need to be stealthy and smart to avoid being caught by the neighbor or his minions. You can use various methods to hide or escape from them, such as hiding in closets, under beds, or behind furniture, using items to create noise or smoke, running away or jumping over fences, etc. You also need to be aware of your surroundings and avoid triggering traps or cameras that can alert the neighbor. You can use items like binoculars or drones to scout ahead and plan your moves accordingly.

              -

              Collect clues and solve puzzles to uncover the mystery

              -

              The main goal of Dark Riddle Classic is to find out what is going on in the neighbor's house and what is his secret. To do this, you need to collect clues and items that will help you solve puzzles and unlock new areas. You can find clues and items in various places, such as drawers, cabinets, safes, boxes, etc. You can also get hints and tips from some characters in the game, such as the talking cat or the mysterious woman. Some puzzles require logic and reasoning, while others require trial and error. You need to pay attention to details and remember what you see and hear in the game.

              -

              Conclusion

              -

              Dark Riddle Classic is a fun and thrilling adventure game that will keep you entertained for hours. You will enjoy sneaking into the neighbor's house, exploring his rooms and basement, finding clues and items, solving puzzles, encountering strange characters, and uncovering his dark secret. You will also love playing with unlimited money and resources by downloading Dark Riddle Classic Mod APK Unlimited Money. This mod apk will give you access to all the items and upgrades in the game without spending a dime. You will also get rid of all the ads and pop-ups that can ruin your gaming experience. If you are ready to embark on this exciting adventure, download Dark Riddle Classic Mod APK Unlimited Money today and have fun!

              -

              FAQs

              -
                -
              • Q: Is Dark Riddle Classic Mod APK Unlimited Money safe to download?
              • -
              • A: Yes, Dark Riddle Classic Mod APK Unlimited Money is safe to download as long as you get it from a trusted source. However, you should always scan any file you download with an antivirus app before installing it on your device.
              • -
              • Q: Do I need to root my device to install Dark Riddle Classic Mod APK Unlimited Money?
              • -
              • A: No, you do not need to root your device to install Dark Riddle Classic Mod APK Unlimited Money. You just need to enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
              • -
              • Q: What is the difference between Dark Riddle Classic and Dark Riddle?
              • -
              • A: Dark Riddle Classic is a remake of the original Dark Riddle game, which was released in 2019. Dark Riddle Classic has improved graphics, sound effects, gameplay, and storyline. It also has more locations, characters, objects, puzzles, and endings. However, both games have the same core concept and theme.
              • -
              • Q: How long does it take to finish Dark Riddle Classic?
              • -
              • A: The duration of Dark Riddle Classic depends on your playstyle and skill level. Some players can finish the game in a few hours, while others may take longer. The game also has multiple endings that can affect the length of the game.
              • -
              • Q: Can I play Dark Riddle Classic offline?
              • -
              • A: Yes, you can play Dark Riddle Classic offline without an internet connection. However, you may need to connect to the internet to download the game or update it to the latest version.
              • -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FIFA Mobile v18.1.03 MOD APK with Unlimited Money and Menu Options.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FIFA Mobile v18.1.03 MOD APK with Unlimited Money and Menu Options.md deleted file mode 100644 index 301dc09f0e7e269c5639a347daee11856a9e1d61..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download FIFA Mobile v18.1.03 MOD APK with Unlimited Money and Menu Options.md +++ /dev/null @@ -1,137 +0,0 @@ - -

              FIFA Mobile APK Mod Unlimited Money: Everything You Need to Know

              -

              If you are a fan of soccer games, you might have heard of FIFA Mobile, the popular mobile version of the FIFA franchise by EA Sports. But did you know that there is a modded version of the game that gives you unlimited money and other perks? In this article, we will tell you everything you need to know about FIFA Mobile APK Mod Unlimited Money, including what it is, how to download and install it, why you should use it, how to play it like a pro, and how it compares to the official version. Let's get started!

              -

              What is FIFA Mobile?

              -

              FIFA Mobile is a soccer simulation game that lets you build your ultimate team of soccer stars from over 15,000 authentic players across 600+ teams from 30+ leagues around the world. You can also relive the FIFA World Cup 2022 with any of the 32 qualified nations in the official licensed mode. You can compete against other players in various modes, such as VS Attack, Head to Head, Manager Mode, and more. You can also enjoy realistic graphics, sound effects, commentary, and stadiums in FIFA Mobile.

              -

              fifa mobile apk mod unlimited money


              Download » https://bltlly.com/2uOiDH



              -

              Features and gameplay of FIFA Mobile

              -

              Some of the features and gameplay aspects of FIFA Mobile are:

              -
                -
              • Build your ultimate team with star players from the biggest leagues and top teams, such as Kylian Mbappe, Christian Pulisic, Vinicius Jr, Son Heung-min, and more.
              • -
              • Unlock soccer icons and heroes from over 30+ leagues, such as Paolo Maldini, Ronaldinho, and more.
              • -
              • Experience immersive next-level soccer simulation with upgraded stadiums, realistic SFX, live commentary, and up to 60 fps gameplay.
              • -
              • Be the soccer manager of your own dream team. Plan your strategy, adjust your tactics, choose your set piece takers and captain, make substitutions and position changes on the fly.
              • -
              • Master the new stamina feature that affects your players' performance based on their energy level.
              • -
              • Learn new skill moves and use them to dribble past defenders or score spectacular goals.
              • -
              • Earn rewards and progress through various seasons, events, campaigns, tournaments, and challenges.
              • -
              -

              How to download and install FIFA Mobile APK Mod Unlimited Money

              -

              If you want to enjoy FIFA Mobile with unlimited money and other perks, you will need to download and install a modded version of the game. Here are the steps to do so:

              -
                -
              1. Go to 5Play[^1^], Find Me Apk[^2^], or any other trusted website that offers FIFA Mobile APK Mod Unlimited Money.
              2. -
              3. Download the APK file and the OBB file (if required) to your device.
              4. -
              5. Enable unknown sources in your device settings to allow installation of apps from outside the Google Play Store.
              6. -
              7. Locate the downloaded files in your file manager and tap on them to install them.
              8. -
              9. If you downloaded an OBB file, extract it to the Android/OBB folder using any file extractor app.
              10. -
              11. Launch the game and enjoy!
              12. -
              -

              Why use FIFA Mobile APK Mod Unlimited Money?

              -

              You might be wondering why you should use FIFA Mobile APK Mod Unlimited Money instead of the official version of the game. Well, there are several benefits and drawbacks of using the modded version, and we will discuss them in this section.

              -

              Benefits of using FIFA Mobile APK Mod Unlimited Money

              -

              Some of the benefits of using FIFA Mobile APK Mod Unlimited Money are:

              -
                -
              • You get unlimited money to buy players, packs, upgrades, and more. You don't have to worry about running out of coins or gems, or spending real money to get them.
              • -
              • You get access to all the features and modes of the game without any restrictions. You don't have to wait for timers, energy, or level requirements to play the game.
              • -
              • You get to enjoy the game with enhanced graphics, performance, and stability. The modded version has improved visuals, sound effects, and gameplay speed.
              • -
              • You get to customize the game according to your preferences. You can change the language, difficulty, camera angle, controls, and more.
              • -
              -

              Risks and drawbacks of using FIFA Mobile APK Mod Unlimited Money

              -

              Some of the risks and drawbacks of using FIFA Mobile APK Mod Unlimited Money are:

              -

              fifa mobile mod apk unlimited coins and points
              -fifa mobile hack apk download free money
              -fifa mobile 23 mod apk unlimited gems and cash
              -fifa mobile mod menu apk with money unlock
              -fifa mobile cheats apk unlimited money and energy
              -fifa mobile 22 mod apk unlimited everything
              -fifa mobile modded apk free money and players
              -fifa mobile hack version apk unlimited coins
              -fifa mobile 21 mod apk unlimited money and gems
              -fifa mobile mod apk latest version with money
              -fifa mobile hack tool apk unlimited resources
              -fifa mobile cracked apk unlimited money and stamina
              -fifa mobile 20 mod apk unlimited money and points
              -fifa mobile premium apk with money mod
              -fifa mobile hack online apk unlimited cash and gems
              -fifa mobile modded version apk unlimited money and players
              -fifa mobile hack generator apk unlimited coins and points
              -fifa mobile full apk with money mod
              -fifa mobile 19 mod apk unlimited money and gems
              -fifa mobile pro apk with money unlock
              -fifa mobile hack app apk unlimited resources and energy
              -fifa mobile patched apk unlimited money and stamina
              -fifa mobile 18 mod apk unlimited money and points
              -fifa mobile vip apk with money mod
              -fifa mobile hack no verification apk unlimited cash and gems
              -fifa mobile hacked game apk unlimited money and players
              -fifa mobile hack without human verification apk unlimited coins and points
              -fifa mobile unlocked apk with money mod
              -fifa mobile 17 mod apk unlimited money and gems
              -fifa mobile plus apk with money unlock
              -fifa mobile hack no survey apk unlimited resources and energy
              -fifa mobile modded game apk unlimited money and stamina
              -fifa mobile 16 mod apk unlimited money and points
              -fifa mobile gold apk with money mod
              -fifa mobile hack no root apk unlimited cash and gems
              -fifa mobile hacked version download apk unlimited money and players
              -fifa mobile hack easy apk unlimited coins and points
              -fifa mobile mega mod apk with money unlock
              -fifa mobile 15 mod apk unlimited money and gems
              -fifa mobile deluxe apk with money mod
              -fifa mobile hack for android apk unlimited resources and energy
              -fifa mobile super mod apk unlimited money and stamina
              -fifa mobile 14 mod apk unlimited money and points
              -fifa mobile ultimate mod apk with money unlock
              -fifa mobile hack for ios apk unlimited cash and gems
              -fifa mobile hacked app download apk unlimited money and players
              -fifa mobile hack without download apk unlimited coins and points

              -
                -
              • You might face compatibility issues with your device or operating system. The modded version might not work properly on some devices or versions of Android.
              • -
              • You might encounter bugs, glitches, or errors in the game. The modded version might not be updated regularly or tested thoroughly for quality assurance.
              • -
              • You might lose your progress or data in the game. The modded version might not sync with your Google Play account or cloud storage.
              • -
              • You might get banned from the game or face legal consequences. The modded version violates the terms and conditions of EA Sports and Google Play. You might get detected by their anti-cheat system and lose access to the game or face legal action.
              • -
              -

              How to play FIFA Mobile APK Mod Unlimited Money like a pro

              -

              Now that you know what FIFA Mobile APK Mod Unlimited Money is and how to download and install it, you might want to know how to play it like a pro. In this section, we will give you some tips and tricks for building and managing your ultimate team and mastering the different modes and challenges in the game.

              -

              Tips and tricks for building and managing your ultimate team

              -

              Some of the tips and tricks for building and managing your ultimate team are:

              -
                -
              • Use your unlimited money wisely. Don't spend it all on buying expensive players or packs. Save some for upgrading your players' skills, attributes, and chemistry.
              • -
              • Choose your players based on their ratings, positions, roles, and styles. Don't just go for the highest-rated players or your favorite players. Consider their strengths, weaknesses, compatibility, and suitability for your team.
              • -
              • Experiment with different formations, tactics, and strategies. Don't stick to one formation or tactic all the time. Try different combinations and see what works best for your team.
              • -
              • Rotate your players regularly. Don't use the same players for every match. Give some rest to your tired or injured players and give some chances to your bench players.
              • -
              -

              Tips and tricks for mastering the different modes and challenges

              -

              Some of the tips and tricks for mastering the different modes and challenges are:

              -
                -
              • Play VS Attack mode to earn rewards and rank up in the leaderboards. VS Attack mode is a fast-paced mode where you compete against other players in real-time matches. You have to score as many goals as possible in a limited time while defending your own goal.
              • -
              • Play Head to Head mode to test your skills against other players in full matches. Head to Head mode is a realistic mode where you play against other players in 11v11 matches. You have to control your whole team and use your skill moves, tactics, and strategies to win.
              • -
              • Play Manager Mode to manage your team like a real soccer manager. Manager Mode is a simulation mode where you take charge of your team's finances, transfers, contracts, training, scouting, and more. You have to make smart decisions and balance your budget while improving your team's performance.
              • -
              • Play FIFA World Cup 2022 mode to relive the biggest soccer event in the world. FIFA World Cup 2022 mode is a licensed mode where you can choose any of the 32 qualified nations and play through the group stage, knockout stage, and final stage of the tournament. You can also customize your squad, kits, badges, and more.
              • -
              -

              Comparison table of FIFA Mobile APK Mod Unlimited Money and the official version

              - - - - - - - -
              | FIFA Mobile APK Mod Unlimited Money | FIFA Mobile Official Version |
              | --- | --- |
              | Unlimited money | Limited money |
              | All features and modes unlocked | Some features and modes locked |
              | Enhanced graphics, performance, and stability | Standard graphics, performance, and stability |
              | Customizable settings and options | Fixed settings and options |
              | Potential compatibility issues, bugs, errors, data loss, or ban | No compatibility issues, bugs, errors, data loss, or ban |
              -

              Conclusion

              -

              FIFA Mobile APK Mod Unlimited Money is a modded version of the popular soccer game FIFA Mobile by EA Sports. It gives you unlimited money and other perks that make the game more fun and easy to play. However, it also comes with some risks and drawbacks that you should be aware of before using it. In this article, we have told you everything you need to know about FIFA Mobile APK Mod Unlimited Money, including what it is, how to download and install it, why you should use it, how to play it like a pro, and how it compares to the official version. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

              -

              FAQs

              -

              Here are some frequently asked questions about FIFA Mobile APK Mod Unlimited Money:

              -
                -
              1. Is FIFA Mobile APK Mod Unlimited Money safe to use?
              2. -

                FIFA Mobile APK Mod Unlimited Money is not officially endorsed or supported by EA Sports or Google Play. It is a third-party modification that violates the terms and conditions of the game and the platform. Therefore, it is not safe to use and may cause harm to your device or account. Use it at your own risk.

                -
              3. How do I update FIFA Mobile APK Mod Unlimited Money?
              4. -

                FIFA Mobile APK Mod Unlimited Money may not be updated regularly or automatically by the mod developers or the websites that offer it. You may have to manually check for updates and download and install them yourself. However, be careful as some updates may not be compatible with your device or version of the game.

                -
              5. Can I play FIFA Mobile APK Mod Unlimited Money online with other players?
              6. -

                FIFA Mobile APK Mod Unlimited Money allows you to play online with other players who are using the same modded version of the game. However, you cannot play online with players who are using the official version of the game. You may also face connection issues or lag while playing online.

                -
              7. Can I transfer my progress from FIFA Mobile APK Mod Unlimited Money to the official version of the game?
              8. -

                No, you cannot transfer your progress from FIFA Mobile APK Mod Unlimited Money to the official version of the game. The modded version does not sync with your Google Play account or cloud storage. If you want to switch to the official version of the game, you will have to start from scratch.

                -
              9. Can I use FIFA Mobile APK Mod Unlimited Money on iOS devices?
              10. -

                No, you cannot use FIFA Mobile APK Mod Unlimited Money on iOS devices. The modded version is only available for Android devices. If you want to play FIFA Mobile on iOS devices, you will have to download the official version of the game from the App Store.

                -

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download Film 5 Cm Mp4 Gratis !!INSTALL!!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Download Film 5 Cm Mp4 Gratis !!INSTALL!!.md deleted file mode 100644 index 9cca93bf281a3d52a8bcaea1a2bcfb7264fbc9b4..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Download Film 5 Cm Mp4 Gratis !!INSTALL!!.md +++ /dev/null @@ -1,19 +0,0 @@ -
              -

              Download Film 5 cm MP4 Gratis: Easy and Fast Ways

              -

              5 cm is one of the most popular Indonesian films and is widely sought after by movie lovers. It tells the story of five friends who decide to climb Mount Semeru, the highest peak in Java, to find the meaning of their friendship. The film stars Fedi Nuril, Pevita Pearce, Herjunot Ali, Raline Shah, and Denny Sumargo.

              -

              If you want to watch 5 cm but don't have the DVD and it is no longer showing in cinemas, you can try to download the 5 cm MP4 for free on the internet. Several websites provide free 5 cm MP4 download links in good quality. Here are some easy and fast ways to download the 5 cm MP4 for free:

              -

              download film 5 cm mp4 gratis


              Download File > https://urlcod.com/2uHxUq



              -
                -
              1. Visit Vidio, an online video streaming platform that offers a wide range of content, including the film 5 cm. You can watch 5 cm online on this site by subscribing to the Vidio Premier package. If you want to download the 5 cm MP4 for free, you can use the Vidio Downloader app, which you can get from the Google Play Store or the App Store. This app lets you download videos from Vidio easily and quickly.
              2. -
              3. Visit Indoseries21, a site for streaming and downloading the latest films from abroad as well as from Indonesia. It provides free 5 cm MP4 download links in good quality such as HD, Bluray, WEB-DL, DVDRip, and HDRip, in MP4 and MKV video formats and at resolutions such as 360p, 480p, and 720p up to 1080p. You can choose the download link that suits your preference and your device's storage capacity.
              4. -
              5. Visit JalanTikus, a site that offers all kinds of tips and tricks about technology and entertainment. It also explains how to watch and download the 5 cm MP4 for free easily and quickly. You can follow the steps given on the site to get free 5 cm MP4 download links from various sources such as Google Drive, Racaty, Mediafire, Uptobox, Zippyshare, and others.
              6. -
              -

              Those are some easy and fast ways to download the 5 cm MP4 for free on the internet. We hope this article is useful, and enjoy watching 5 cm with your friends or family.

              - -

              5 cm offers not only an engaging and inspiring story but also beautiful, stunning natural scenery. The film was shot at several locations in Indonesia, such as Jakarta, Yogyakarta, Bromo, Ranu Pane, and Mahameru. You can see the beauty of Mount Semeru towering high, surrounded by white clouds, and the spectacular sunrise from the summit of Mahameru, the final destination of the five friends' journey.

              -

              5 cm also serves up emotional scenes that pull you into the story. You can feel joy, anxiety, sadness, tenderness, and happiness along with the characters. You can watch how their friendship is tested by the obstacles and challenges they face while climbing Mount Semeru, and how their love grows and changes with the time and distance that separate them.

              -

              5 cm is worth watching for anyone who enjoys drama, adventure, and romance films. It is also a good pick if you are looking for motivation and inspiration in life. It will make you think about what you want and what you are doing to achieve it, and it will make you appreciate the friendship and love you have.

              -

              So, what are you waiting for? Download the 5 cm MP4 for free on the internet and enjoy the film right now.

              -

              7b8c122e87
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Guardians Of The Galaxy Vol. 2 Movies Hd 720p In Hindi [UPDATED].md b/spaces/tioseFevbu/cartoon-converter/scripts/Guardians Of The Galaxy Vol. 2 Movies Hd 720p In Hindi [UPDATED].md deleted file mode 100644 index 55aec2dec5fc3dc8ba110f59ce420e7a3680c1bb..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Guardians Of The Galaxy Vol. 2 Movies Hd 720p In Hindi [UPDATED].md +++ /dev/null @@ -1,22 +0,0 @@ -

              How to Watch Guardians of the Galaxy Vol. 2 in HD 720p in Hindi

              -

              If you are a fan of Marvel's Guardians of the Galaxy, you might be wondering how to watch the second installment of the series in HD 720p in Hindi. Guardians of the Galaxy Vol. 2 is a 2017 action-adventure comedy film that follows the adventures of Star-Lord, Gamora, Drax, Rocket, and Baby Groot as they try to unravel the mystery of Peter Quill's parentage. The film features an ensemble cast of Chris Pratt, Zoe Saldana, Dave Bautista, Vin Diesel, Bradley Cooper, Michael Rooker, Karen Gillan, Pom Klementieff, Elizabeth Debicki, Chris Sullivan, Sean Gunn, Sylvester Stallone, and Kurt Russell.

              -

              Guardians of the Galaxy Vol. 2 movies hd 720p in hindi


              Download Zip » https://urlcod.com/2uHyi8



              -

              There are several ways to watch Guardians of the Galaxy Vol. 2 in HD 720p in Hindi. Here are some of them:

              -
                -
              • Disney+ Hotstar: Disney+ Hotstar is a streaming service that offers a variety of movies and shows from Disney, Marvel, Star Wars, Pixar, National Geographic, and more. You can watch Guardians of the Galaxy Vol. 2 on Disney+ Hotstar with a subscription or a VIP plan. The film is available in English and Hindi languages[^1^].
              • -
              • NM Flix: NM Flix is a website that provides free downloads of Hindi dubbed movies in various formats and qualities. You can download Guardians of the Galaxy Vol. 2 in BRRip 480p or 720p dual audio [Hindi – English] from NM Flix[^2^]. However, be careful of pop-up ads and malware that might harm your device.
              • -
              • JaguarFilmy: JaguarFilmy is another website that offers free downloads of Bollywood and Hollywood movies in Hindi and English languages. You can download Guardians of the Galaxy Vol. 2 in dual audio Hindi-English 480p, 720p, or 1080p from JaguarFilmy[^3^]. Again, be wary of ads and viruses that might infect your device.
              • -
              • DharamHero: DharamHero is a blog that posts links to download Hollywood movies dubbed in Hindi. You can find a link to download Guardians of the Galaxy Vol. 3 (the upcoming sequel) in 1080p dual audio (Hindi+English) from DharamHero[^4^]. However, this link might not be reliable or safe as the film has not been released yet.
              • -
              -

              These are some of the ways to watch Guardians of the Galaxy Vol. 2 in HD 720p in Hindi. However, we recommend that you watch the film legally and ethically from authorized sources such as Disney+ Hotstar or other streaming platforms that have the rights to distribute the film. This way, you can enjoy the film without compromising your device's security or violating any laws.


              Guardians of the Galaxy Vol. 2 is written and directed by James Gunn, who also helmed the first film in 2014. The film is based on the Marvel Comics superhero team of the same name, created by Dan Abnett and Andy Lanning. The film also features characters created by Steve Englehart, Steve Gan, Jim Starlin, Stan Lee, Larry Lieber, Jack Kirby, Bill Mantlo, Keith Giffen, Steve Gerber, and Val Mayerik.

              -

              -

              The film boasts an impressive cast of talented actors who bring the colorful characters to life. Chris Pratt returns as Peter Quill, aka Star-Lord, the charismatic leader of the Guardians who is half-human and half-celestial. Zoe Saldana plays Gamora, the adopted daughter of Thanos and a skilled assassin who seeks redemption. Dave Bautista portrays Drax, a muscular warrior who takes everything literally and has a vendetta against Thanos. Vin Diesel voices Baby Groot, a cute and innocent sapling who is the offspring of Groot, a sentient tree-like creature who sacrificed himself in the first film. Bradley Cooper voices Rocket, a genetically engineered raccoon who is a master of weapons and sarcasm.

              -

              Michael Rooker reprises his role as Yondu, a blue-skinned space pirate who raised Peter as a child and has a soft spot for him. Karen Gillan returns as Nebula, another adopted daughter of Thanos and Gamora's estranged sister who wants revenge. Pom Klementieff joins the cast as Mantis, an empathic alien who can sense and manipulate emotions. Sylvester Stallone makes a cameo appearance as Stakar Ogord, a legendary Ravager leader who has a history with Yondu. Kurt Russell plays Ego, a powerful celestial being who claims to be Peter's biological father.

              -

              Elizabeth Debicki plays Ayesha, the golden-skinned leader of the Sovereign race who hires and then pursues the Guardians for stealing their precious batteries. Chris Sullivan plays Taserface, a mutinous Ravager who takes over Yondu's ship and crew. Sean Gunn plays Kraglin, Yondu's loyal right-hand man who also serves as the on-set stand-in for Rocket. Tommy Flanagan plays Tullk, a Ravager loyal to Yondu. Laura Haddock plays Meredith Quill, Peter's mother who died of cancer when he was young. Aaron Schwartz provides the facial reference for young Ego.

              7196e7f11a
              -
              -
              \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Manual Stereo Pioneer Deh M1407 Zc.24 [WORK].md b/spaces/tioseFevbu/cartoon-converter/scripts/Manual Stereo Pioneer Deh M1407 Zc.24 [WORK].md deleted file mode 100644 index 8dcaeaa60625e08a94957aa6c8149e6696f1bde9..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Manual Stereo Pioneer Deh M1407 Zc.24 [WORK].md +++ /dev/null @@ -1,76 +0,0 @@ -
              -

              How to Use the Manual Stereo Pioneer Deh M1407 Zc.24

              -

              The Pioneer DEH-M1407 ZC is a car stereo that offers various features and functions for your entertainment and convenience. It supports USB, AUX, CD, and radio modes, as well as Bluetooth connectivity and hands-free calling. In this article, we will show you how to use the stereo and enjoy its benefits.

              -

              USB Mode

              -

              The USB mode allows you to play music files stored on a USB device, such as a flash drive or a smartphone. To use the USB mode, follow these steps:

              -

              Manual Stereo Pioneer Deh M1407 Zc.24


              Download Zip 🌟 https://urlcod.com/2uHyhf



              -
                -
              1. Connect your USB device to the USB port on the front panel of the stereo using a compatible cable.
              2. -
              3. Press the SRC button repeatedly until USB appears on the display.
              4. -
              5. Use the arrow buttons to select a folder or a file, and press the ENTER button to play it.
              6. -
              7. Use the VOL knob to adjust the volume, and the EQ button to change the sound quality.
              8. -
              -

              Note: If you use an MTP connection, you need to set [USB MTP] to [ON] in the SYSTEM settings (page 27 of the manual).

              -

              AUX Mode

              -

              The AUX mode allows you to play audio from an external device, such as an MP3 player or a portable speaker. To use the AUX mode, follow these steps:

              -
                -
              1. Connect your external device to the AUX input jack on the front panel of the stereo using a stereo mini plug.
              2. -
              3. Press the SRC button repeatedly until AUX appears on the display.
              4. -
              5. Use your external device to play audio.
              6. -
              7. Use the VOL knob to adjust the volume, and the EQ button to change the sound quality.
              8. -
              -

              CD Mode

              -

              The CD mode allows you to play audio CDs or MP3/WMA files on a CD-R or CD-RW disc. To use the CD mode, follow these steps:

              -
                -
              1. Insert a disc into the disc slot on the front panel of the stereo. The disc will be automatically loaded and played.
              2. -
              3. Press the SRC button repeatedly until CD appears on the display.
              4. -
              5. Use the arrow buttons to select a track or a file, and press the ENTER button to play it.
              6. -
              7. Use the VOL knob to adjust the volume, and the EQ button to change the sound quality.
              8. -
              -

              Radio Mode

              -

              The radio mode allows you to listen to AM or FM stations. To use the radio mode, follow these steps:

              -
                -
              1. Press the SRC button repeatedly until TUNER appears on the display.
              2. -
              3. Press the BAND button to select AM or FM.
              4. -
              5. Use the arrow buttons to tune in to a station manually, or press and hold them to seek for a station automatically.
              6. -
              7. Use the VOL knob to adjust the volume, and the EQ button to change the sound quality.
              8. -
              - -

              Bluetooth Mode

              -

              The Bluetooth mode allows you to pair your Bluetooth-enabled device with the stereo and stream audio wirelessly. You can also make or receive phone calls hands-free using the built-in microphone and speaker. To use the Bluetooth mode, follow these steps:

              -

              - -
                - -
              1. Press and hold the BT PHONE button for two seconds until BT PAIRING appears on the display.
              2. - -
              3. On your Bluetooth device, search for available devices and select PIONEER DEH-M1407 ZC from the list.
              4. - -
              5. If prompted, enter 0000 as the PIN code.
              6. - -
              7. Once paired successfully, press the SRC button repeatedly until BT AUDIO appears on the display.
              8. - -
              9. Use your Bluetooth device to play audio or make or receive phone calls.
              10. - -
              11. Use the VOL knob to adjust the volume, and the EQ button to change the sound quality.
              12. - -
              - -

              Note: You can also use voice commands by pressing and holding the BT PHONE button for two seconds.

              81aa517590
              -
              -
              \ No newline at end of file diff --git a/spaces/tom-doerr/logo_generator/src/dalle_mini/data.py b/spaces/tom-doerr/logo_generator/src/dalle_mini/data.py deleted file mode 100644 index 78488765dcd04bf40f48c65e50dfcf67ae766acf..0000000000000000000000000000000000000000 --- a/spaces/tom-doerr/logo_generator/src/dalle_mini/data.py +++ /dev/null @@ -1,387 +0,0 @@ -import random -from dataclasses import dataclass, field -from functools import partial - -import jax -import jax.numpy as jnp -import numpy as np -from braceexpand import braceexpand -from datasets import Dataset, load_dataset - -from .model.text import TextNormalizer - - -@dataclass -class Dataset: - dataset_repo_or_path: str - train_file: str = None - validation_file: str = None - streaming: bool = True - use_auth_token: bool = False - text_column: str = "caption" - encoding_column: str = "encoding" - max_train_samples: int = None - max_eval_samples: int = None - preprocessing_num_workers: int = None - overwrite_cache: bool = False - do_train: bool = False - do_eval: bool = True - seed_dataset: int = None - shard_by_host: bool = False - blank_caption_prob: float = 0.0 - clip_score_column: str = "clip_score" - min_clip_score: float = None - max_clip_score: float = None - filter_column: str = None - filter_value: str = None - train_dataset: Dataset = field(init=False) - eval_dataset: Dataset = field(init=False) - rng_dataset: jnp.ndarray = field(init=False) - multi_hosts: bool = field(init=False) - - def __post_init__(self): - if self.seed_dataset is None: - # create a random seed - self.seed_dataset = random.randint(0, 2**32 - 1) - # set numpy rng - self.np_rng = np.random.default_rng(self.seed_dataset) - self.multi_hosts = jax.process_count() > 1 - # feed blank captions only in streaming mode for now - # otherwise dataset could be cached with same blanked captions - if self.blank_caption_prob: - assert ( - self.streaming is True - ), "blank_caption_prob can only be used in streaming mode" - # define data_files - if self.train_file is not None or self.validation_file is not None: - # accept braceexpand notation - for k in ["train_file", "validation_file"]: - f = getattr(self, k) - if isinstance(f, str): - setattr(self, k, list(braceexpand(f))) - # for list of files, split training data shards by host - if ( - isinstance(self.train_file, list) - and self.multi_hosts - and self.shard_by_host - ): - self.train_file = self.train_file[ - jax.process_index() :: jax.process_count() - ] - data_files = { - "train": self.train_file, - "validation": self.validation_file, - } - else: - data_files = None - - # load dataset - dataset = load_dataset( - self.dataset_repo_or_path, - data_files=data_files, - streaming=self.streaming, - use_auth_token=self.use_auth_token, - ) - if self.do_train: - if "train" not in dataset: - raise ValueError("Training requires a training dataset") - self.train_dataset = dataset["train"] - if self.max_train_samples is not None: - self.train_dataset = ( - self.train_dataset.take(self.max_train_samples) - if self.streaming - else self.train_dataset.select(range(self.max_train_samples)) - ) - if self.do_eval: - if "validation" not in dataset: - raise ValueError("Evaluating requires a validation dataset") - self.eval_dataset = dataset["validation"] - if self.max_eval_samples is not None: - self.eval_dataset = ( - self.eval_dataset.take(self.max_eval_samples) - if self.streaming - else self.eval_dataset.select(range(self.max_eval_samples)) - ) - - def preprocess(self, tokenizer, config): - # get required config 
variables - decoder_start_token_id = config.decoder_start_token_id - normalize_text = config.normalize_text - max_length = config.max_text_length - - if self.streaming: - # we need to shuffle early in streaming mode - if hasattr(self, "train_dataset"): - self.train_dataset = self.train_dataset.shuffle( - buffer_size=5000, seed=self.seed_dataset - ) - else: - self.rng_dataset = jax.random.PRNGKey(self.seed_dataset) - - # filter data - partial_filter_function = partial( - filter_function, - filter_column=self.filter_column, - filter_value=self.filter_value, - clip_score_column=self.clip_score_column, - min_clip_score=self.min_clip_score, - max_clip_score=self.max_clip_score, - ) - for ds in ["train_dataset", "eval_dataset"]: - if hasattr(self, ds): - setattr( - self, - ds, - ( - getattr(self, ds).filter(partial_filter_function) - if self.streaming - else getattr(self, ds).filter( - partial_filter_function, - num_proc=self.preprocessing_num_workers, - load_from_cache_file=not self.overwrite_cache, - desc="Filtering datasets", - ) - ), - ) - - # normalize text - if normalize_text: - text_normalizer = TextNormalizer() - partial_normalize_function = partial( - normalize_function, - text_column=self.text_column, - text_normalizer=text_normalizer, - ) - for ds in ["train_dataset", "eval_dataset"]: - if hasattr(self, ds): - setattr( - self, - ds, - ( - getattr(self, ds).map(partial_normalize_function) - if self.streaming - else getattr(self, ds).map( - partial_normalize_function, - num_proc=self.preprocessing_num_workers, - load_from_cache_file=not self.overwrite_cache, - desc="Normalizing datasets", - ) - ), - ) - - # blank captions - if self.blank_caption_prob: - partial_blank_caption_function = partial( - blank_caption_function, - text_column=self.text_column, - blank_caption_prob=self.blank_caption_prob, - rng=self.np_rng, - ) - if hasattr(self, "train_dataset"): - self.train_dataset = ( - self.train_dataset.map(partial_blank_caption_function) - if self.streaming - else self.train_dataset.map( - partial_blank_caption_function, - num_proc=None - if self.seed_dataset - else self.preprocessing_num_workers, - load_from_cache_file=False, - desc="Blanking some captions", - ) - ) - - # preprocess - partial_preprocess_function = partial( - preprocess_function, - tokenizer=tokenizer, - text_column=self.text_column, - encoding_column=self.encoding_column, - max_length=max_length, - decoder_start_token_id=decoder_start_token_id, - ) - for ds in ["train_dataset", "eval_dataset"]: - if hasattr(self, ds): - setattr( - self, - ds, - ( - getattr(self, ds).map( - partial_preprocess_function, - batched=True, - remove_columns=[ - self.text_column, - self.encoding_column, - ], - ) - if self.streaming - else getattr(self, ds).map( - partial_preprocess_function, - batched=True, - remove_columns=getattr(ds, "column_names"), - num_proc=self.preprocessing_num_workers, - load_from_cache_file=not self.overwrite_cache, - desc="Preprocessing datasets", - ) - ), - ) - - def dataloader(self, split, batch_size, epoch=None): - def _dataloader_datasets_non_streaming( - dataset: Dataset, - rng: jax.random.PRNGKey = None, - ): - """ - Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. - Shuffle batches if rng is set. - """ - steps_per_epoch = len(dataset) // batch_size - - if rng is not None: - batch_idx = jax.random.permutation(rng, len(dataset)) - else: - batch_idx = jnp.arange(len(dataset)) - - batch_idx = batch_idx[ - : steps_per_epoch * batch_size - ] # Skip incomplete batch. 
- batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) - - for idx in batch_idx: - batch = dataset[idx] - batch = {k: jnp.array(v) for k, v in batch.items()} - yield batch - - def _dataloader_datasets_streaming( - dataset: Dataset, - epoch: int, - ): - keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"] - batch = {k: [] for k in keys} - first_loop = True # stop after one loop in some cases - while (self.multi_hosts and split == "train") or first_loop: - # in multi-host, we run forever (no epoch) as hosts need to stop - # at the same time and training data may not be split equally - # For validation data we put the entire batch on each host and then - # keep only the one specific to each host (could be improved but not necessary) - if epoch is not None: - assert split == "train" - # reshuffle training data at each epoch - dataset.set_epoch(epoch) - epoch += 1 - for item in dataset: - for k in keys: - batch[k].append(item[k]) - if len(batch[keys[0]]) == batch_size: - batch = {k: jnp.array(v) for k, v in batch.items()} - yield batch - batch = {k: [] for k in keys} - first_loop = False - - if split == "train": - ds = self.train_dataset - elif split == "eval": - ds = self.eval_dataset - else: - raise ValueError(f'split must be "train" or "eval", got {split}') - - if self.streaming: - return _dataloader_datasets_streaming(ds, epoch) - else: - if split == "train": - self.rng_dataset, input_rng = jax.random.split(self.rng_dataset) - return _dataloader_datasets_non_streaming(ds, input_rng) - - @property - def length(self): - len_train_dataset, len_eval_dataset = None, None - if self.streaming: - # we don't know the length, let's just assume max_samples if defined - if self.max_train_samples is not None: - len_train_dataset = self.max_train_samples - if self.max_eval_samples is not None: - len_eval_dataset = self.max_eval_samples - else: - len_train_dataset = ( - len(self.train_dataset) if hasattr(self, "train_dataset") else None - ) - len_eval_dataset = ( - len(self.eval_dataset) if hasattr(self, "eval_dataset") else None - ) - return len_train_dataset, len_eval_dataset - - -def shift_tokens_right(input_ids: np.array, decoder_start_token_id: int): - """ - Shift input ids one token to the right. 
- """ - shifted_input_ids = np.zeros(input_ids.shape) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id - return shifted_input_ids - - -def blank_caption_function(example, text_column, blank_caption_prob, rng=None): - if ( - blank_caption_prob - and (rng.random() if rng is not None else np.random.random()) - < blank_caption_prob - ): - example[text_column] = "" - return example - - -def normalize_function(example, text_column, text_normalizer): - example[text_column] = text_normalizer(example[text_column]) - return example - - -def filter_function( - example, - min_clip_score, - max_clip_score, - clip_score_column, - filter_column, - filter_value, -): - if min_clip_score is not None and example[clip_score_column] < min_clip_score: - return False - if max_clip_score is not None and example[clip_score_column] > max_clip_score: - return False - if filter_column is not None and example[filter_column] != filter_value: - return False - return True - - -def preprocess_function( - examples, - tokenizer, - text_column, - encoding_column, - max_length, - decoder_start_token_id, -): - inputs = examples[text_column] - # Setting padding="max_length" as we need fixed length inputs for jitted functions - model_inputs = tokenizer( - inputs, - max_length=max_length, - padding="max_length", - truncation=True, - return_tensors="np", - ) - - # set up targets - # Note: labels correspond to our target indices - # decoder input ids are the same but shifted to the right with bos at the beginning (and without last token) - labels = examples[encoding_column] - labels = np.asarray(labels) - - # We need the labels, in addition to the decoder_input_ids, for the compute_loss function - model_inputs["labels"] = labels - - # In our case, this prepends the bos token and removes the last one - decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id) - model_inputs["decoder_input_ids"] = decoder_input_ids - - return model_inputs diff --git a/spaces/tomandandy/MusicGen3/audiocraft/data/audio_utils.py b/spaces/tomandandy/MusicGen3/audiocraft/data/audio_utils.py deleted file mode 100644 index 76d4bc2a33ce722d879db2af33cd1336bd6b1fb3..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/audiocraft/data/audio_utils.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import sys -import typing as tp - -import julius -import torch -import torchaudio - - -def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor: - """Convert audio to the given number of channels. - - Args: - wav (torch.Tensor): Audio wave of shape [B, C, T]. - channels (int): Expected number of channels as output. - Returns: - torch.Tensor: Downmixed or unchanged audio wave [B, C, T]. - """ - *shape, src_channels, length = wav.shape - if src_channels == channels: - pass - elif channels == 1: - # Case 1: - # The caller asked 1-channel audio, and the stream has multiple - # channels, downmix all channels. - wav = wav.mean(dim=-2, keepdim=True) - elif src_channels == 1: - # Case 2: - # The caller asked for multiple channels, but the input file has - # a single channel, replicate the audio over all channels. 
- wav = wav.expand(*shape, channels, length) - elif src_channels >= channels: - # Case 3: - # The caller asked for multiple channels, and the input file has - # more channels than requested. In that case return the first channels. - wav = wav[..., :channels, :] - else: - # Case 4: What is a reasonable choice here? - raise ValueError('The audio file has less channels than requested but is not mono.') - return wav - - -def convert_audio(wav: torch.Tensor, from_rate: float, - to_rate: float, to_channels: int) -> torch.Tensor: - """Convert audio to new sample rate and number of audio channels. - """ - wav = julius.resample_frac(wav, int(from_rate), int(to_rate)) - wav = convert_audio_channels(wav, to_channels) - return wav - - -def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14, - loudness_compressor: bool = False, energy_floor: float = 2e-3): - """Normalize an input signal to a user loudness in dB LKFS. - Audio loudness is defined according to the ITU-R BS.1770-4 recommendation. - - Args: - wav (torch.Tensor): Input multichannel audio data. - sample_rate (int): Sample rate. - loudness_headroom_db (float): Target loudness of the output in dB LUFS. - loudness_compressor (bool): Uses tanh for soft clipping. - energy_floor (float): anything below that RMS level will not be rescaled. - Returns: - output (torch.Tensor): Loudness normalized output data. - """ - energy = wav.pow(2).mean().sqrt().item() - if energy < energy_floor: - return wav - transform = torchaudio.transforms.Loudness(sample_rate) - input_loudness_db = transform(wav).item() - # calculate the gain needed to scale to the desired loudness level - delta_loudness = -loudness_headroom_db - input_loudness_db - gain = 10.0 ** (delta_loudness / 20.0) - output = gain * wav - if loudness_compressor: - output = torch.tanh(output) - assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt()) - return output - - -def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None: - """Utility function to clip the audio with logging if specified.""" - max_scale = wav.abs().max() - if log_clipping and max_scale > 1: - clamp_prob = (wav.abs() > 1).float().mean().item() - print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):", - clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr) - wav.clamp_(-1, 1) - - -def normalize_audio(wav: torch.Tensor, normalize: bool = True, - strategy: str = 'peak', peak_clip_headroom_db: float = 1, - rms_headroom_db: float = 18, loudness_headroom_db: float = 14, - loudness_compressor: bool = False, log_clipping: bool = False, - sample_rate: tp.Optional[int] = None, - stem_name: tp.Optional[str] = None) -> torch.Tensor: - """Normalize the audio according to the prescribed strategy (see after). - - Args: - wav (torch.Tensor): Audio data. - normalize (bool): if `True` (default), normalizes according to the prescribed - strategy (see after). If `False`, the strategy is only used in case clipping - would happen. - strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak', - i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square - with extra headroom to avoid clipping. 'clip' just clips. - peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy. - rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger - than the `peak_clip` one to avoid further clipping. 
- loudness_headroom_db (float): Target loudness for loudness normalization. - loudness_compressor (bool): If True, uses tanh based soft clipping. - log_clipping (bool): If True, basic logging on stderr when clipping still - occurs despite strategy (only for 'rms'). - sample_rate (int): Sample rate for the audio data (required for loudness). - stem_name (Optional[str]): Stem name for clipping logging. - Returns: - torch.Tensor: Normalized audio. - """ - scale_peak = 10 ** (-peak_clip_headroom_db / 20) - scale_rms = 10 ** (-rms_headroom_db / 20) - if strategy == 'peak': - rescaling = (scale_peak / wav.abs().max()) - if normalize or rescaling < 1: - wav = wav * rescaling - elif strategy == 'clip': - wav = wav.clamp(-scale_peak, scale_peak) - elif strategy == 'rms': - mono = wav.mean(dim=0) - rescaling = scale_rms / mono.pow(2).mean().sqrt() - if normalize or rescaling < 1: - wav = wav * rescaling - _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name) - elif strategy == 'loudness': - assert sample_rate is not None, "Loudness normalization requires sample rate." - wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor) - _clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name) - else: - assert wav.abs().max() < 1 - assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'" - return wav - - -def f32_pcm(wav: torch.Tensor) -> torch.Tensor: - """Convert audio to float 32 bits PCM format. - """ - if wav.dtype.is_floating_point: - return wav - else: - assert wav.dtype == torch.int16 - return wav.float() / 2**15 - - -def i16_pcm(wav: torch.Tensor) -> torch.Tensor: - """Convert audio to int 16 bits PCM format. - - ..Warning:: There exist many formula for doing this convertion. None are perfect - due to the asymetry of the int16 range. One either have possible clipping, DC offset, - or inconsistancies with f32_pcm. If the given wav doesn't have enough headroom, - it is possible that `i16_pcm(f32_pcm)) != Identity`. - """ - if wav.dtype.is_floating_point: - assert wav.abs().max() <= 1 - candidate = (wav * 2 ** 15).round() - if candidate.max() >= 2 ** 15: # clipping would occur - candidate = (wav * (2 ** 15 - 1)).round() - return candidate.short() - else: - assert wav.dtype == torch.int16 - return wav diff --git a/spaces/tomofi/MMOCR/mmocr/models/kie/losses/sdmgr_loss.py b/spaces/tomofi/MMOCR/mmocr/models/kie/losses/sdmgr_loss.py deleted file mode 100644 index dba2d12d1ba9534ff014e38f408e3efaeb281bf0..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/kie/losses/sdmgr_loss.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmdet.models.losses import accuracy -from torch import nn - -from mmocr.models.builder import LOSSES - - -@LOSSES.register_module() -class SDMGRLoss(nn.Module): - """The implementation the loss of key information extraction proposed in - the paper: Spatial Dual-Modality Graph Reasoning for Key Information - Extraction. - - https://arxiv.org/abs/2103.14470. 
- """ - - def __init__(self, node_weight=1.0, edge_weight=1.0, ignore=-100): - super().__init__() - self.loss_node = nn.CrossEntropyLoss(ignore_index=ignore) - self.loss_edge = nn.CrossEntropyLoss(ignore_index=-1) - self.node_weight = node_weight - self.edge_weight = edge_weight - self.ignore = ignore - - def forward(self, node_preds, edge_preds, gts): - node_gts, edge_gts = [], [] - for gt in gts: - node_gts.append(gt[:, 0]) - edge_gts.append(gt[:, 1:].contiguous().view(-1)) - node_gts = torch.cat(node_gts).long() - edge_gts = torch.cat(edge_gts).long() - - node_valids = torch.nonzero( - node_gts != self.ignore, as_tuple=False).view(-1) - edge_valids = torch.nonzero(edge_gts != -1, as_tuple=False).view(-1) - return dict( - loss_node=self.node_weight * self.loss_node(node_preds, node_gts), - loss_edge=self.edge_weight * self.loss_edge(edge_preds, edge_gts), - acc_node=accuracy(node_preds[node_valids], node_gts[node_valids]), - acc_edge=accuracy(edge_preds[edge_valids], edge_gts[edge_valids])) diff --git a/spaces/tomofi/MMOCR/tests/test_models/test_ocr_backbone.py b/spaces/tomofi/MMOCR/tests/test_models/test_ocr_backbone.py deleted file mode 100644 index 7fc3a2b9b92ffacfd4626f62150915b04c3b3020..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_models/test_ocr_backbone.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import pytest -import torch - -from mmocr.models.textrecog.backbones import (ResNet, ResNet31OCR, ResNetABI, - ShallowCNN, VeryDeepVgg) - - -def test_resnet31_ocr_backbone(): - """Test resnet backbone.""" - with pytest.raises(AssertionError): - ResNet31OCR(2.5) - - with pytest.raises(AssertionError): - ResNet31OCR(3, layers=5) - - with pytest.raises(AssertionError): - ResNet31OCR(3, channels=5) - - # Test ResNet18 forward - model = ResNet31OCR() - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 32, 160) - feat = model(imgs) - assert feat.shape == torch.Size([1, 512, 4, 40]) - - -def test_vgg_deep_vgg_ocr_backbone(): - - model = VeryDeepVgg() - model.init_weights() - model.train() - - imgs = torch.randn(1, 3, 32, 160) - feats = model(imgs) - assert feats.shape == torch.Size([1, 512, 1, 41]) - - -def test_shallow_cnn_ocr_backbone(): - - model = ShallowCNN() - model.init_weights() - model.train() - - imgs = torch.randn(1, 1, 32, 100) - feat = model(imgs) - assert feat.shape == torch.Size([1, 512, 8, 25]) - - -def test_resnet_abi(): - """Test resnet backbone.""" - with pytest.raises(AssertionError): - ResNetABI(2.5) - - with pytest.raises(AssertionError): - ResNetABI(3, arch_settings=5) - - with pytest.raises(AssertionError): - ResNetABI(3, stem_channels=None) - - with pytest.raises(AssertionError): - ResNetABI(arch_settings=[3, 4, 6, 6], strides=[1, 2, 1, 2, 1]) - - # Test forwarding - model = ResNetABI() - model.train() - - imgs = torch.randn(1, 3, 32, 160) - feat = model(imgs) - assert feat.shape == torch.Size([1, 512, 8, 40]) - - -def test_resnet(): - """Test all ResNet backbones.""" - - resnet45_aster = ResNet( - in_channels=3, - stem_channels=[64, 128], - block_cfgs=dict(type='BasicBlock', use_conv1x1='True'), - arch_layers=[3, 4, 6, 6, 3], - arch_channels=[32, 64, 128, 256, 512], - strides=[(2, 2), (2, 2), (2, 1), (2, 1), (2, 1)]) - - resnet45_abi = ResNet( - in_channels=3, - stem_channels=32, - block_cfgs=dict(type='BasicBlock', use_conv1x1='True'), - arch_layers=[3, 4, 6, 6, 3], - arch_channels=[32, 64, 128, 256, 512], - strides=[2, 1, 2, 1, 1]) - - resnet_31 = ResNet( - in_channels=3, - 
stem_channels=[64, 128], - block_cfgs=dict(type='BasicBlock'), - arch_layers=[1, 2, 5, 3], - arch_channels=[256, 256, 512, 512], - strides=[1, 1, 1, 1], - plugins=[ - dict( - cfg=dict(type='Maxpool2d', kernel_size=2, stride=(2, 2)), - stages=(True, True, False, False), - position='before_stage'), - dict( - cfg=dict(type='Maxpool2d', kernel_size=(2, 1), stride=(2, 1)), - stages=(False, False, True, False), - position='before_stage'), - dict( - cfg=dict( - type='ConvModule', - kernel_size=3, - stride=1, - padding=1, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU')), - stages=(True, True, True, True), - position='after_stage') - ]) - img = torch.rand(1, 3, 32, 100) - - assert resnet45_aster(img).shape == torch.Size([1, 512, 1, 25]) - assert resnet45_abi(img).shape == torch.Size([1, 512, 8, 25]) - assert resnet_31(img).shape == torch.Size([1, 512, 4, 25]) diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/collect_env.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/collect_env.py deleted file mode 100644 index 2d0641dda61c9950cb54d0552106246248e571ef..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/collect_env.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import PIL - -from torch.utils.collect_env import get_pretty_env_info - - -def get_pil_version(): - return "\n Pillow ({})".format(PIL.__version__) - - -def collect_env_info(): - env_str = get_pretty_env_info() - env_str += get_pil_version() - return env_str diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py deleted file mode 100644 index eb2779b66e1681787a11f8383ba2dd33abe58bd4..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py +++ /dev/null @@ -1,85 +0,0 @@ -# We follow the original implementation which -# adopts the Caffe pre-trained backbone. 
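Editorial note on the comment above: a Caffe pre-trained ResNet-50 expects Caffe-style preprocessing, that is, BGR channel order and per-channel mean subtraction with no standard-deviation scaling. This is why the normalization block further down in this config is defined as follows (reproduced here for reference only):

```python
# Caffe-style preprocessing: mean-only normalization, std left at 1.0, BGR order.
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
```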
-_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='AutoAssign', - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - extra_convs_on_inputs=True, - caffe2_xavier_init=True, - num_outs=5, - relu_before_extra_convs=True), - bbox_head=dict( - type='AutoAssignHead', - norm_on_bbox=True, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - loss_bbox=dict(type='GIoULoss', loss_weight=5.0)), - train_cfg=None, - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=1.0 / 1000, - step=[8, 11]) -total_epochs = 12 diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 87bbfdc827eb17654527ad5305ec80bd9e84b78a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './ga_faster_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/model_converters/regnet2mmdet.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/model_converters/regnet2mmdet.py deleted file mode 100644 index 9f4e316d37569a6fbeb6329bd36abaa822b20ccf..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/model_converters/regnet2mmdet.py +++ /dev/null @@ -1,89 +0,0 @@ -import argparse -from collections import OrderedDict - -import torch - - 
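Before the converter helpers, a short editorial illustration of the renaming scheme they implement. The example keys below are hypothetical and not part of the original script, but each before/after pair follows the logic of `convert_stem`, `convert_head`, and `convert_reslayer` defined next.

```python
# Hypothetical examples of the pycls -> mmdet key renaming performed by this script.
PYCLS_TO_MMDET_EXAMPLES = {
    "stem.conv.weight": "conv1.weight",                    # convert_stem
    "stem.bn.running_mean": "bn1.running_mean",            # convert_stem
    "head.fc.weight": "fc.weight",                         # convert_head
    "s1.b1.proj.weight": "layer1.0.downsample.0.weight",   # convert_reslayer: projection shortcut
    "s1.b1.bn.weight": "layer1.0.downsample.1.weight",     # convert_reslayer: shortcut BN
    "s1.b1.f.a.weight": "layer1.0.conv1.weight",           # convert_reslayer: first conv of block
    "s2.b3.f.b_bn.bias": "layer2.2.bn2.bias",              # convert_reslayer: BN after second conv
}
```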
-def convert_stem(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('stem.conv', 'conv1') - new_key = new_key.replace('stem.bn', 'bn1') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_head(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('head.fc', 'fc') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_reslayer(model_key, model_weight, state_dict, converted_names): - split_keys = model_key.split('.') - layer, block, module = split_keys[:3] - block_id = int(block[1:]) - layer_name = f'layer{int(layer[1:])}' - block_name = f'{block_id - 1}' - - if block_id == 1 and module == 'bn': - new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' - elif block_id == 1 and module == 'proj': - new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' - elif module == 'f': - if split_keys[3] == 'a_bn': - module_name = 'bn1' - elif split_keys[3] == 'b_bn': - module_name = 'bn2' - elif split_keys[3] == 'c_bn': - module_name = 'bn3' - elif split_keys[3] == 'a': - module_name = 'conv1' - elif split_keys[3] == 'b': - module_name = 'conv2' - elif split_keys[3] == 'c': - module_name = 'conv3' - new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' - else: - raise ValueError(f'Unsupported conversion of key {model_key}') - print(f'Convert {model_key} to {new_key}') - state_dict[new_key] = model_weight - converted_names.add(model_key) - - -def convert(src, dst): - """Convert keys in pycls pretrained RegNet models to mmdet style.""" - # load caffe model - regnet_model = torch.load(src) - blobs = regnet_model['model_state'] - # convert to pytorch style - state_dict = OrderedDict() - converted_names = set() - for key, weight in blobs.items(): - if 'stem' in key: - convert_stem(key, weight, state_dict, converted_names) - elif 'head' in key: - convert_head(key, weight, state_dict, converted_names) - elif key.startswith('s'): - convert_reslayer(key, weight, state_dict, converted_names) - - # check if all layers are converted - for key in blobs: - if key not in converted_names: - print(f'not converted: {key}') - # save checkpoint - checkpoint = dict() - checkpoint['state_dict'] = state_dict - torch.save(checkpoint, dst) - - -def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') - args = parser.parse_args() - convert(args.src, args.dst) - - -if __name__ == '__main__': - main() diff --git a/spaces/trueuserr/psmathur-orca_mini_v2_7b/app.py b/spaces/trueuserr/psmathur-orca_mini_v2_7b/app.py deleted file mode 100644 index 14faf8aa5df48bba903500adad0ecaa842dd909a..0000000000000000000000000000000000000000 --- a/spaces/trueuserr/psmathur-orca_mini_v2_7b/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/psmathur/orca_mini_v2_7b").launch() \ No newline at end of file diff --git a/spaces/typ12323/bingo/README.md b/spaces/typ12323/bingo/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/typ12323/bingo/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-A close reproduction of the main features of the New Bing web UI: usable from mainland China, compatible with most Microsoft Bing AI features, and easy to self-host.
-
-![GitHub stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars)
-![GitHub issues](https://img.shields.io/github/issues/weaigc/bingo)
-[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license)
-
-For bug reports and feedback, please open an issue at https://github.com/weaigc/bingo/issues
              - - diff --git a/spaces/unity/ML-Agents-Pyramids/index.html b/spaces/unity/ML-Agents-Pyramids/index.html deleted file mode 100644 index a659429dbc0ba504aea514d1e2e714b4bc5b70c6..0000000000000000000000000000000000000000 --- a/spaces/unity/ML-Agents-Pyramids/index.html +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - - Unity-MLAgents-Pyramids - - - - -
              - -
              - - - - - diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/99 Nepali Fonts Free Download How to Install and Use Them on Your Device.md b/spaces/usbethFlerru/sovits-modelsV2/example/99 Nepali Fonts Free Download How to Install and Use Them on Your Device.md deleted file mode 100644 index 6e10c164206f78092b044d811be62bac01448800..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/99 Nepali Fonts Free Download How to Install and Use Them on Your Device.md +++ /dev/null @@ -1,6 +0,0 @@ -

              99NepaliFontsfreedownload


              Download Zip 🗸 https://urlcod.com/2uyXqG



              -
              - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/All Is Well Telugu Full Movie Download Kickass The Best Way to Spend Your Weekend.md b/spaces/usbethFlerru/sovits-modelsV2/example/All Is Well Telugu Full Movie Download Kickass The Best Way to Spend Your Weekend.md deleted file mode 100644 index 62ddbe58ac3a8c558da34b2d98a35b61e5ac9379..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/All Is Well Telugu Full Movie Download Kickass The Best Way to Spend Your Weekend.md +++ /dev/null @@ -1,11 +0,0 @@ - -

              The new Kickass Torrents website once again gives users access to a wide range of torrent downloads, including the latest movies, TV, music and software. As a leader in online piracy, Kickass Torrents has earned a reputation for offering high-quality torrents and a user-friendly interface.

              -

              This can be a fast and efficient way to download large files such as movies or TV, but it also has its drawbacks. Since torrenting is based on file sharing, it is often associated with pirated content, which is illegal in many countries.

              -

              All Is Well telugu full movie download kickass


              Download File ---> https://urlcod.com/2uyY5w



              -

              Despite the authorities' efforts to shut down torrent websites, the demand for this type of website remains high among users who want to download and share digital media. In the case of the new Kickass torrent website, the team behind it aims to provide a reliable and user-friendly platform for accessing the latest movies, TV, software, games and music. The website is regularly updated with new content, and users can easily search for and download the files they want using the website's advanced search features.

              -

              Kickass Torrents (also known as KAT) was a popular peer-to-peer file sharing platform used for distributing torrent files and magnet links. Users could search for and download a variety of digital content through the platform, including movies, music, games and software.

              -

              It was possible to download movies from Kickass Torrents, as well as other types of digital content such as music, games, and software. However, downloading movies or other copyrighted material without permission is illegal and carries the risk of legal action.

              -

              It was possible to download movies from Kickass Torrents, as well as other types of digital content such as music, games and software. However, downloading movies or other copyrighted material without permission is illegal and risks legal action.

              -

              Also known as the YTS torrent tracker, this site is yet another best KickAss Torrents alternative. I am sure all movie lovers are aware of YTS movies. But this differs from the original YIFY torrent website to download YIFY movies.

              aaccfb2cb3
              -
              -
              \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Cinco Siglos De Historia Francisco Scarano Pdf.md b/spaces/usbethFlerru/sovits-modelsV2/example/Cinco Siglos De Historia Francisco Scarano Pdf.md deleted file mode 100644 index 1111a9f1be3dcabb0935f41229f4020924593ba9..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Cinco Siglos De Historia Francisco Scarano Pdf.md +++ /dev/null @@ -1,7 +0,0 @@ -

              cinco siglos de historia francisco scarano pdf


              Download Ziphttps://urlcod.com/2uyW4A



              -
              -Puerto Rico: Cinco siglos de historia (Spanish edition) Scarano, Francisco And, as well as a large selection of related books, art, and collectibles available now.Written in the form of stories written by Spanish and American conquerors from the 1500s to the present day, the history of Puerto Rico is one of the most widely read books in history, despite its small circulation of over 300,000 copies. -Written in the form of stories written by Spanish and American conquerors from the 1500s to the present day, the history of Puerto Rico is one of the most widely read books in history despite its small circulation of over 300,000 copies. 8a78ff9644
              -
              -
              -

              diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/D33006.md b/spaces/usbethFlerru/sovits-modelsV2/example/D33006.md deleted file mode 100644 index 61b6f6d26a2f457dd3eb84df0007137fec709dfb..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/D33006.md +++ /dev/null @@ -1,6 +0,0 @@ -

              D33006


              Download File ··· https://urlcod.com/2uyWvL



              - -Info about driver gigabyte d33006 graphic card driver download. Driver Info: File: gigabyte_d33006_graphic_card.exe. Version: 1.3.8. File size: ... 1fdad05405
              -
              -
              -

              diff --git a/spaces/user238921933/stable-diffusion-webui/modules/sd_disable_initialization.py b/spaces/user238921933/stable-diffusion-webui/modules/sd_disable_initialization.py deleted file mode 100644 index 50e4c180fc74988ec697e4cef2773bd2a785bccf..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/sd_disable_initialization.py +++ /dev/null @@ -1,93 +0,0 @@ -import ldm.modules.encoders.modules -import open_clip -import torch -import transformers.utils.hub - - -class DisableInitialization: - """ - When an object of this class enters a `with` block, it starts: - - preventing torch's layer initialization functions from working - - changes CLIP and OpenCLIP to not download model weights - - changes CLIP to not make requests to check if there is a new version of a file you already have - - When it leaves the block, it reverts everything to how it was before. - - Use it like this: - ``` - with DisableInitialization(): - do_things() - ``` - """ - - def __init__(self, disable_clip=True): - self.replaced = [] - self.disable_clip = disable_clip - - def replace(self, obj, field, func): - original = getattr(obj, field, None) - if original is None: - return None - - self.replaced.append((obj, field, original)) - setattr(obj, field, func) - - return original - - def __enter__(self): - def do_nothing(*args, **kwargs): - pass - - def create_model_and_transforms_without_pretrained(*args, pretrained=None, **kwargs): - return self.create_model_and_transforms(*args, pretrained=None, **kwargs) - - def CLIPTextModel_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs): - res = self.CLIPTextModel_from_pretrained(None, *model_args, config=pretrained_model_name_or_path, state_dict={}, **kwargs) - res.name_or_path = pretrained_model_name_or_path - return res - - def transformers_modeling_utils_load_pretrained_model(*args, **kwargs): - args = args[0:3] + ('/', ) + args[4:] # resolved_archive_file; must set it to something to prevent what seems to be a bug - return self.transformers_modeling_utils_load_pretrained_model(*args, **kwargs) - - def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs): - - # this file is always 404, prevent making request - if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json': - return None - - try: - res = original(url, *args, local_files_only=True, **kwargs) - if res is None: - res = original(url, *args, local_files_only=False, **kwargs) - return res - except Exception as e: - return original(url, *args, local_files_only=False, **kwargs) - - def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_utils_hub_get_from_cache, url, *args, **kwargs) - - def transformers_tokenization_utils_base_cached_file(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_tokenization_utils_base_cached_file, url, *args, **kwargs) - - def transformers_configuration_utils_cached_file(url, *args, local_files_only=False, **kwargs): - return transformers_utils_hub_get_file_from_cache(self.transformers_configuration_utils_cached_file, url, *args, **kwargs) - - self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing) - self.replace(torch.nn.init, '_no_grad_normal_', do_nothing) - self.replace(torch.nn.init, '_no_grad_uniform_', 
do_nothing) - - if self.disable_clip: - self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained) - self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained) - self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model) - self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file) - self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file) - self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache) - - def __exit__(self, exc_type, exc_val, exc_tb): - for obj, field, original in self.replaced: - setattr(obj, field, original) - - self.replaced.clear() - diff --git a/spaces/user238921933/stable-diffusion-webui/modules/sd_hijack_clip_old.py b/spaces/user238921933/stable-diffusion-webui/modules/sd_hijack_clip_old.py deleted file mode 100644 index 433d8c3da8e33aa09833dcd1793395f420e984d7..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/sd_hijack_clip_old.py +++ /dev/null @@ -1,81 +0,0 @@ -from modules import sd_hijack_clip -from modules import shared - - -def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): - id_start = self.id_start - id_end = self.id_end - maxlen = self.wrapped.max_length # you get to stay at 77 - used_custom_terms = [] - remade_batch_tokens = [] - hijack_comments = [] - hijack_fixes = [] - token_count = 0 - - cache = {} - batch_tokens = self.tokenize(texts) - batch_multipliers = [] - for tokens in batch_tokens: - tuple_tokens = tuple(tokens) - - if tuple_tokens in cache: - remade_tokens, fixes, multipliers = cache[tuple_tokens] - else: - fixes = [] - remade_tokens = [] - multipliers = [] - mult = 1.0 - - i = 0 - while i < len(tokens): - token = tokens[i] - - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - - mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None - if mult_change is not None: - mult *= mult_change - i += 1 - elif embedding is None: - remade_tokens.append(token) - multipliers.append(mult) - i += 1 - else: - emb_len = int(embedding.vec.shape[0]) - fixes.append((len(remade_tokens), embedding)) - remade_tokens += [0] * emb_len - multipliers += [mult] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens - - if len(remade_tokens) > maxlen - 2: - vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} - ovf = remade_tokens[maxlen - 2:] - overflowing_words = [vocab.get(int(x), "") for x in ovf] - overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) - hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") - - token_count = len(remade_tokens) - remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) - remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] - cache[tuple_tokens] = (remade_tokens, 
fixes, multipliers) - - multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) - multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] - - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count - - -def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts) - - self.hijack.comments += hijack_comments - - if len(used_custom_terms) > 0: - self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) - - self.hijack.fixes = hijack_fixes - return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/spaces/user238921933/stable-diffusion-webui/webui.bat b/spaces/user238921933/stable-diffusion-webui/webui.bat deleted file mode 100644 index 5139b7eb020139c65fa6390a7078c761301229b0..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/webui.bat +++ /dev/null @@ -1,85 +0,0 @@ -@echo off - -if not defined PYTHON (set PYTHON=python) -if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv") - - -set ERROR_REPORTING=FALSE - -mkdir tmp 2>NUL - -%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :check_pip -echo Couldn't launch python -goto :show_stdout_stderr - -:check_pip -%PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :start_venv -if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr -%PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :start_venv -echo Couldn't install pip -goto :show_stdout_stderr - -:start_venv -if ["%VENV_DIR%"] == ["-"] goto :skip_venv -if ["%SKIP_VENV%"] == ["1"] goto :skip_venv - -dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv - -for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" -echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% -%PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv -echo Unable to create venv in directory "%VENV_DIR%" -goto :show_stdout_stderr - -:activate_venv -set PYTHON="%VENV_DIR%\Scripts\Python.exe" -echo venv %PYTHON% - -:skip_venv -if [%ACCELERATE%] == ["True"] goto :accelerate -goto :launch - -:accelerate -echo Checking for accelerate -set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe" -if EXIST %ACCELERATE% goto :accelerate_launch - -:launch -%PYTHON% launch.py %* -pause -exit /b - -:accelerate_launch -echo Accelerating -%ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py -pause -exit /b - -:show_stdout_stderr - -echo. -echo exit code: %errorlevel% - -for /f %%i in ("tmp\stdout.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stdout: -type tmp\stdout.txt - -:show_stderr -for /f %%i in ("tmp\stderr.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stderr: -type tmp\stderr.txt - -:endofscript - -echo. -echo Launch unsuccessful. Exiting. 
-pause diff --git a/spaces/vict0rsch/climateGAN/climategan/logger.py b/spaces/vict0rsch/climateGAN/climategan/logger.py deleted file mode 100644 index a20023e453fbfb8cbb6c351d34ea348ac25117e0..0000000000000000000000000000000000000000 --- a/spaces/vict0rsch/climateGAN/climategan/logger.py +++ /dev/null @@ -1,445 +0,0 @@ -from pathlib import Path - -import numpy as np -import torch -import torchvision.utils as vutils -from addict import Dict -from PIL import Image -from torch.nn.functional import interpolate, sigmoid - -from climategan.data import decode_segmap_merged_labels -from climategan.tutils import ( - all_texts_to_tensors, - decode_bucketed_depth, - normalize_tensor, - write_architecture, -) -from climategan.utils import flatten_opts - - -class Logger: - def __init__(self, trainer): - self.losses = Dict() - self.time = Dict() - self.trainer = trainer - self.global_step = 0 - self.epoch = 0 - - def log_comet_images(self, mode, domain, minimal=False, all_only=False): - trainer = self.trainer - save_images = {} - all_images = [] - n_all_ims = None - all_legends = ["Input"] - task_legends = {} - - if domain not in trainer.display_images[mode]: - return - - # -------------------- - # ----- Masker ----- - # -------------------- - n_ims = len(trainer.display_images[mode][domain]) - print(" " * 60, end="\r") - if domain != "rf": - for j, display_dict in enumerate(trainer.display_images[mode][domain]): - - print(f"Inferring sample {mode} {domain} {j+1}/{n_ims}", end="\r") - - x = display_dict["data"]["x"].unsqueeze(0).to(trainer.device) - z = trainer.G.encode(x) - - s_pred = decoded_s_pred = d_pred = z_depth = None - for k, task in enumerate(["d", "s", "m"]): - - if ( - task not in display_dict["data"] - or task not in trainer.opts.tasks - ): - continue - - task_legend = ["Input"] - target = display_dict["data"][task] - target = target.unsqueeze(0).to(trainer.device) - task_saves = [] - - if task not in save_images: - save_images[task] = [] - - prediction = None - if task == "m": - cond = None - if s_pred is not None and d_pred is not None: - cond = trainer.G.make_m_cond(d_pred, s_pred, x) - - prediction = trainer.G.decoders[task](z, cond, z_depth) - elif task == "d": - prediction, z_depth = trainer.G.decoders[task](z) - elif task == "s": - prediction = trainer.G.decoders[task](z, z_depth) - - if task == "s": - # Log fire - wildfire_tens = trainer.compute_fire(x, prediction) - task_saves.append(wildfire_tens) - task_legend.append("Wildfire") - # Log seg output - s_pred = prediction.clone() - target = ( - decode_segmap_merged_labels(target, domain, True) - .float() - .to(trainer.device) - ) - prediction = ( - decode_segmap_merged_labels(prediction, domain, False) - .float() - .to(trainer.device) - ) - decoded_s_pred = prediction - task_saves.append(target) - task_legend.append("Target Segmentation") - - elif task == "m": - prediction = sigmoid(prediction).repeat(1, 3, 1, 1) - task_saves.append(x * (1.0 - prediction)) - if not minimal: - task_saves.append( - x * (1.0 - (prediction > 0.1).to(torch.int)) - ) - task_saves.append( - x * (1.0 - (prediction > 0.5).to(torch.int)) - ) - - task_saves.append(x * (1.0 - target.repeat(1, 3, 1, 1))) - task_legend.append("Masked input") - - if not minimal: - task_legend.append("Masked input (>0.1)") - task_legend.append("Masked input (>0.5)") - - task_legend.append("Masked input (target)") - # dummy pixels to fool scaling and preserve mask range - prediction[:, :, 0, 0] = 1.0 - prediction[:, :, -1, -1] = 0.0 - - elif task == "d": - # prediction is a log 
depth tensor - d_pred = prediction - target = normalize_tensor(target) * 255 - if prediction.shape[1] > 1: - prediction = decode_bucketed_depth( - prediction, self.trainer.opts - ) - smogged = self.trainer.compute_smog( - x, d=prediction, s=decoded_s_pred, use_sky_seg=False - ) - prediction = normalize_tensor(prediction) - prediction = prediction.repeat(1, 3, 1, 1) - task_saves.append(smogged) - task_legend.append("Smogged") - task_saves.append(target.repeat(1, 3, 1, 1)) - task_legend.append("Depth target") - - task_saves.append(prediction) - task_legend.append(f"Predicted {task}") - - save_images[task].append(x.cpu().detach()) - if k == 0: - all_images.append(save_images[task][-1]) - - task_legends[task] = task_legend - if j == 0: - all_legends += task_legend[1:] - - for im in task_saves: - save_images[task].append(im.cpu().detach()) - all_images.append(save_images[task][-1]) - - if j == 0: - n_all_ims = len(all_images) - - if not all_only: - for task in save_images.keys(): - # Write images: - self.upload_images( - image_outputs=save_images[task], - mode=mode, - domain=domain, - task=task, - im_per_row=trainer.opts.comet.im_per_row.get(task, 4), - rows_per_log=trainer.opts.comet.get("rows_per_log", 5), - legends=task_legends[task], - ) - - if len(save_images) > 1: - self.upload_images( - image_outputs=all_images, - mode=mode, - domain=domain, - task="all", - im_per_row=n_all_ims, - rows_per_log=trainer.opts.comet.get("rows_per_log", 5), - legends=all_legends, - ) - # --------------------- - # ----- Painter ----- - # --------------------- - else: - # in the rf domain display_size may be different from fid.n_images - limit = trainer.opts.comet.display_size - image_outputs = [] - legends = [] - for im_set in trainer.display_images[mode][domain][:limit]: - x = im_set["data"]["x"].unsqueeze(0).to(trainer.device) - m = im_set["data"]["m"].unsqueeze(0).to(trainer.device) - - prediction = trainer.G.paint(m, x) - - image_outputs.append(x * (1.0 - m)) - image_outputs.append(prediction) - image_outputs.append(x) - image_outputs.append(prediction * m) - if not legends: - legends.append("Masked Input") - legends.append("Painted Input") - legends.append("Input") - legends.append("Isolated Water") - # Write images - self.upload_images( - image_outputs=image_outputs, - mode=mode, - domain=domain, - task="painter", - im_per_row=trainer.opts.comet.im_per_row.get("p", 4), - rows_per_log=trainer.opts.comet.get("rows_per_log", 5), - legends=legends, - ) - - return 0 - - def log_losses(self, model_to_update="G", mode="train"): - """Logs metrics on comet.ml - - Args: - model_to_update (str, optional): One of "G", "D". Defaults to "G". 
- """ - trainer = self.trainer - loss_names = {"G": "gen", "D": "disc"} - - if trainer.opts.train.log_level < 1: - return - - if trainer.exp is None: - return - - assert model_to_update in { - "G", - "D", - }, "unknown model to log losses {}".format(model_to_update) - - loss_to_update = self.losses[loss_names[model_to_update]] - - losses = loss_to_update.copy() - - if trainer.opts.train.log_level == 1: - # Only log aggregated losses: delete other keys in losses - for k in loss_to_update: - if k not in {"masker", "total_loss", "painter"}: - del losses[k] - # convert losses into a single-level dictionnary - - losses = flatten_opts(losses) - trainer.exp.log_metrics( - losses, prefix=f"{model_to_update}_{mode}", step=self.global_step - ) - - def log_learning_rates(self): - if self.trainer.exp is None: - return - lrs = {} - trainer = self.trainer - if trainer.g_scheduler is not None: - for name, lr in zip( - trainer.lr_names["G"], trainer.g_scheduler.get_last_lr() - ): - lrs[f"lr_G_{name}"] = lr - if trainer.d_scheduler is not None: - for name, lr in zip( - trainer.lr_names["D"], trainer.d_scheduler.get_last_lr() - ): - lrs[f"lr_D_{name}"] = lr - - trainer.exp.log_metrics(lrs, step=self.global_step) - - def log_step_time(self, time): - """Logs step-time on comet.ml - - Args: - step_time (float): step-time in seconds - """ - if self.trainer.exp: - self.trainer.exp.log_metric( - "step-time", time - self.time.step_start, step=self.global_step - ) - - def log_epoch_time(self, time): - """Logs step-time on comet.ml - - Args: - step_time (float): step-time in seconds - """ - if self.trainer.exp: - self.trainer.exp.log_metric( - "epoch-time", time - self.time.epoch_start, step=self.global_step - ) - - def log_comet_combined_images(self, mode, domain): - - trainer = self.trainer - image_outputs = [] - legends = [] - im_per_row = 0 - for i, im_set in enumerate(trainer.display_images[mode][domain]): - x = im_set["data"]["x"].unsqueeze(0).to(trainer.device) - # m = im_set["data"]["m"].unsqueeze(0).to(trainer.device) - - m = trainer.G.mask(x=x) - m_bin = (m > 0.5).to(m.dtype) - prediction = trainer.G.paint(m, x) - prediction_bin = trainer.G.paint(m_bin, x) - - image_outputs.append(x) - legends.append("Input") - image_outputs.append(x * (1.0 - m)) - legends.append("Soft Masked Input") - image_outputs.append(prediction) - legends.append("Painted") - image_outputs.append(prediction * m) - legends.append("Soft Masked Painted") - image_outputs.append(x * (1.0 - m_bin)) - legends.append("Binary (0.5) Masked Input") - image_outputs.append(prediction_bin) - legends.append("Binary (0.5) Painted") - image_outputs.append(prediction_bin * m_bin) - legends.append("Binary (0.5) Masked Painted") - - if i == 0: - im_per_row = len(image_outputs) - # Upload images - self.upload_images( - image_outputs=image_outputs, - mode=mode, - domain=domain, - task="combined", - im_per_row=im_per_row or 7, - rows_per_log=trainer.opts.comet.get("rows_per_log", 5), - legends=legends, - ) - - return 0 - - def upload_images( - self, - image_outputs, - mode, - domain, - task, - im_per_row=3, - rows_per_log=5, - legends=[], - ): - """ - Save output image - - Args: - image_outputs (list(torch.Tensor)): all the images to log - mode (str): train or val - domain (str): current domain - task (str): current task - im_per_row (int, optional): umber of images to be displayed per row. - Typically, for a given task: 3 because [input prediction, target]. - Defaults to 3. - rows_per_log (int, optional): Number of rows (=samples) per uploaded image. 
- Defaults to 5. - comet_exp (comet_ml.Experiment, optional): experiment to use. - Defaults to None. - """ - trainer = self.trainer - if trainer.exp is None: - return - curr_iter = self.global_step - nb_per_log = im_per_row * rows_per_log - n_logs = len(image_outputs) // nb_per_log + 1 - - header = None - if len(legends) == im_per_row and all(isinstance(t, str) for t in legends): - header_width = max(im.shape[-1] for im in image_outputs) - headers = all_texts_to_tensors(legends, width=header_width) - header = torch.cat(headers, dim=-1) - - for logidx in range(n_logs): - print(" " * 100, end="\r", flush=True) - print( - "Uploading images for {} {} {} {}/{}".format( - mode, domain, task, logidx + 1, n_logs - ), - end="...", - flush=True, - ) - ims = image_outputs[logidx * nb_per_log : (logidx + 1) * nb_per_log] - if not ims: - continue - - ims = self.upsample(ims) - ims = torch.stack([im.squeeze() for im in ims]).squeeze() - image_grid = vutils.make_grid( - ims, nrow=im_per_row, normalize=True, scale_each=True, padding=0 - ) - - if header is not None: - image_grid = torch.cat( - [header.to(image_grid.device), image_grid], dim=1 - ) - - image_grid = image_grid.permute(1, 2, 0).cpu().numpy() - trainer.exp.log_image( - Image.fromarray((image_grid * 255).astype(np.uint8)), - name=f"{mode}_{domain}_{task}_{str(curr_iter)}_#{logidx}", - step=curr_iter, - ) - - def upsample(self, ims): - h = max(im.shape[-2] for im in ims) - w = max(im.shape[-1] for im in ims) - new_ims = [] - for im in ims: - im = interpolate(im, (h, w), mode="bilinear") - new_ims.append(im) - return new_ims - - def padd(self, ims): - h = max(im.shape[-2] for im in ims) - w = max(im.shape[-1] for im in ims) - new_ims = [] - for im in ims: - ih = im.shape[-2] - iw = im.shape[-1] - if ih != h or iw != w: - padded = torch.zeros(im.shape[-3], h, w) - padded[ - :, (h - ih) // 2 : (h + ih) // 2, (w - iw) // 2 : (w + iw) // 2 - ] = im - new_ims.append(padded) - else: - new_ims.append(im) - - return new_ims - - def log_architecture(self): - write_architecture(self.trainer) - - if self.trainer.exp is None: - return - - for f in Path(self.trainer.opts.output_path).glob("archi*.txt"): - self.trainer.exp.log_asset(str(f), overwrite=True) diff --git a/spaces/victor/spaces-collection/index.html b/spaces/victor/spaces-collection/index.html deleted file mode 100644 index 0c027e5c25868e8ab2ada15cf69cc61a32c569c7..0000000000000000000000000000000000000000 --- a/spaces/victor/spaces-collection/index.html +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - My static Space - - - - -
              -

              Collection 1

              -

              Lorem ipsum dolor sit, amet consectetur adipisicing elit. Deleniti doloremque quod facilis a consectetur, dolores quo in soluta minus eum libero quae.

              -
              - - - - - -
              -
              -

              Collection 2

              -

              Lorem ipsum dolor sit, amet consectetur adipisicing elit. Deleniti doloremque quod facilis a consectetur, dolores quo in soluta minus eum libero quae.

              -
              - - - -
              -
              - - \ No newline at end of file diff --git a/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/3act.md b/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/3act.md deleted file mode 100644 index b56ee0074e855b661cc8f4730789c07b1f015ec6..0000000000000000000000000000000000000000 --- a/spaces/vinceL/YonKomaMangaGenerator/sample_storyboards/3act.md +++ /dev/null @@ -1,82 +0,0 @@ -{ - "storyboard": { - "title": "Resurgence of the Sunflower Warrior", - "step_by_step_thinking_for_designing_your_storyboard": "To design the storyboard, I began with the basic narrative structure of the light novel - a timid, bullied girl named Lily, discovering a mystical sunflower seed that grants her incredible powers. Throughout the story, Lily is continuously developing her character, learning to stand up for herself. I envisioned the major events, each driving Lily closer to her destiny, providing momentum for the story. Along the way, she meets several characters who push her towards her potential. I imagined the scenes visually, taking into account the environment, actions, characters, and events.", - "step_by_step_thinking_for_effectively_applying_the_3act_structure": "Each Act is applied in service of the overall story. Act 1, 'Beneath the Sunflower', sets up Lily's normal world, her desires, and the inciting incident - discovering the seed. Act 2, 'Sunflower blooming', deals with the struggle and growth. This involves Lily's battles, friendships, training, and her inner struggles. The plot develops more dynamically. Act 3, 'The Radiant Sunflower', presents the climax and resolution. Here, the conflict reaches its peak through Lily's major battle, followed by resolution and the closing of her journey.", - "step_by_step_thinking_for_incorporating_the_dramatic_question": "The dramatic question here is, 'Will Lily harness the power of the sunflower to overcome her fears and affirm her own worth?' This question permeates the whole narrative, giving it its central conflict and emotional depth. The confrontations, challenges, and personal growth all serve to answer this question in the climax.", - "acts": { - "Act 1: Setup": { - "title": "Beneath the Sunflower", - "100word_description": "We are introduced to Lily, a timid, bullied girl, longing for courage and respect. Her life takes a turn when she stumbles upon a mystical sunflower seed which grants her unique powers. The seed, representing hope and strength, catalyzes Lily's journey.", - "parts": [ - { - "id": 1, - "type": "Beginning", - "description": "Lily is depicted as a quiet and bullied girl, invoking sympathy. The bleak atmosphere of her life is contrasted with the vibrant sunflower she discovers in the park.", - "image_generation_prompt": "Girl sitting quietly, school, bullied by classmates, manga, artstyle inspired by 'A Silent Voice'." - }, - { - "id": 2, - "type": "Inciting Incident", - "description": "Lily discovers the mystical sunflower seed - a symbol of strength and hope. The scene is surreal, surrounded by light rays and a warm palette.", - "image_generation_prompt": "A girl, park, discovering a glowing seed, Ghibli’s aesthetic artstyle for emphasis on magic realism." - }, - { - "id": 3, - "type": "Plot Point 1", - "description": "Bright colours flood the scene in a burst as Lily's dormant powers awaken. 
Her physical transformation is presented - petals adorn her hair, eyes glow with an inner light.", - "image_generation_prompt": "Girl, her room, physical transformation into a magical girl, Madoka Magica’s artstyle for magic and surrealism." - } - ] - }, - "Act 2: Confrontation": { - "title": "Sunflower Blooming", - "100word_description": "Lily grapples with her newfound powers and responsibilities. She battles against evil entities and forms alliances. Slowly, she matures and begins to bloom as the 'Sunflower Warrior', with her powers reflecting her inner growth.", - "parts": [ - { - "id": 4, - "type": "Rising Action", - "description": "Lily's first battle is shown, where she wins but it's a tiring and confusing experience. Her uncertain expression and the chaotic backdrop convey her struggle.", - "image_generation_prompt": "Magical girl, cityscape, chaotic battle against dark creatures, Noirish artstyle with darkness surrounding the otherwise colourful character." - }, - { - "id": 5, - "type": "Midpoint", - "description": "Lily befriends other magical girls and trains under a mentor to become stronger. There's a sense of camaraderie and resolve in the scene.", - "image_generation_prompt": "Group of magical girls, training area, practicing their powers together, joyful art style akin to Little Witch Academia." - }, - { - "id": 6, - "type": "Plot Point 2", - "description": "A major showdown takes place. Lily's mentor is injured gravely, leaving Lily feeling devastated and alone. Darkness encapsulates the arena.", - "image_generation_prompt": "Female protagonist, battlefield, witnessing her mentor’s injury, Attack on Titans’ artstyle for showing despair and tension." - } - ] - }, - "Act 3: Resolution": { - "title": "The Radiant Sunflower", - "100word_description": "Lily, now the Sunflower Warrior, rises to her destiny. This Act showcases the climax - the decisive battle, and the denouement - Lily’s transition into a stronger, confident individual who's found her place in the world.", - "parts": [ - { - "id": 7, - "type": "Climax", - "description": "Fierce battling and brightness as Lily takes on the biggest threat alone. Her face is harder, determined, highlighted by radiating sunflower petals around her.", - "image_generation_prompt": "Sunflower warrior, battlefield, fighting against the major threat, intricate art style of Sailor Moon’s climax scenes." - }, - { - "id": 8, - "type": "Denouement", - "description": "Post-battle, Lily heals her mentor. She looks more matured with a soft glow enveloping her, reflecting deep tranquility and satisfaction.", - "image_generation_prompt": "Healer, recovery room, healing her mentor with magic, gentle and warm art style like Cardcaptor Sakura." - }, - { - "id": 9, - "type": "End", - "description": "Lily returns to school, now exuding confidence. The bullying has stopped, but now she's strong enough to face it all. There's quiet joy radiating from her.", - "image_generation_prompt": "Girl, school, smiling with newfound confidence, airy, refreshing, and soft aesthetic of Your Lie in April." 
- } - ] - } - } - } -} \ No newline at end of file diff --git a/spaces/vjain/Trading-Chatbot/app.py b/spaces/vjain/Trading-Chatbot/app.py deleted file mode 100644 index 1b6bc014f4747243b0aa3a94a4259df52e603f76..0000000000000000000000000000000000000000 --- a/spaces/vjain/Trading-Chatbot/app.py +++ /dev/null @@ -1,128 +0,0 @@ -import gradio as gr -import openai -import pandas as pd -import numpy as np -import csv -import os -from datasets import load_dataset -openai.api_key= os.environ.get("openai.api_key") -from openai.embeddings_utils import get_embedding -import requests -model_id = "sentence-transformers/all-MiniLM-L6-v2" -import json -hf_token = os.environ.get("hugginface.api.token") -import re -from sklearn.metrics.pairwise import cosine_similarity - -def generate_embeddings(texts, model_id, hf_token): - api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}" - headers = {"Authorization": f"Bearer {hf_token}"} - response = requests.post(api_url, headers=headers, json={"inputs": texts, "options":{"wait_for_model":True}}) - embeddings = response.json() - return embeddings -AP_Bio = load_dataset('vjain/biology_AP_embeddings') -df1 = pd.DataFrame(AP_Bio['train']) -df1["similarity"] = 0 - -AP_Physics = load_dataset('vjain/AP_physics_embeddings') -df2 = pd.DataFrame(AP_Physics['train']) -df2["similarity"] = 0 - -Personality = load_dataset('vjain/Personality_em') -df3 = pd.DataFrame(Personality['train']) -df3["similarity"] = 0 - -AP_statistics = load_dataset('vjain/AP_statistics') -df4 = pd.DataFrame(AP_statistics['train']) -df4["similarity"] = 0 - -tax_embeddings = load_dataset('vjain/tax_embeddings') -df5 = pd.DataFrame(tax_embeddings['train']) -df5["similarity"] = 0 - -therapy = load_dataset('vjain/therapy') -df6 = pd.DataFrame(therapy['train']) -df6["similarity"] = 0 - -gurbani = load_dataset('vjain/gurbani') -df7 = pd.DataFrame(gurbani['train']) -df7["similarity"] = 0 - - -dataframes = { - "AP_Bio": df1, - "AP_Physics": df2, - "Personality" : df3, - "AP_statistics": df4, - "tax_embeddings": df5, - "therapy": df6, - "gurbani":df7 -} - -#df = pd.read_csv("TA_embeddings.csv") -#df["embedding"]=df["embedding"].apply(eval).apply(np.array) -def reply(input, dataset_name): - try: - if dataset_name not in dataframes: - return "Invalid dataset selected. Please select a valid dataset." - if not input: - return "Please Enter a Question to get an Answer" - df = dataframes[dataset_name] - input = input - input_vector = generate_embeddings(input, model_id,hf_token) - df["similarities"]=df["embedding"].apply(lambda x: cosine_similarity([x],[input_vector])[0][0]) - data = df.sort_values("similarities", ascending=False).head(5) - data.to_csv("sorted.csv") - context = [] - for i, row in data.iterrows(): - context.append(row['text']) - context - text = "\n".join(context) - context = text - prompt = f""" - Answer the following question using the context given below.If you don't know the answer for certain, say I don't know. 
- Context: {context} - Q: {input} - """ - response= openai.Completion.create( - prompt=prompt, - temperature=1, - max_tokens=500, - top_p=1, - frequency_penalty=0, - presence_penalty=0, - model="text-davinci-003" - )["choices"][0]["text"].strip(" \n") - return response - except Exception as e: - return f"An error occurred: {e}" - -csv_dropdown = gr.inputs.Dropdown( - label="Select the Book", - choices=["AP_Bio", "AP_Physics","Personality","AP_statistics","tax_embeddings","therapy","gurbani"], - default="AP_Bio" - -) -input_text = gr.inputs.Textbox( - label="Enter your questions here", - placeholder="E.g. What is DNA?", - lines=3 - -) -text_output = gr.outputs.Textbox(label="Answer") - -description = "Scholar Bot is a question answering system designed to provide accurate and relevant answers to questions from this book hosted by OpenStax https://openstax.org/details/books/biology-ap-courses. Simply enter your question in the text box above and Scholar Bot will use advanced natural language processing algorithms to search a large corpus of biology text to find the best answer for you. Scholar Bot uses the Sentence Transformers model to generate embeddings of text, and OpenAI's GPT-3 language model to provide answers to your questions." - -ui = gr.Interface(fn=reply, - inputs=[input_text, csv_dropdown], - outputs=[text_output], - title="Scholar Bot", - description=description, - theme="light", - layout="vertical", - allow_flagging=False, - examples=[["What is the function of DNA polymerase?", "AP_Bio"]] - ) - - -ui.launch() \ No newline at end of file diff --git a/spaces/vslasor/VLS7-ClinicalTerminologyUIUX-GR/app.py b/spaces/vslasor/VLS7-ClinicalTerminologyUIUX-GR/app.py deleted file mode 100644 index 3a2a532354367b122f50cc6f70f4aca4c1e0ff38..0000000000000000000000000000000000000000 --- a/spaces/vslasor/VLS7-ClinicalTerminologyUIUX-GR/app.py +++ /dev/null @@ -1,327 +0,0 @@ -import pandas_profiling as pp -import pandas as pd -import tensorflow as tf - -from datasets import load_dataset -from tensorflow.python.framework import tensor_shape - -#LOINC -datasetLOINC = load_dataset("awacke1/LOINC-CodeSet-Value-Description.csv", split="train") -#SNOMED: -datasetSNOMED = load_dataset("awacke1/SNOMED-CT-Code-Value-Semantic-Set.csv", split="train") -#eCQM: -dataseteCQM = load_dataset("awacke1/eCQM-Code-Value-Semantic-Set.csv", split="train") - -# map using autotokenizer -from transformers import AutoTokenizer -tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") -dataset = datasetLOINC.map(lambda examples: tokenizer(examples["Description"]), batched=True) -JSONOBJ2=dataset[0] -print(JSONOBJ2) - -sw = datasetLOINC.filter(lambda example: example["Description"].startswith("Allergy")) -len(sw) -print(sw) -print(datasetLOINC) -print(datasetSNOMED) -print(dataseteCQM) - -# play with some dataset tools before the show: - -#print(start_with_ar["Description"]) - -#--- -#Main Stage - Begin! 
-#--- - -import os -import json -import numpy as np -import gradio as gr - -HF_TOKEN = os.environ.get("HF_TOKEN") -CHOICES = ["SNOMED", "LOINC", "CQM"] -JSONOBJ = """{"items":{"item":[{"id": "0001","type": null,"is_good": false,"ppu": 0.55,"batters":{"batter":[{ "id": "1001", "type": "Regular" },{ "id": "1002", "type": "Chocolate" },{ "id": "1003", "type": "Blueberry" },{ "id": "1004", "type": "Devil's Food" }]},"topping":[{ "id": "5001", "type": "None" },{ "id": "5002", "type": "Glazed" },{ "id": "5005", "type": "Sugar" },{ "id": "5007", "type": "Powdered Sugar" },{ "id": "5006", "type": "Chocolate with Sprinkles" },{ "id": "5003", "type": "Chocolate" },{ "id": "5004", "type": "Maple" }]}]}}""" - - -def profile_dataset(dataset=datasetSNOMED, username="awacke1", token=HF_TOKEN, dataset_name="awacke1/SNOMED-CT-Code-Value-Semantic-Set.csv"): - df = pd.read_csv(dataset.Description) - if len(df.columns) <= 15: - profile = pp.ProfileReport(df, title=f"{dataset_name} Report") - else: - profile = pp.ProfileReport(df, title=f"{dataset_name} Report", minimal = True) - - repo_url = create_repo(f"{username}/{dataset_name}", repo_type = "space", token = token, space_sdk = "static", private=False) - - profile.to_file("./index.html") - - upload_file(path_or_fileobj ="./index.html", path_in_repo = "index.html", repo_id =f"{username}/{dataset_name}", repo_type = "space", token=token) - readme = f"---\ntitle: {dataset_name}\nemoji: ✨\ncolorFrom: green\ncolorTo: red\nsdk: static\npinned: false\ntags:\n- dataset-report\n---" - with open("README.md", "w+") as f: - f.write(readme) - upload_file(path_or_fileobj ="./README.md", path_in_repo = "README.md", repo_id =f"{username}/{dataset_name}", repo_type = "space", token=token) - return f"Your dataset report will be ready at {repo_url}" - -#def lowercase_title(example): -# return {"Description": example[title].lower()} - -# demonstrate map function of dataset -#JSONOBJ_MAP=datasetLOINC.map(lowercase_title) -#JSONOBJ_MAP=datasetLOINC.filter(lambda example: example["Description"].startswith("Mental health")) - - - - -def concatenate_text(examples): - return { - "text": examples["Code"] - + " \n " - + examples["Description"] - + " \n " - + examples["Purpose: Clinical Focus"] - } - -def cls_pooling(model_output): - return model_output.last_hidden_state[:, 0] - -def get_embeddings(text_list): - encoded_input = tokenizer( - text_list, padding=True, truncation=True, return_tensors="tf" - ) - encoded_input = {k: v for k, v in encoded_input.items()} - model_output = model(**encoded_input) - return cls_pooling(model_output) - - -def fn( text1, text2, num, slider1, slider2, single_checkbox, checkboxes, radio, dropdown, im1, im2, im3, im4, - video, audio1, audio2, file, df1, df2,): -#def fn( text1, text2, single_checkbox, checkboxes, radio, im4, file, df1, df2,): - - searchTerm = text1 - searchTermSentence = text2 - - start_with_searchTermLOINC = datasetLOINC.filter(lambda example:example["Description"].startswith('Allergy')) #Allergy - - - # FAISS - columns = start_with_searchTermLOINC.column_names - columns_to_keep = ["Value Set Name", "Code", "Description", "Purpose: Clinical Focus", "Code System OID"] - columns_to_remove = set(columns_to_keep).symmetric_difference(columns) - start_with_searchTermLOINC = start_with_searchTermLOINC.remove_columns(columns_to_remove) - start_with_searchTermLOINC - start_with_searchTermLOINC.set_format("pandas") - df = start_with_searchTermLOINC[:] - - df["Purpose: Clinical Focus"][0] - - df4 = df.explode("Purpose: Clinical Focus", 
ignore_index=True) - df4.head(4) - - from datasets import Dataset - clinical_dataset = Dataset.from_pandas(df4) - clinical_dataset - - clinical_dataset = clinical_dataset.map(lambda x: {"c_length": len(x["Description"].split())}) - - clinical_dataset = clinical_dataset.filter(lambda x: x["c_length"] > 15) - clinical_dataset - - - clinical_dataset = clinical_dataset.map(concatenate_text) - #embedding = get_embeddings(clinical_dataset["text"][0]) - #embedding.shape - - from transformers import AutoTokenizer, TFAutoModel - - model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1" - tokenizer = AutoTokenizer.from_pretrained(model_ckpt) - model = TFAutoModel.from_pretrained(model_ckpt, from_pt=True) - -# TensorShape([1, 768]) - tf.shape([1, 768]) - - embeddings_dataset = clinical_dataset.map( - lambda x: {"embeddings": get_embeddings(x["text"]).numpy()[0]}) - -# embeddings_dataset.add_faiss_index(column="embeddings") - -# question = "How can I load a dataset offline?" -# question_embedding = get_embeddings([question]).numpy() -# question_embedding.shape - -# scores, samples = embeddings_dataset.get_nearest_examples("embeddings", question_embedding, k=5) - -# import pandas as pd - -# samples_df = pd.DataFrame.from_dict(samples) -# samples_df["scores"] = scores -# samples_df.sort_values("scores", ascending=False, inplace=True) - - - # "text": examples["Code"] - # + " \n " - # + examples["Description"] - # + " \n " - # + examples["Purpose: Clinical Focus"] - - -# for _, row in samples_df.iterrows(): -# print(f"Code: {row.Code}") -# print(f"Description: {row.Description}") -# #print(f"Purpose: Clinical Focus: {row.Purpose: Clinical Focus}") -# #print(f"URL: {row.html_url}") -# print("=" * 50) -# print() - - # SNOMED and CQM --------------- - start_with_searchTermSNOMED = datasetSNOMED.filter(lambda example: example["Description"].startswith('Hospital')) #Hospital - start_with_searchTermCQM = dataseteCQM.filter(lambda example: example["Description"].startswith('Telephone')) #Telephone - - print(start_with_searchTermLOINC ) - print(start_with_searchTermSNOMED ) - print(start_with_searchTermCQM) - - #print(start_with_searchTermLOINC["train"][0] ) - #print(start_with_searchTermSNOMED["train"][0] ) - #print(start_with_searchTermCQM["train"][0] ) - - #returnMsg=profile_dataset() - #print(returnMsg) - -# try: - #top1matchLOINC = json.loads(start_with_searchTermLOINC['train']) - #top1matchSNOMED = json.loads(start_with_searchTermSNOMED['train']) - #top1matchCQM = json.loads(start_with_searchTermCQM['train']) -# top1matchLOINC = json.loads(start_with_searchTermLOINC) -# top1matchSNOMED = json.loads(start_with_searchTermSNOMED) -# top1matchCQM = json.loads(start_with_searchTermCQM) -# except: -# print('Hello') - #print(start_with_searchTermLOINC[0]) - #print(start_with_searchTermSNOMED[0] ) - #print(start_with_searchTermCQM[0] ) - - #print(returnMsg) - # print("Datasets Processed") - - return ( - (text1 if single_checkbox else text2) - + ", selected:" - + ", ".join(checkboxes), # Text - { - "positive": num / (num + slider1 + slider2), - "negative": slider1 / (num + slider1 + slider2), - "neutral": slider2 / (num + slider1 + slider2), - }, # Label - (audio1[0], np.flipud(audio1[1])) - if audio1 is not None else os.path.join(os.path.dirname(__file__), "files/cantina.wav"), # Audio - np.flipud(im1) - if im1 is not None else os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), # Image - video - if video is not None else os.path.join(os.path.dirname(__file__), "files/world.mp4"), # Video - [ - 
("The", "art"), - ("quick brown", "adj"), - ("fox", "nn"), - ("jumped", "vrb"), - ("testing testing testing", None), - ("over", "prp"), - ("the", "art"), - ("testing", None), - ("lazy", "adj"), - ("dogs", "nn"), - (".", "punc"), - ] + [(f"test {x}", f"test {x}") for x in range(10)], # HighlightedText - [ - ("The testing testing testing", None), - ("over", 0.6), - ("the", 0.2), - ("testing", None), - ("lazy", -0.1), - ("dogs", 0.4), - (".", 0), - ] + [(f"test", x / 10) for x in range(-10, 10)], # HighlightedText - #json.loads(JSONOBJ), # JSON - start_with_searchTermLOINC.to_json(orient="records", path_or_buf="None"), - #json.dumps(json.loads(start_with_searchTermLOINC['train'].to_json(orient="records", path_or_buf="None"))), - "", # HTML - os.path.join(os.path.dirname(__file__), "files/titanic.csv"), - df1, # Dataframe - np.random.randint(0, 10, (4, 4)), # Dataframe - df2, # Timeseries - ) - - - -demo = gr.Interface( - fn, - inputs=[ - gr.Textbox(value="Allergy", label="Textbox"), - gr.Textbox(lines=3, value="Bathing", placeholder="Type here..", label="Textbox 2"), - gr.Number(label="Number", value=42), - gr.Slider(10, 20, value=15, label="Slider: 10 - 20"), - gr.Slider(maximum=20, step=0.04, label="Slider: step @ 0.04"), - gr.Checkbox(label="Check for NER Match on Submit"), - gr.CheckboxGroup(label="Clinical Terminology to Check", choices=CHOICES, value=CHOICES[0:2]), - gr.Radio(label="Preferred Terminology Output", choices=CHOICES, value=CHOICES[2]), - gr.Dropdown(label="Dropdown", choices=CHOICES), - gr.Image(label="Image"), - gr.Image(label="Image w/ Cropper", tool="select"), - gr.Image(label="Sketchpad", source="canvas"), - gr.Image(label="Webcam", source="webcam"), - gr.Video(label="Video"), - gr.Audio(label="Audio"), - gr.Audio(label="Microphone", source="microphone"), - gr.File(label="File"), - gr.Dataframe(label="Filters", headers=["Name", "Age", "Gender"]), - gr.Timeseries(x="time", y=["price", "value"], colors=["pink", "purple"]), - ], - outputs=[ - gr.Textbox(label="Textbox"), - gr.Label(label="Label"), - gr.Audio(label="Audio"), - gr.Image(label="Image"), - gr.Video(label="Video"), - gr.HighlightedText(label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"}), - gr.HighlightedText(label="HighlightedText", show_legend=True), - gr.JSON(label="JSON"), - gr.HTML(label="HTML"), - gr.File(label="File"), - gr.Dataframe(label="Dataframe"), - gr.Dataframe(label="Numpy"), - gr.Timeseries(x="time", y=["price", "value"], label="Timeseries"), - ], - examples=[ - [ - "Allergy", - "Admission", - 10, - 12, - 4, - True, - ["SNOMED", "LOINC", "CQM"], - "SNOMED", - "bar", - os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), - os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), - os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), - os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"), - os.path.join(os.path.dirname(__file__), "files/world.mp4"), - os.path.join(os.path.dirname(__file__), "files/cantina.wav"), - os.path.join(os.path.dirname(__file__), "files/cantina.wav"), - os.path.join(os.path.dirname(__file__), "files/titanic.csv"), - [[1, 2, 3], [3, 4, 5]], - os.path.join(os.path.dirname(__file__), "files/time.csv"), - ] - ] - * 3, - theme="default", - title="⚗️🧠🔬🧬 Clinical Terminology Auto Mapper AI 👩‍⚕️🩺⚕️🙋", - cache_examples=False, - description="Clinical Terminology Auto Mapper AI", - article="Learn more at [Yggdrasil](https://github.com/AaronCWacker/Yggdrasil)", -# live=True, -) - -if __name__ == "__main__": - demo.launch(debug=True) 
\ No newline at end of file diff --git a/spaces/weishao2019/ChuanhuChatGPT/Dockerfile b/spaces/weishao2019/ChuanhuChatGPT/Dockerfile deleted file mode 100644 index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000 --- a/spaces/weishao2019/ChuanhuChatGPT/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -RUN pip install --user -r requirements.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . /app -WORKDIR /app -ENV my_api_key empty -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/wuhuik/bingo/src/components/chat-history.tsx b/spaces/wuhuik/bingo/src/components/chat-history.tsx deleted file mode 100644 index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/chat-history.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons" - -export function ChatHistory() { - return ( -
              -
              - 历史记录 -
              -
              -
              -
              -
              -
              -
              - -
              -

              无标题的聊天

              -
              -

              上午1:42

              -
              - - - - - - - - -
              -
              -
              -
              -
              -
              -
              -
              - ) -} diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/osnet_ain.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/osnet_ain.py deleted file mode 100644 index 3f9f7bd0704502401d499fd2bfdb802522b99efe..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/osnet_ain.py +++ /dev/null @@ -1,609 +0,0 @@ -from __future__ import division, absolute_import -import warnings -import torch -from torch import nn -from torch.nn import functional as F - -__all__ = [ - 'osnet_ain_x1_0', 'osnet_ain_x0_75', 'osnet_ain_x0_5', 'osnet_ain_x0_25' -] - -pretrained_urls = { - 'osnet_ain_x1_0': - 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo', - 'osnet_ain_x0_75': - 'https://drive.google.com/uc?id=1apy0hpsMypqstfencdH-jKIUEFOW4xoM', - 'osnet_ain_x0_5': - 'https://drive.google.com/uc?id=1KusKvEYyKGDTUBVRxRiz55G31wkihB6l', - 'osnet_ain_x0_25': - 'https://drive.google.com/uc?id=1SxQt2AvmEcgWNhaRb2xC4rP6ZwVDP0Wt' -} - - -########## -# Basic layers -########## -class ConvLayer(nn.Module): - """Convolution layer (conv + bn + relu).""" - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - groups=1, - IN=False - ): - super(ConvLayer, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - bias=False, - groups=groups - ) - if IN: - self.bn = nn.InstanceNorm2d(out_channels, affine=True) - else: - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return self.relu(x) - - -class Conv1x1(nn.Module): - """1x1 convolution + bn + relu.""" - - def __init__(self, in_channels, out_channels, stride=1, groups=1): - super(Conv1x1, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - 1, - stride=stride, - padding=0, - bias=False, - groups=groups - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return self.relu(x) - - -class Conv1x1Linear(nn.Module): - """1x1 convolution + bn (w/o non-linearity).""" - - def __init__(self, in_channels, out_channels, stride=1, bn=True): - super(Conv1x1Linear, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, 1, stride=stride, padding=0, bias=False - ) - self.bn = None - if bn: - self.bn = nn.BatchNorm2d(out_channels) - - def forward(self, x): - x = self.conv(x) - if self.bn is not None: - x = self.bn(x) - return x - - -class Conv3x3(nn.Module): - """3x3 convolution + bn + relu.""" - - def __init__(self, in_channels, out_channels, stride=1, groups=1): - super(Conv3x3, self).__init__() - self.conv = nn.Conv2d( - in_channels, - out_channels, - 3, - stride=stride, - padding=1, - bias=False, - groups=groups - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return self.relu(x) - - -class LightConv3x3(nn.Module): - """Lightweight 3x3 convolution. - - 1x1 (linear) + dw 3x3 (nonlinear). 
- """ - - def __init__(self, in_channels, out_channels): - super(LightConv3x3, self).__init__() - self.conv1 = nn.Conv2d( - in_channels, out_channels, 1, stride=1, padding=0, bias=False - ) - self.conv2 = nn.Conv2d( - out_channels, - out_channels, - 3, - stride=1, - padding=1, - bias=False, - groups=out_channels - ) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - - def forward(self, x): - x = self.conv1(x) - x = self.conv2(x) - x = self.bn(x) - return self.relu(x) - - -class LightConvStream(nn.Module): - """Lightweight convolution stream.""" - - def __init__(self, in_channels, out_channels, depth): - super(LightConvStream, self).__init__() - assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format( - depth - ) - layers = [] - layers += [LightConv3x3(in_channels, out_channels)] - for i in range(depth - 1): - layers += [LightConv3x3(out_channels, out_channels)] - self.layers = nn.Sequential(*layers) - - def forward(self, x): - return self.layers(x) - - -########## -# Building blocks for omni-scale feature learning -########## -class ChannelGate(nn.Module): - """A mini-network that generates channel-wise gates conditioned on input tensor.""" - - def __init__( - self, - in_channels, - num_gates=None, - return_gates=False, - gate_activation='sigmoid', - reduction=16, - layer_norm=False - ): - super(ChannelGate, self).__init__() - if num_gates is None: - num_gates = in_channels - self.return_gates = return_gates - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.fc1 = nn.Conv2d( - in_channels, - in_channels // reduction, - kernel_size=1, - bias=True, - padding=0 - ) - self.norm1 = None - if layer_norm: - self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1)) - self.relu = nn.ReLU() - self.fc2 = nn.Conv2d( - in_channels // reduction, - num_gates, - kernel_size=1, - bias=True, - padding=0 - ) - if gate_activation == 'sigmoid': - self.gate_activation = nn.Sigmoid() - elif gate_activation == 'relu': - self.gate_activation = nn.ReLU() - elif gate_activation == 'linear': - self.gate_activation = None - else: - raise RuntimeError( - "Unknown gate activation: {}".format(gate_activation) - ) - - def forward(self, x): - input = x - x = self.global_avgpool(x) - x = self.fc1(x) - if self.norm1 is not None: - x = self.norm1(x) - x = self.relu(x) - x = self.fc2(x) - if self.gate_activation is not None: - x = self.gate_activation(x) - if self.return_gates: - return x - return input * x - - -class OSBlock(nn.Module): - """Omni-scale feature learning block.""" - - def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): - super(OSBlock, self).__init__() - assert T >= 1 - assert out_channels >= reduction and out_channels % reduction == 0 - mid_channels = out_channels // reduction - - self.conv1 = Conv1x1(in_channels, mid_channels) - self.conv2 = nn.ModuleList() - for t in range(1, T + 1): - self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] - self.gate = ChannelGate(mid_channels) - self.conv3 = Conv1x1Linear(mid_channels, out_channels) - self.downsample = None - if in_channels != out_channels: - self.downsample = Conv1x1Linear(in_channels, out_channels) - - def forward(self, x): - identity = x - x1 = self.conv1(x) - x2 = 0 - for conv2_t in self.conv2: - x2_t = conv2_t(x1) - x2 = x2 + self.gate(x2_t) - x3 = self.conv3(x2) - if self.downsample is not None: - identity = self.downsample(identity) - out = x3 + identity - return F.relu(out) - - -class OSBlockINin(nn.Module): - """Omni-scale feature learning block with instance 
normalization.""" - - def __init__(self, in_channels, out_channels, reduction=4, T=4, **kwargs): - super(OSBlockINin, self).__init__() - assert T >= 1 - assert out_channels >= reduction and out_channels % reduction == 0 - mid_channels = out_channels // reduction - - self.conv1 = Conv1x1(in_channels, mid_channels) - self.conv2 = nn.ModuleList() - for t in range(1, T + 1): - self.conv2 += [LightConvStream(mid_channels, mid_channels, t)] - self.gate = ChannelGate(mid_channels) - self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False) - self.downsample = None - if in_channels != out_channels: - self.downsample = Conv1x1Linear(in_channels, out_channels) - self.IN = nn.InstanceNorm2d(out_channels, affine=True) - - def forward(self, x): - identity = x - x1 = self.conv1(x) - x2 = 0 - for conv2_t in self.conv2: - x2_t = conv2_t(x1) - x2 = x2 + self.gate(x2_t) - x3 = self.conv3(x2) - x3 = self.IN(x3) # IN inside residual - if self.downsample is not None: - identity = self.downsample(identity) - out = x3 + identity - return F.relu(out) - - -########## -# Network architecture -########## -class OSNet(nn.Module): - """Omni-Scale Network. - - Reference: - - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019. - - Zhou et al. Learning Generalisable Omni-Scale Representations - for Person Re-Identification. TPAMI, 2021. - """ - - def __init__( - self, - num_classes, - blocks, - layers, - channels, - feature_dim=512, - loss='softmax', - conv1_IN=False, - **kwargs - ): - super(OSNet, self).__init__() - num_blocks = len(blocks) - assert num_blocks == len(layers) - assert num_blocks == len(channels) - 1 - self.loss = loss - self.feature_dim = feature_dim - - # convolutional backbone - self.conv1 = ConvLayer( - 3, channels[0], 7, stride=2, padding=3, IN=conv1_IN - ) - self.maxpool = nn.MaxPool2d(3, stride=2, padding=1) - self.conv2 = self._make_layer( - blocks[0], layers[0], channels[0], channels[1] - ) - self.pool2 = nn.Sequential( - Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2) - ) - self.conv3 = self._make_layer( - blocks[1], layers[1], channels[1], channels[2] - ) - self.pool3 = nn.Sequential( - Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2) - ) - self.conv4 = self._make_layer( - blocks[2], layers[2], channels[2], channels[3] - ) - self.conv5 = Conv1x1(channels[3], channels[3]) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - # fully connected layer - self.fc = self._construct_fc_layer( - self.feature_dim, channels[3], dropout_p=None - ) - # identity classification layer - self.classifier = nn.Linear(self.feature_dim, num_classes) - - self._init_params() - - def _make_layer(self, blocks, layer, in_channels, out_channels): - layers = [] - layers += [blocks[0](in_channels, out_channels)] - for i in range(1, len(blocks)): - layers += [blocks[i](out_channels, out_channels)] - return nn.Sequential(*layers) - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - if fc_dims is None or fc_dims < 0: - self.feature_dim = input_dim - return None - - if isinstance(fc_dims, int): - fc_dims = [fc_dims] - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU()) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def _init_params(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', 
nonlinearity='relu' - ) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.InstanceNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def featuremaps(self, x): - x = self.conv1(x) - x = self.maxpool(x) - x = self.conv2(x) - x = self.pool2(x) - x = self.conv3(x) - x = self.pool3(x) - x = self.conv4(x) - x = self.conv5(x) - return x - - def forward(self, x, return_featuremaps=False): - x = self.featuremaps(x) - if return_featuremaps: - return x - v = self.global_avgpool(x) - v = v.view(v.size(0), -1) - if self.fc is not None: - v = self.fc(v) - if not self.training: - return v - y = self.classifier(v) - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError("Unsupported loss: {}".format(self.loss)) - - -def init_pretrained_weights(model, key=''): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. - """ - import os - import errno - import gdown - from collections import OrderedDict - - def _get_torch_home(): - ENV_TORCH_HOME = 'TORCH_HOME' - ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' - DEFAULT_CACHE_DIR = '~/.cache' - torch_home = os.path.expanduser( - os.getenv( - ENV_TORCH_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch' - ) - ) - ) - return torch_home - - torch_home = _get_torch_home() - model_dir = os.path.join(torch_home, 'checkpoints') - try: - os.makedirs(model_dir) - except OSError as e: - if e.errno == errno.EEXIST: - # Directory already exists, ignore. - pass - else: - # Unexpected OSError, re-raise. - raise - filename = key + '_imagenet.pth' - cached_file = os.path.join(model_dir, filename) - - if not os.path.exists(cached_file): - gdown.download(pretrained_urls[key], cached_file, quiet=False) - - state_dict = torch.load(cached_file) - model_dict = model.state_dict() - new_state_dict = OrderedDict() - matched_layers, discarded_layers = [], [] - - for k, v in state_dict.items(): - if k.startswith('module.'): - k = k[7:] # discard module. - - if k in model_dict and model_dict[k].size() == v.size(): - new_state_dict[k] = v - matched_layers.append(k) - else: - discarded_layers.append(k) - - model_dict.update(new_state_dict) - model.load_state_dict(model_dict) - - if len(matched_layers) == 0: - warnings.warn( - 'The pretrained weights from "{}" cannot be loaded, ' - 'please check the key names manually ' - '(** ignored and continue **)'.format(cached_file) - ) - else: - print( - 'Successfully loaded imagenet pretrained weights from "{}"'. - format(cached_file) - ) - if len(discarded_layers) > 0: - print( - '** The following layers are discarded ' - 'due to unmatched keys or layer size: {}'. 
- format(discarded_layers) - ) - - -########## -# Instantiation -########## -def osnet_ain_x1_0( - num_classes=1000, pretrained=True, loss='softmax', **kwargs -): - model = OSNet( - num_classes, - blocks=[ - [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], - [OSBlockINin, OSBlock] - ], - layers=[2, 2, 2], - channels=[64, 256, 384, 512], - loss=loss, - conv1_IN=True, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_ain_x1_0') - return model - - -def osnet_ain_x0_75( - num_classes=1000, pretrained=True, loss='softmax', **kwargs -): - model = OSNet( - num_classes, - blocks=[ - [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], - [OSBlockINin, OSBlock] - ], - layers=[2, 2, 2], - channels=[48, 192, 288, 384], - loss=loss, - conv1_IN=True, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_ain_x0_75') - return model - - -def osnet_ain_x0_5( - num_classes=1000, pretrained=True, loss='softmax', **kwargs -): - model = OSNet( - num_classes, - blocks=[ - [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], - [OSBlockINin, OSBlock] - ], - layers=[2, 2, 2], - channels=[32, 128, 192, 256], - loss=loss, - conv1_IN=True, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_ain_x0_5') - return model - - -def osnet_ain_x0_25( - num_classes=1000, pretrained=True, loss='softmax', **kwargs -): - model = OSNet( - num_classes, - blocks=[ - [OSBlockINin, OSBlockINin], [OSBlock, OSBlockINin], - [OSBlockINin, OSBlock] - ], - layers=[2, 2, 2], - channels=[16, 64, 96, 128], - loss=loss, - conv1_IN=True, - **kwargs - ) - if pretrained: - init_pretrained_weights(model, key='osnet_ain_x0_25') - return model diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/midi/MidiEvent.ts b/spaces/yderre-aubay/midi-player-demo/src/common/midi/MidiEvent.ts deleted file mode 100644 index 3b30515b4c24fddd70dfdf751a11ccf1a224a92e..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/common/midi/MidiEvent.ts +++ /dev/null @@ -1,273 +0,0 @@ -import { - ControllerEvent, - EndOfTrackEvent, - Event, - NoteOffEvent, - NoteOnEvent, - PitchBendEvent, - PortPrefixEvent, - ProgramChangeEvent, - SequencerSpecificEvent, - SetTempoEvent, - TimeSignatureEvent, - TrackNameEvent, -} from "midifile-ts" - -/* factory */ - -export function midiEvent( - deltaTime: number, - type: T, -): Event { - return { - deltaTime, - type, - } -} - -export function endOfTrackMidiEvent(deltaTime: number): EndOfTrackEvent { - return { - deltaTime, - type: "meta", - subtype: "endOfTrack", - } -} - -export function portPrefixMidiEvent( - deltaTime: number, - port: number, -): PortPrefixEvent { - return { - deltaTime, - type: "meta", - subtype: "portPrefix", - port, - } -} - -export function trackNameMidiEvent( - deltaTime: number, - text: string, -): TrackNameEvent { - return { - deltaTime, - type: "meta", - subtype: "trackName", - text, - } -} - -// from bpm: SetTempoMidiEvent(t, 60000000 / bpm) -export function setTempoMidiEvent( - deltaTime: number, - microsecondsPerBeat: number, -): SetTempoEvent { - return { - deltaTime, - type: "meta", - subtype: "setTempo", - microsecondsPerBeat, - } -} - -export function timeSignatureMidiEvent( - deltaTime: number, - numerator = 4, - denominator = 4, - metronome = 24, - thirtyseconds = 8, -): TimeSignatureEvent { - return { - deltaTime, - type: "meta", - subtype: "timeSignature", - numerator, - denominator, - metronome, - thirtyseconds, - } -} - -// channel events - -export function noteOnMidiEvent( - deltaTime: 
number, - channel: number, - noteNumber: number, - velocity: number, -): NoteOnEvent { - return { - deltaTime, - type: "channel", - subtype: "noteOn", - channel, - noteNumber, - velocity, - } -} - -export function noteOffMidiEvent( - deltaTime: number, - channel: number, - noteNumber: number, - velocity: number = 0, -): NoteOffEvent { - return { - deltaTime, - type: "channel", - subtype: "noteOff", - channel, - noteNumber, - velocity, - } -} - -export function pitchBendMidiEvent( - deltaTime: number, - channel: number, - value: number, -): PitchBendEvent { - return { - deltaTime, - type: "channel", - subtype: "pitchBend", - channel, - value, - } -} - -export function programChangeMidiEvent( - deltaTime: number, - channel: number, - value: number, -): ProgramChangeEvent { - return { - deltaTime, - type: "channel", - subtype: "programChange", - channel, - value, - } -} - -// controller events - -export function controllerMidiEvent( - deltaTime: number, - channel: number, - controllerType: number, - value: number, -): ControllerEvent { - return { - deltaTime, - type: "channel", - subtype: "controller", - channel, - controllerType, - value, - } -} - -export function modulationMidiEvent( - deltaTime: number, - channel: number, - value: number, -) { - return controllerMidiEvent(deltaTime, channel, 0x01, value) -} - -export function volumeMidiEvent( - deltaTime: number, - channel: number, - value: number, -) { - return controllerMidiEvent(deltaTime, channel, 0x07, value) -} - -export function panMidiEvent( - deltaTime: number, - channel: number, - value: number, -) { - return controllerMidiEvent(deltaTime, channel, 0x0a, value) -} - -export function expressionMidiEvent( - deltaTime: number, - channel: number, - value: number, -) { - return controllerMidiEvent(deltaTime, channel, 0x0b, value) -} - -export function resetAllMidiEvent(deltaTime: number, channel: number) { - return controllerMidiEvent(deltaTime, channel, 121, 0) -} - -export function sequencerSpecificEvent( - deltaTime: number, - data: number[], -): SequencerSpecificEvent { - return { - type: "meta", - subtype: "sequencerSpecific", - deltaTime, - data, - } -} - -// Control Change - -export function controlChangeEvents( - deltaTime: number, - channel: number, - rpnMsb: number, - rpnLsb: number, - dataMsb?: number | undefined, - dataLsb?: number | undefined, -): ControllerEvent[] { - const rpn = [ - controllerMidiEvent(deltaTime, channel, 101, rpnMsb), - controllerMidiEvent(0, channel, 100, rpnLsb), - ] - - const data: ControllerEvent[] = [] - if (dataMsb !== undefined) { - data.push(controllerMidiEvent(0, channel, 6, dataMsb)) - } - if (dataLsb !== undefined) { - data.push(controllerMidiEvent(0, channel, 38, dataLsb)) - } - - return [...rpn, ...data] -} - -// value: 0 - 24 (半音 / Half sound) -export function pitchbendSensitivityEvents( - deltaTime: number, - channel: number, - value = 2, -) { - return controlChangeEvents(deltaTime, channel, 0, 0, value) -} - -// value: -8192 - 8191 -export function masterFineTuningEvents( - deltaTime: number, - channel: number, - value = 0, -) { - const s = value + 0x2000 - const m = Math.floor(s / 0x80) - const l = s - m * 0x80 - return controlChangeEvents(deltaTime, channel, 0, 1, m, l) -} - -// value: -24 - 24 -export function masterCoarceTuningEvents( - deltaTime: number, - channel: number, - value = 0, -) { - return controlChangeEvents(deltaTime, channel, 0, 2, value + 64) -} diff --git a/spaces/yejijue/img-to-music/app.py b/spaces/yejijue/img-to-music/app.py deleted file mode 100644 index 
30d094ce05b344d21f1c497c183a4ce7649ec164..0000000000000000000000000000000000000000 --- a/spaces/yejijue/img-to-music/app.py +++ /dev/null @@ -1,333 +0,0 @@ -import gradio as gr -import openai -import numpy as np -import time -import base64 -import ffmpeg -from sentence_transformers import SentenceTransformer -from audio2numpy import open_audio -import httpx -import json -import os -import requests -import urllib -import pydub -from os import path -from pydub import AudioSegment -import re - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator") -img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2") - -from share_btn import community_icon_html, loading_icon_html, share_js -from utils import get_tags_for_prompts, get_mubert_tags_embeddings - -minilm = SentenceTransformer('all-MiniLM-L6-v2') -mubert_tags_embeddings = get_mubert_tags_embeddings(minilm) - -##———————————————————————————————————— - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -##———————————————————————————————————— -def get_pat_token(): - r = httpx.post('https://api-b2b.mubert.com/v2/GetServiceAccess', - json={ - "method": "GetServiceAccess", - "params": { - "email":"mail@mail.com", - "phone":"+11234567890", - "license": MUBERT_LICENSE, - "token": MUBERT_TOKEN, - - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, "probably incorrect e-mail" - pat = rdata['data']['pat'] - #print(f"pat: {pat}") - return pat - -def get_music(pat, prompt, track_duration, gen_intensity, gen_mode): - - if len(prompt) > 200: - prompt = prompt[:200] - - r = httpx.post('https://api-b2b.mubert.com/v2/TTMRecordTrack', - json={ - "method": "TTMRecordTrack", - "params": - { - "text": prompt, - "pat": pat, - "mode":gen_mode, - "duration":track_duration, - "intensity": gen_intensity, - "format": "wav" - } - }) - - rdata = json.loads(r.text) - - #print(f"rdata: {rdata}") - assert rdata['status'] == 1, rdata['error']['text'] - track = rdata['data']['tasks'][0]['download_link'] - print(track) - - local_file_path = "sample.wav" - - # Download the MP3 file from the URL - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7; rv:93.0) Gecko/20100101 Firefox/93.0'} - - retries = 3 - delay = 5 # in seconds - while retries > 0: - response = requests.get(track, headers=headers) - if response.status_code == 200: - break - retries -= 1 - time.sleep(delay) - response = requests.get(track, headers=headers) - print(f"{response}") - # Save the downloaded content to a local file - with open(local_file_path, 'wb') as f: - f.write(response.content) - return "sample.wav", track - - -def get_results(text_prompt,track_duration,gen_intensity,gen_mode): - pat_token = get_pat_token() - music = get_music(pat_token, text_prompt, track_duration, gen_intensity, gen_mode) - return pat_token, music[0], music[1] - -def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode, openai_api_key): - print("calling clip interrogator") - #prompt = img_to_text(uploaded_image, "ViT-L (best for Stable Diffusion 1.*)", "fast", fn_index=1)[0] - - prompt = img_to_text(uploaded_image, 'best', 4, fn_index=1)[0] - print(prompt) - clean_prompt = clean_text(prompt) - print(f"prompt cleaned: {clean_prompt}") - musical_prompt = 'You did not use any OpenAI API key to pimp your result :)' - if openai_api_key is not None: - gpt_adaptation = try_api(prompt, openai_api_key) - if 
gpt_adaptation[0] != "oups": - musical_prompt = gpt_adaptation[0] - print(f"musical adapt: {musical_prompt}") - music_result = get_results(musical_prompt, track_duration, gen_intensity, gen_mode) - else: - music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode) - else: - music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode) - - show_prompts = f""" - CLIP Interrogator Caption: '{prompt}' - — - OpenAI Musical Adaptation: '{musical_prompt}' - — - Audio file link: {music_result[2]} - """ - #wave_file = convert_mp3_to_wav(music_result[1]) - - time.sleep(1) - return gr.Textbox.update(value=show_prompts, visible=True), music_result[1], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -def try_api(message, openai_api_key): - - try: - response = call_api(message, openai_api_key) - return response, "no error" - except openai.error.Timeout as e: - #Handle timeout error, e.g. retry or log - #print(f"OpenAI API request timed out: {e}") - return "oups", f"OpenAI API request timed out:
{e}" - except openai.error.APIError as e: - #Handle API error, e.g. retry or log - #print(f"OpenAI API returned an API Error: {e}") - return "oups", f"OpenAI API returned an API Error: {e}" - except openai.error.APIConnectionError as e: - #Handle connection error, e.g. check network or log - #print(f"OpenAI API request failed to connect: {e}") - return "oups", f"OpenAI API request failed to connect: {e}" - except openai.error.InvalidRequestError as e: - #Handle invalid request error, e.g. validate parameters or log - #print(f"OpenAI API request was invalid: {e}") - return "oups", f"OpenAI API request was invalid: {e}" - except openai.error.AuthenticationError as e: - #Handle authentication error, e.g. check credentials or log - #print(f"OpenAI API request was not authorized: {e}") - return "oups", f"OpenAI API request was not authorized: {e}" - except openai.error.PermissionError as e: - #Handle permission error, e.g. check scope or log - #print(f"OpenAI API request was not permitted: {e}") - return "oups", f"OpenAI API request was not permitted: {e}" - except openai.error.RateLimitError as e: - #Handle rate limit error, e.g. wait or log - #print(f"OpenAI API request exceeded rate limit: {e}") - return "oups", f"OpenAI API request exceeded rate limit: {e}
              " - -def call_api(message, openai_api_key): - - instruction = "Convert in less than 200 characters this image caption to a very concise musical description with musical terms, as if you wanted to describe a musical ambiance, stricly in English" - - print("starting open ai") - augmented_prompt = f"{instruction}: '{message}'." - openai.api_key = openai_api_key - - response = openai.Completion.create( - model="text-davinci-003", - prompt=augmented_prompt, - temperature=0.5, - max_tokens=2048, - top_p=1, - frequency_penalty=0, - presence_penalty=0.6 - ) - - #print(response) - - #return str(response.choices[0].text).split("\n",2)[2] - return str(response.choices[0].text).lstrip('\n') - - -def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20): - - r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM', - json={ - "method": "RecordTrackTTM", - "params": { - "pat": pat, - "duration": duration, - "format": "wav", - "intensity":gen_intensity, - "tags": tags, - "mode": gen_mode - } - }) - - rdata = json.loads(r.text) - print(rdata) - #assert rdata['status'] == 1, rdata['error']['text'] - trackurl = rdata['data']['tasks'][0] - - print('Generating track ', end='') - for i in range(maxit): - r = httpx.get(trackurl) - if r.status_code == 200: - return trackurl - time.sleep(1) - - -def generate_track_by_prompt(pat, prompt, duration, gen_intensity, gen_mode): - try: - _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, prompt)[0] - result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode) - print(result) - return result, ",".join(tags), "Success" - except Exception as e: - return None, "", str(e) - -def convert_mp3_to_wav(mp3_filepath): - - wave_file="file.wav" - - sound = AudioSegment.from_mp3(mp3_filepath) - sound.export(wave_file, format="wav") - - return wave_file - -def remove_emoji(text): - emoji_pattern = re.compile("[" - u"\U0001F600-\U0001F64F" # emoticons - u"\U0001F300-\U0001F5FF" # symbols & pictographs - u"\U0001F680-\U0001F6FF" # transport & map symbols - u"\U0001F1E0-\U0001F1FF" # flags (iOS) - "]+", flags=re.UNICODE) - return emoji_pattern.sub(r'', text) - -def remove_nonalphanumeric(text): - return re.sub(r'[^a-zA-Z0-9\s]', '', text) - -def clean_text(text): - clean_text = remove_nonalphanumeric(text) - clean_text = remove_emoji(clean_text) - clean_text = re.sub(r'\d+', '', clean_text) # Remove any number - return clean_text - -article = """ - - - -
              -

              You may also like:

              -
              - - - - - -
              -
              - - -""" - -with gr.Blocks(css="style.css") as demo: - with gr.Column(elem_id="col-container"): - - gr.HTML("""
              -
              -

              - Image to Music -

              -
              -

- Sends an image into CLIP Interrogator - to generate a text prompt which is then run through - Mubert text-to-music to generate music from the input image! -

              -
              """) - - input_img = gr.Image(type="filepath", elem_id="input-img") - prompts_out = gr.Textbox(label="Text Captions", visible=False, elem_id="prompts_out", info="If player do not work, try to copy/paste the link in a new browser window") - music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output").style(height="5rem") - #music_url = gr.Textbox(max_lines=1, info="If player do not work, try to copy/paste the link in a new browser window") - #text_status = gr.Textbox(label="status") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - with gr.Accordion(label="Music Generation Options", open=False): - openai_api_key = gr.Textbox(type="password", label="🔐 Your OpenAI API Key (optional)", placeholder="sk-123abc...", info="You can use your OpenAI key to adapt CLIP Interrogator caption to a musical translation.") - track_duration = gr.Slider(minimum=20, maximum=120, value=55, ustep=5, label="Track duration", elem_id="duration-inp") - with gr.Row(): - gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="medium", label="Intensity") - gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="loop") - - generate = gr.Button("Generate Music from Image") - - gr.HTML(article) - - generate.click(get_prompts, inputs=[input_img,track_duration,gen_intensity,gen_mode, openai_api_key], outputs=[prompts_out, music_output, share_button, community_icon, loading_icon], api_name="i2m") - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=32).launch() \ No newline at end of file diff --git a/spaces/yerfor/SyntaSpeech/utils/audio/io.py b/spaces/yerfor/SyntaSpeech/utils/audio/io.py deleted file mode 100644 index 34d5d20ae13e9aa481b1bc85117ad6539af8a624..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/utils/audio/io.py +++ /dev/null @@ -1,22 +0,0 @@ -import subprocess - -import numpy as np -from scipy.io import wavfile - - -def save_wav(wav, path, sr, norm=False): - if norm: - wav = wav / np.abs(wav).max() - wav = wav * 32767 - wavfile.write(path[:-4] + '.wav', sr, wav.astype(np.int16)) - if path[-4:] == '.mp3': - to_mp3(path[:-4]) - - -def to_mp3(out_path): - if out_path[-4:] == '.wav': - out_path = out_path[:-4] - subprocess.check_call( - f'ffmpeg -threads 1 -loglevel error -i "{out_path}.wav" -vn -b:a 192k -y -hide_banner -async 1 "{out_path}.mp3"', - shell=True, stdin=subprocess.PIPE) - subprocess.check_call(f'rm -f "{out_path}.wav"', shell=True) diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/smoothswap/id_embedder.py b/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/smoothswap/id_embedder.py deleted file mode 100644 index 28dcb7ccde140ac820d224db1675c0d8c98e56f2..0000000000000000000000000000000000000000 --- a/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/smoothswap/id_embedder.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from modules.layers.smoothswap.resnet import resnet50 - - -class IdentityHead(nn.Module): - def __init__(self): - super(IdentityHead, self).__init__() - self.fc1 = nn.Sequential( - nn.Linear(512 * 4, 1024), - nn.BatchNorm1d(num_features=1024), - nn.LeakyReLU(negative_slope=0.2, inplace=True) - ) - self.fc2 = nn.Sequential( - nn.Linear(1024, 512), - nn.BatchNorm1d(num_features=512) - ) - - for m in 
self.modules(): - if isinstance(m, (nn.BatchNorm2d,)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x = self.fc1(x) - x = self.fc2(x) - x = F.normalize(x) - return x - - -class IdentityEmbedder(nn.Module): - def __init__(self): - super(IdentityEmbedder, self).__init__() - - self.backbone = resnet50(pretrained=False) - self.head = IdentityHead() - - def forward(self, x_src): - x_src = self.backbone(x_src) - x_src = self.head(x_src) - return x_src - - -if __name__ == '__main__': - img = torch.randn((11, 3, 256, 256)).cuda() - net = IdentityEmbedder().cuda() - out = net(img) - print(out.shape) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/deformable_detr/cpu/ms_deform_attn_cpu.cpp b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/deformable_detr/cpu/ms_deform_attn_cpu.cpp deleted file mode 100644 index 388a73d22d4c9b561e2a887b50a1897b8cf2def9..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/deformable_detr/cpu/ms_deform_attn_cpu.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. -* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#include - -#include -#include - - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - AT_ERROR("Not implement on cpu"); -} - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - AT_ERROR("Not implement on cpu"); -} diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/README.md b/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/README.md deleted file mode 100644 index ded73f2b118515ca53bb85a32ebe4739d4228416..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/browserslist/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Browserslist [![Cult Of Martians][cult-img]][cult] - -Browserslist logo by Anton Popov - -The config to share target browsers and Node.js versions between different -front-end tools. 
It is used in: - -* [Autoprefixer] -* [Babel] -* [postcss-preset-env] -* [eslint-plugin-compat] -* [stylelint-no-unsupported-browser-features] -* [postcss-normalize] -* [obsolete-webpack-plugin] - -All tools will find target browsers automatically, -when you add the following to `package.json`: - -```json - "browserslist": [ - "defaults and supports es6-module", - "maintained node versions" - ] -``` - -Or in `.browserslistrc` config: - -```yaml -# Browsers that we support - -defaults and supports es6-module -maintained node versions -``` - -Developers set their version lists using queries like `last 2 versions` -to be free from updating versions manually. -Browserslist will use [`caniuse-lite`] with [Can I Use] data for this queries. - -You can check how config works at our playground: [`browsersl.ist`](https://browsersl.ist/) - - - browsersl.ist website - - -
              -
              - - -[stylelint-no-unsupported-browser-features]: https://github.com/ismay/stylelint-no-unsupported-browser-features -[obsolete-webpack-plugin]: https://github.com/ElemeFE/obsolete-webpack-plugin -[eslint-plugin-compat]: https://github.com/amilajack/eslint-plugin-compat -[Browserslist Example]: https://github.com/browserslist/browserslist-example -[postcss-preset-env]: https://github.com/csstools/postcss-plugins/tree/main/plugin-packs/postcss-preset-env -[postcss-normalize]: https://github.com/csstools/postcss-normalize -[`browsersl.ist`]: https://browsersl.ist/ -[`caniuse-lite`]: https://github.com/ben-eb/caniuse-lite -[Autoprefixer]: https://github.com/postcss/autoprefixer -[Can I Use]: https://caniuse.com/ -[Babel]: https://github.com/babel/babel/tree/master/packages/babel-preset-env -[cult-img]: https://cultofmartians.com/assets/badges/badge.svg -[cult]: https://cultofmartians.com/done.html - -## Docs -Read **[full docs](https://github.com/browserslist/browserslist#readme)** on GitHub. diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/electron-to-chromium/README.md b/spaces/younker/chatgpt-turbo/client/node_modules/electron-to-chromium/README.md deleted file mode 100644 index a96ddf12afe27d617eb3128ccb6317c77b9cce37..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/electron-to-chromium/README.md +++ /dev/null @@ -1,186 +0,0 @@ -### Made by [@kilianvalkhof](https://twitter.com/kilianvalkhof) - -#### Other projects: - -- 💻 [Polypane](https://polypane.app) - Develop responsive websites and apps twice as fast on multiple screens at once -- 🖌️ [Superposition](https://superposition.design) - Kickstart your design system by extracting design tokens from your website -- 🗒️ [FromScratch](https://fromscratch.rocks) - A smart but simple autosaving scratchpad - ---- - -# Electron-to-Chromium [![npm](https://img.shields.io/npm/v/electron-to-chromium.svg)](https://www.npmjs.com/package/electron-to-chromium) [![travis](https://img.shields.io/travis/Kilian/electron-to-chromium/master.svg)](https://travis-ci.org/Kilian/electron-to-chromium) [![npm-downloads](https://img.shields.io/npm/dm/electron-to-chromium.svg)](https://www.npmjs.com/package/electron-to-chromium) [![codecov](https://codecov.io/gh/Kilian/electron-to-chromium/branch/master/graph/badge.svg)](https://codecov.io/gh/Kilian/electron-to-chromium)[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FKilian%2Felectron-to-chromium.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FKilian%2Felectron-to-chromium?ref=badge_shield) - -This repository provides a mapping of Electron versions to the Chromium version that it uses. - -This package is used in [Browserslist](https://github.com/ai/browserslist), so you can use e.g. `electron >= 1.4` in [Autoprefixer](https://github.com/postcss/autoprefixer), [Stylelint](https://github.com/stylelint/stylelint), [babel-preset-env](https://github.com/babel/babel-preset-env) and [eslint-plugin-compat](https://github.com/amilajack/eslint-plugin-compat). - -**Supported by:** - - - - - - -## Install -Install using `npm install electron-to-chromium`. - -## Usage -To include Electron-to-Chromium, require it: - -```js -var e2c = require('electron-to-chromium'); -``` - -### Properties -The Electron-to-Chromium object has 4 properties to use: - -#### `versions` -An object of key-value pairs with a _major_ Electron version as the key, and the corresponding major Chromium version as the value. 
- -```js -var versions = e2c.versions; -console.log(versions['1.4']); -// returns "53" -``` - -#### `fullVersions` -An object of key-value pairs with a Electron version as the key, and the corresponding full Chromium version as the value. - -```js -var versions = e2c.fullVersions; -console.log(versions['1.4.11']); -// returns "53.0.2785.143" -``` - -#### `chromiumVersions` -An object of key-value pairs with a _major_ Chromium version as the key, and the corresponding major Electron version as the value. - -```js -var versions = e2c.chromiumVersions; -console.log(versions['54']); -// returns "1.4" -``` - -#### `fullChromiumVersions` -An object of key-value pairs with a Chromium version as the key, and an array of the corresponding major Electron versions as the value. - -```js -var versions = e2c.fullChromiumVersions; -console.log(versions['54.0.2840.101']); -// returns ["1.5.1", "1.5.0"] -``` -### Functions - -#### `electronToChromium(query)` -Arguments: -* Query: string or number, required. A major or full Electron version. - -A function that returns the corresponding Chromium version for a given Electron function. Returns a string. - -If you provide it with a major Electron version, it will return a major Chromium version: - -```js -var chromeVersion = e2c.electronToChromium('1.4'); -// chromeVersion is "53" -``` - -If you provide it with a full Electron version, it will return the full Chromium version. - -```js -var chromeVersion = e2c.electronToChromium('1.4.11'); -// chromeVersion is "53.0.2785.143" -``` - -If a query does not match a Chromium version, it will return `undefined`. - -```js -var chromeVersion = e2c.electronToChromium('9000'); -// chromeVersion is undefined -``` - -#### `chromiumToElectron(query)` -Arguments: -* Query: string or number, required. A major or full Chromium version. - -Returns a string with the corresponding Electron version for a given Chromium query. - -If you provide it with a major Chromium version, it will return a major Electron version: - -```js -var electronVersion = e2c.chromiumToElectron('54'); -// electronVersion is "1.4" -``` - -If you provide it with a full Chrome version, it will return an array of full Electron versions. - -```js -var electronVersions = e2c.chromiumToElectron('56.0.2924.87'); -// electronVersions is ["1.6.3", "1.6.2", "1.6.1", "1.6.0"] -``` - -If a query does not match an Electron version, it will return `undefined`. - -```js -var electronVersion = e2c.chromiumToElectron('10'); -// electronVersion is undefined -``` - -#### `electronToBrowserList(query)` **DEPRECATED** -Arguments: -* Query: string or number, required. A major Electron version. - -_**Deprecated**: Browserlist already includes electron-to-chromium._ - -A function that returns a [Browserslist](https://github.com/ai/browserslist) query that matches the given major Electron version. Returns a string. - -If you provide it with a major Electron version, it will return a Browserlist query string that matches the Chromium capabilities: - -```js -var query = e2c.electronToBrowserList('1.4'); -// query is "Chrome >= 53" -``` - -If a query does not match a Chromium version, it will return `undefined`. - -```js -var query = e2c.electronToBrowserList('9000'); -// query is undefined -``` - -### Importing just versions, fullVersions, chromiumVersions and fullChromiumVersions -All lists can be imported on their own, if file size is a concern. 
- -#### `versions` - -```js -var versions = require('electron-to-chromium/versions'); -``` - -#### `fullVersions` - -```js -var fullVersions = require('electron-to-chromium/full-versions'); -``` - -#### `chromiumVersions` - -```js -var chromiumVersions = require('electron-to-chromium/chromium-versions'); -``` - -#### `fullChromiumVersions` - -```js -var fullChromiumVersions = require('electron-to-chromium/full-chromium-versions'); -``` - -## Updating -This package will be updated with each new Electron release. - -To update the list, run `npm run build.js`. Requires internet access as it downloads from the canonical list of Electron versions. - -To verify correct behaviour, run `npm test`. - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FKilian%2Felectron-to-chromium.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FKilian%2Felectron-to-chromium?ref=badge_large) diff --git a/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/__init__.py b/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/__init__.py deleted file mode 100644 index 10e9f1b1fd7fa0999f58f1cdc3b2ea2bd7c77d6d..0000000000000000000000000000000000000000 --- a/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# SRC: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/__init__.py -import logging -import importlib - -from .abs_model import abs_model - - -def find_model_using_name(model_name): - """Import the module "models/[model_name].py". - In the file, the class called DatasetNameModel() will - be instantiated. It has to be a subclass of BaseModel, - and it is case-insensitive. - """ - model_filename = "models." + model_name - modellib = importlib.import_module(model_filename) - model = None - - target_model_name = model_name - for name, cls in modellib.__dict__.items(): - if name.lower() == target_model_name.lower() \ - and issubclass(cls, abs_model): - model = cls - - if model is None: - err = "In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name) - logging.error(err) - exit(0) - - return model - - -def create_model(opt): - """Create a model given the option. 
- This funct - This is the main interface between this package and 'train.py'/'test.py' - Example: - >>> from models import create_model - >>> model = create_model(opt) - """ - model = find_model_using_name(opt['model']['name']) - instance = model(opt) - logging.info("model [%s] was created" % type(instance).__name__) - return instance diff --git a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/finetune_ui.py b/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/finetune_ui.py deleted file mode 100644 index b6f92ddbf23306a2b070f31f5c807c637c302708..0000000000000000000000000000000000000000 --- a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/finetune_ui.py +++ /dev/null @@ -1,827 +0,0 @@ -import os -import json -from datetime import datetime -import gradio as gr -from random_word import RandomWords - -from ...config import Config -from ...globals import Global -from ...utils.data import ( - get_available_template_names, - get_available_dataset_names, - get_available_lora_model_names -) -from ...utils.relative_read_file import relative_read_file -from ..css_styles import register_css_style - -from .values import ( - default_dataset_plain_text_input_variables_separator, - default_dataset_plain_text_input_and_output_separator, - default_dataset_plain_text_data_separator, - sample_plain_text_value, - sample_jsonl_text_value, - sample_json_text_value, -) -from .previewing import ( - refresh_preview, - refresh_dataset_items_count, -) -from .training import ( - do_train, - render_training_status, - render_loss_plot -) - -register_css_style('finetune', relative_read_file(__file__, "style.css")) - - -def random_hyphenated_word(): - r = RandomWords() - word1 = r.get_random_word() - word2 = r.get_random_word() - return word1 + '-' + word2 - - -def random_name(): - current_datetime = datetime.now() - formatted_datetime = current_datetime.strftime("%Y-%m-%d-%H-%M-%S") - return f"{random_hyphenated_word()}-{formatted_datetime}" - - -def reload_selections(current_template, current_dataset): - available_template_names = get_available_template_names() - available_template_names_with_none = available_template_names + ["None"] - if current_template not in available_template_names_with_none: - current_template = None - current_template = current_template or next( - iter(available_template_names_with_none), None) - - available_dataset_names = get_available_dataset_names() - if current_dataset not in available_dataset_names: - current_dataset = None - current_dataset = current_dataset or next( - iter(available_dataset_names), None) - - available_lora_models = ["-"] + get_available_lora_model_names() - - return ( - gr.Dropdown.update( - choices=available_template_names_with_none, - value=current_template), - gr.Dropdown.update( - choices=available_dataset_names, - value=current_dataset), - gr.Dropdown.update(choices=available_lora_models) - ) - - -def handle_switch_dataset_source(source): - if source == "Text Input": - return gr.Column.update(visible=True), gr.Column.update(visible=False) - else: - return gr.Column.update(visible=False), gr.Column.update(visible=True) - - -def handle_switch_dataset_text_format(format): - if format == "Plain Text": - return gr.Column.update(visible=True) - return gr.Column.update(visible=False) - - -def load_sample_dataset_to_text_input(format): - if format == "JSON": - return gr.Code.update(value=sample_json_text_value) - if format == "JSON Lines": - return gr.Code.update(value=sample_jsonl_text_value) - else: # Plain Text - return 
gr.Code.update(value=sample_plain_text_value) - - -def handle_continue_from_model_change(model_name): - try: - lora_models_directory_path = os.path.join( - Config.data_dir, "lora_models") - lora_model_directory_path = os.path.join( - lora_models_directory_path, model_name) - all_files = os.listdir(lora_model_directory_path) - checkpoints = [ - file for file in all_files if file.startswith("checkpoint-")] - checkpoints = ["-"] + checkpoints - can_load_params = "finetune_params.json" in all_files or "finetune_args.json" in all_files - return (gr.Dropdown.update(choices=checkpoints, value="-"), - gr.Button.update(visible=can_load_params), - gr.Markdown.update(value="", visible=False)) - except Exception: - pass - return (gr.Dropdown.update(choices=["-"], value="-"), - gr.Button.update(visible=False), - gr.Markdown.update(value="", visible=False)) - - -def handle_load_params_from_model( - model_name, - template, load_dataset_from, dataset_from_data_dir, - max_seq_length, - evaluate_data_count, - micro_batch_size, - gradient_accumulation_steps, - epochs, - learning_rate, - train_on_inputs, - lora_r, - lora_alpha, - lora_dropout, - lora_target_modules, - lora_modules_to_save, - load_in_8bit, - fp16, - bf16, - gradient_checkpointing, - save_steps, - save_total_limit, - logging_steps, - additional_training_arguments, - additional_lora_config, - lora_target_module_choices, - lora_modules_to_save_choices, -): - error_message = "" - notice_message = "" - unknown_keys = [] - try: - lora_models_directory_path = os.path.join( - Config.data_dir, "lora_models") - lora_model_directory_path = os.path.join( - lora_models_directory_path, model_name) - - try: - with open(os.path.join(lora_model_directory_path, "info.json"), "r") as f: - info = json.load(f) - if isinstance(info, dict): - model_prompt_template = info.get("prompt_template") - if model_prompt_template: - template = model_prompt_template - model_dataset_name = info.get("dataset_name") - if model_dataset_name and isinstance(model_dataset_name, str) and not model_dataset_name.startswith("N/A"): - load_dataset_from = "Data Dir" - dataset_from_data_dir = model_dataset_name - except FileNotFoundError: - pass - - data = {} - possible_files = ["finetune_params.json", "finetune_args.json"] - for file in possible_files: - try: - with open(os.path.join(lora_model_directory_path, file), "r") as f: - data = json.load(f) - except FileNotFoundError: - pass - - for key, value in data.items(): - if key == "max_seq_length": - max_seq_length = value - if key == "cutoff_len": - max_seq_length = value - elif key == "evaluate_data_count": - evaluate_data_count = value - elif key == "val_set_size": - evaluate_data_count = value - elif key == "micro_batch_size": - micro_batch_size = value - elif key == "gradient_accumulation_steps": - gradient_accumulation_steps = value - elif key == "epochs": - epochs = value - elif key == "num_train_epochs": - epochs = value - elif key == "learning_rate": - learning_rate = value - elif key == "train_on_inputs": - train_on_inputs = value - elif key == "lora_r": - lora_r = value - elif key == "lora_alpha": - lora_alpha = value - elif key == "lora_dropout": - lora_dropout = value - elif key == "lora_target_modules": - lora_target_modules = value - if value: - for element in value: - if element not in lora_target_module_choices: - lora_target_module_choices.append(element) - elif key == "lora_modules_to_save": - lora_modules_to_save = value - if value: - for element in value: - if element not in lora_modules_to_save_choices: - 
lora_modules_to_save_choices.append(element) - elif key == "load_in_8bit": - load_in_8bit = value - elif key == "fp16": - fp16 = value - elif key == "bf16": - bf16 = value - elif key == "gradient_checkpointing": - gradient_checkpointing = value - elif key == "save_steps": - save_steps = value - elif key == "save_total_limit": - save_total_limit = value - elif key == "logging_steps": - logging_steps = value - elif key == "additional_training_arguments": - if value: - additional_training_arguments = json.dumps(value, indent=2) - else: - additional_training_arguments = "" - elif key == "additional_lora_config": - if value: - additional_lora_config = json.dumps(value, indent=2) - else: - additional_lora_config = "" - elif key == "group_by_length": - pass - elif key == "resume_from_checkpoint": - pass - else: - unknown_keys.append(key) - except Exception as e: - error_message = str(e) - - if len(unknown_keys) > 0: - notice_message = f"Note: cannot restore unknown arg: {', '.join([f'`{x}`' for x in unknown_keys])}" - - message = ". ".join([x for x in [error_message, notice_message] if x]) - - has_message = False - if message: - message += "." - has_message = True - - return ( - gr.Markdown.update(value=message, visible=has_message), - template, load_dataset_from, dataset_from_data_dir, - max_seq_length, - evaluate_data_count, - micro_batch_size, - gradient_accumulation_steps, - epochs, - learning_rate, - train_on_inputs, - lora_r, - lora_alpha, - lora_dropout, - gr.CheckboxGroup.update(value=lora_target_modules, - choices=lora_target_module_choices), - gr.CheckboxGroup.update( - value=lora_modules_to_save, choices=lora_modules_to_save_choices), - load_in_8bit, - fp16, - bf16, - gradient_checkpointing, - save_steps, - save_total_limit, - logging_steps, - additional_training_arguments, - additional_lora_config, - lora_target_module_choices, - lora_modules_to_save_choices - ) - - -default_lora_target_module_choices = ["q_proj", "k_proj", "v_proj", "o_proj"] -default_lora_modules_to_save_choices = ["model.embed_tokens", "lm_head"] - - -def handle_lora_target_modules_add(choices, new_module, selected_modules): - choices.append(new_module) - selected_modules.append(new_module) - - return (choices, "", gr.CheckboxGroup.update(value=selected_modules, choices=choices)) - - -def handle_lora_modules_to_save_add(choices, new_module, selected_modules): - choices.append(new_module) - selected_modules.append(new_module) - - return (choices, "", gr.CheckboxGroup.update(value=selected_modules, choices=choices)) - - -def do_abort_training(): - Global.should_stop_training = True - Global.training_status_text = "Aborting..." 
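
The `do_abort_training` helper deleted just above stops a run cooperatively: it only flips a global flag and updates a status string, and the training loop is expected to poll that flag between steps (the `cancels=[train_start]` wiring further down handles the Gradio side). A minimal, self-contained sketch of the same pattern follows; the `Globals` class and `train` loop here are illustrative stand-ins, not the project's actual `Global` object or trainer.

```python
# Minimal sketch of cooperative abort via a shared flag (illustrative only;
# the real project keeps the flag on its own Global singleton).
import time


class Globals:
    should_stop_training = False
    training_status_text = ""


def do_abort_training():
    # Only request the stop; the training loop decides when to actually exit.
    Globals.should_stop_training = True
    Globals.training_status_text = "Aborting..."


def train(total_steps: int = 100) -> int:
    for step in range(total_steps):
        if Globals.should_stop_training:
            # Leave checkpointing/cleanup to the caller; just stop iterating.
            return step
        time.sleep(0.01)  # stand-in for one optimization step
    return total_steps


if __name__ == "__main__":
    do_abort_training()   # simulate the user pressing "Confirm Abort"
    print(train())        # exits immediately at step 0
```
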
- - -def finetune_ui(): - things_that_might_timeout = [] - - with gr.Blocks() as finetune_ui_blocks: - with gr.Column(elem_id="finetune_ui_content"): - with gr.Tab("Prepare"): - with gr.Box(elem_id="finetune_ui_select_dataset_source"): - with gr.Row(): - template = gr.Dropdown( - label="Template", - elem_id="finetune_template", - ) - load_dataset_from = gr.Radio( - ["Text Input", "Data Dir"], - label="Load Dataset From", - value="Text Input", - elem_id="finetune_load_dataset_from") - reload_selections_button = gr.Button( - "↻", - elem_id="finetune_reload_selections_button" - ) - reload_selections_button.style( - full_width=False, - size="sm") - with gr.Column( - elem_id="finetune_dataset_from_data_dir_group", - visible=False - ) as dataset_from_data_dir_group: - dataset_from_data_dir = gr.Dropdown( - label="Dataset", - elem_id="finetune_dataset_from_data_dir", - ) - dataset_from_data_dir_message = gr.Markdown( - "", - visible=False, - elem_id="finetune_dataset_from_data_dir_message") - with gr.Box(elem_id="finetune_dataset_text_input_group") as dataset_text_input_group: - gr.Textbox( - label="Training Data", elem_classes="textbox_that_is_only_used_to_display_a_label") - dataset_text = gr.Code( - show_label=False, - language="json", - value=sample_plain_text_value, - # max_lines=40, - elem_id="finetune_dataset_text_input_textbox") - dataset_from_text_message = gr.Markdown( - "", - visible=False, - elem_id="finetune_dataset_from_text_message") - gr.Markdown( - "The data you entered here will not be saved. Do not make edits here directly. Instead, edit the data elsewhere then paste it here.") - with gr.Row(): - with gr.Column(): - dataset_text_format = gr.Radio( - ["Plain Text", "JSON Lines", "JSON"], - label="Format", value="Plain Text", elem_id="finetune_dataset_text_format") - dataset_text_load_sample_button = gr.Button( - "Load Sample", elem_id="finetune_dataset_text_load_sample_button") - dataset_text_load_sample_button.style( - full_width=False, - size="sm") - with gr.Column(elem_id="finetune_dataset_plain_text_separators_group") as dataset_plain_text_separators_group: - dataset_plain_text_input_variables_separator = gr.Textbox( - label="Input Variables Separator", - elem_id="dataset_plain_text_input_variables_separator", - placeholder=default_dataset_plain_text_input_variables_separator, - value=default_dataset_plain_text_input_variables_separator) - dataset_plain_text_input_and_output_separator = gr.Textbox( - label="Input and Output Separator", - elem_id="dataset_plain_text_input_and_output_separator", - placeholder=default_dataset_plain_text_input_and_output_separator, - value=default_dataset_plain_text_input_and_output_separator) - dataset_plain_text_data_separator = gr.Textbox( - label="Data Separator", - elem_id="dataset_plain_text_data_separator", - placeholder=default_dataset_plain_text_data_separator, - value=default_dataset_plain_text_data_separator) - things_that_might_timeout.append( - dataset_text_format.change( - fn=handle_switch_dataset_text_format, - inputs=[dataset_text_format], - outputs=[ - dataset_plain_text_separators_group # type: ignore - ] - )) - - things_that_might_timeout.append( - dataset_text_load_sample_button.click(fn=load_sample_dataset_to_text_input, inputs=[ - dataset_text_format], outputs=[dataset_text])) - gr.Markdown( - "💡 Switch to the \"Preview\" tab to verify that your inputs are correct.") - with gr.Tab("Preview"): - with gr.Row(): - finetune_dataset_preview_info_message = gr.Markdown( - "Set the dataset in the \"Prepare\" tab, then preview it 
here.", - elem_id="finetune_dataset_preview_info_message" - ) - finetune_dataset_preview_count = gr.Number( - label="Preview items count", - value=10, - # minimum=1, - # maximum=100, - precision=0, - elem_id="finetune_dataset_preview_count" - ) - finetune_dataset_preview = gr.Dataframe( - wrap=True, elem_id="finetune_dataset_preview") - things_that_might_timeout.append( - load_dataset_from.change( - fn=handle_switch_dataset_source, - inputs=[load_dataset_from], - outputs=[ - dataset_text_input_group, - dataset_from_data_dir_group - ] # type: ignore - )) - - dataset_inputs = [ - template, - load_dataset_from, - dataset_from_data_dir, - dataset_text, - dataset_text_format, - dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator, - dataset_plain_text_data_separator, - ] - dataset_preview_inputs = dataset_inputs + \ - [finetune_dataset_preview_count] - - with gr.Row(): - max_seq_length = gr.Slider( - minimum=1, maximum=4096, value=512, - label="Max Sequence Length", - info="The maximum length of each sample text sequence. Sequences longer than this will be truncated.", - elem_id="finetune_max_seq_length" - ) - - train_on_inputs = gr.Checkbox( - label="Train on Inputs", - value=True, - info="If not enabled, inputs will be masked out in loss.", - elem_id="finetune_train_on_inputs" - ) - - with gr.Row(): - # https://huggingface.co/docs/transformers/main/main_classes/trainer - - micro_batch_size_default_value = 1 - - if Global.gpu_total_cores is not None and Global.gpu_total_memory is not None: - memory_per_core = Global.gpu_total_memory / Global.gpu_total_cores - if memory_per_core >= 6291456: - micro_batch_size_default_value = 8 - elif memory_per_core >= 4000000: # ? - micro_batch_size_default_value = 4 - - with gr.Column(): - micro_batch_size = gr.Slider( - minimum=1, maximum=100, step=1, value=micro_batch_size_default_value, - label="Micro Batch Size", - info="The number of examples in each mini-batch for gradient computation. A smaller micro_batch_size reduces memory usage but may increase training time." - ) - - gradient_accumulation_steps = gr.Slider( - minimum=1, maximum=10, step=1, value=1, - label="Gradient Accumulation Steps", - info="The number of steps to accumulate gradients before updating model parameters. This can be used to simulate a larger effective batch size without increasing memory usage." - ) - - epochs = gr.Slider( - minimum=1, maximum=100, step=1, value=10, - label="Epochs", - info="The number of times to iterate over the entire training dataset. A larger number of epochs may improve model performance but also increase the risk of overfitting.") - - learning_rate = gr.Slider( - minimum=0.00001, maximum=0.01, value=3e-4, - label="Learning Rate", - info="The initial learning rate for the optimizer. A higher learning rate may speed up convergence but also cause instability or divergence. A lower learning rate may require more steps to reach optimal performance but also avoid overshooting or oscillating around local minima." - ) - - with gr.Column(elem_id="finetune_eval_data_group"): - evaluate_data_count = gr.Slider( - minimum=0, maximum=1, step=1, value=0, - label="Evaluation Data Count", - info="The number of data to be used for evaluation. 
This specific amount of data will be randomly chosen from the training dataset for evaluating the model's performance during the process, without contributing to the actual training.", - elem_id="finetune_evaluate_data_count" - ) - gr.HTML(elem_classes="flex_vertical_grow_area") - - with gr.Accordion("Advanced Options", open=False, elem_id="finetune_advance_options_accordion"): - with gr.Row(elem_id="finetune_advanced_options_checkboxes"): - load_in_8bit = gr.Checkbox( - label="8bit", value=Config.load_8bit) - fp16 = gr.Checkbox(label="FP16", value=True) - bf16 = gr.Checkbox(label="BF16", value=False) - gradient_checkpointing = gr.Checkbox( - label="gradient_checkpointing", value=False) - with gr.Column(variant="panel", elem_id="finetune_additional_training_arguments_box"): - gr.Textbox( - label="Additional Training Arguments", - info="Additional training arguments to be passed to the Trainer. Note that this can override ALL other arguments set elsewhere. See https://bit.ly/hf20-transformers-training-arguments for more details.", - elem_id="finetune_additional_training_arguments_textbox_for_label_display" - ) - additional_training_arguments = gr.Code( - label="JSON", - language="json", - value="", - lines=2, - elem_id="finetune_additional_training_arguments") - - with gr.Box(elem_id="finetune_continue_from_model_box"): - with gr.Row(): - continue_from_model = gr.Dropdown( - value="-", - label="Continue from Model", - choices=["-"], - allow_custom_value=True, - elem_id="finetune_continue_from_model" - ) - continue_from_checkpoint = gr.Dropdown( - value="-", - label="Resume from Checkpoint", - choices=["-"], - elem_id="finetune_continue_from_checkpoint") - with gr.Column(): - load_params_from_model_btn = gr.Button( - "Load training parameters from selected model", visible=False) - load_params_from_model_btn.style( - full_width=False, - size="sm") - load_params_from_model_message = gr.Markdown( - "", visible=False) - - things_that_might_timeout.append( - continue_from_model.change( - fn=handle_continue_from_model_change, - inputs=[continue_from_model], - outputs=[ - continue_from_checkpoint, - load_params_from_model_btn, - load_params_from_model_message - ] - ) - ) - - with gr.Column(): - lora_r = gr.Slider( - minimum=1, maximum=16, step=1, value=8, - label="LoRA R", - info="The rank parameter for LoRA, which controls the dimensionality of the rank decomposition matrices. A larger lora_r increases the expressiveness and flexibility of LoRA but also increases the number of trainable parameters and memory usage." - ) - - lora_alpha = gr.Slider( - minimum=1, maximum=128, step=1, value=16, - label="LoRA Alpha", - info="The scaling parameter for LoRA, which controls how much LoRA affects the original pre-trained model weights. A larger lora_alpha amplifies the impact of LoRA but may also distort or override the pre-trained knowledge." - ) - - lora_dropout = gr.Slider( - minimum=0, maximum=1, value=0.05, - label="LoRA Dropout", - info="The dropout probability for LoRA, which controls the fraction of LoRA parameters that are set to zero during training. A larger lora_dropout increases the regularization effect of LoRA but also increases the risk of underfitting." 
- ) - - with gr.Column(elem_id="finetune_lora_target_modules_box"): - lora_target_modules = gr.CheckboxGroup( - label="LoRA Target Modules", - choices=default_lora_target_module_choices, - value=["q_proj", "v_proj"], - info="Modules to replace with LoRA.", - elem_id="finetune_lora_target_modules" - ) - lora_target_module_choices = gr.State( - value=default_lora_target_module_choices) # type: ignore - with gr.Box(elem_id="finetune_lora_target_modules_add_box"): - with gr.Row(): - lora_target_modules_add = gr.Textbox( - lines=1, max_lines=1, show_label=False, - elem_id="finetune_lora_target_modules_add" - ) - lora_target_modules_add_btn = gr.Button( - "Add", - elem_id="finetune_lora_target_modules_add_btn" - ) - lora_target_modules_add_btn.style( - full_width=False, size="sm") - things_that_might_timeout.append(lora_target_modules_add_btn.click( - handle_lora_target_modules_add, - inputs=[lora_target_module_choices, - lora_target_modules_add, lora_target_modules], - outputs=[lora_target_module_choices, - lora_target_modules_add, lora_target_modules], - )) - - with gr.Accordion("Advanced LoRA Options", open=False, elem_id="finetune_advance_lora_options_accordion"): - with gr.Column(elem_id="finetune_lora_modules_to_save_box"): - lora_modules_to_save = gr.CheckboxGroup( - label="LoRA Modules To Save", - choices=default_lora_modules_to_save_choices, - value=[], - # info="", - elem_id="finetune_lora_modules_to_save" - ) - lora_modules_to_save_choices = gr.State( - value=default_lora_modules_to_save_choices) # type: ignore - with gr.Box(elem_id="finetune_lora_modules_to_save_add_box"): - with gr.Row(): - lora_modules_to_save_add = gr.Textbox( - lines=1, max_lines=1, show_label=False, - elem_id="finetune_lora_modules_to_save_add" - ) - lora_modules_to_save_add_btn = gr.Button( - "Add", - elem_id="finetune_lora_modules_to_save_add_btn" - ) - lora_modules_to_save_add_btn.style( - full_width=False, size="sm") - things_that_might_timeout.append(lora_modules_to_save_add_btn.click( - handle_lora_modules_to_save_add, - inputs=[lora_modules_to_save_choices, - lora_modules_to_save_add, lora_modules_to_save], - outputs=[lora_modules_to_save_choices, - lora_modules_to_save_add, lora_modules_to_save], - )) - - with gr.Column(variant="panel", elem_id="finetune_additional_lora_config_box"): - gr.Textbox( - label="Additional LoRA Config", - info="Additional LoraConfig. 
Note that this can override ALL other arguments set elsewhere.", - elem_id="finetune_additional_lora_config_textbox_for_label_display" - ) - additional_lora_config = gr.Code( - label="JSON", - language="json", - value="", - lines=2, - elem_id="finetune_additional_lora_config") - - gr.HTML(elem_classes="flex_vertical_grow_area no_limit") - - with gr.Column(elem_id="finetune_log_and_save_options_group_container"): - with gr.Row(elem_id="finetune_log_and_save_options_group"): - logging_steps = gr.Number( - label="Logging Steps", - precision=0, - value=10, - elem_id="finetune_logging_steps" - ) - save_steps = gr.Number( - label="Steps Per Save", - precision=0, - value=500, - elem_id="finetune_save_steps" - ) - save_total_limit = gr.Number( - label="Saved Checkpoints Limit", - precision=0, - value=5, - elem_id="finetune_save_total_limit" - ) - - with gr.Column(elem_id="finetune_model_name_group"): - model_name = gr.Textbox( - lines=1, label="LoRA Model Name", value=random_name, - max_lines=1, - info="The name of the new LoRA model.", - elem_id="finetune_model_name", - ) - - with gr.Row(): - with gr.Column(): - pass - with gr.Column(): - - with gr.Row(): - train_btn = gr.Button( - "Train", variant="primary", label="Train", - elem_id="finetune_start_btn" - ) - - abort_button = gr.Button( - "Abort", label="Abort", - elem_id="finetune_stop_btn" - ) - confirm_abort_button = gr.Button( - "Confirm Abort", label="Confirm Abort", variant="stop", - elem_id="finetune_confirm_stop_btn" - ) - - things_that_might_timeout.append(reload_selections_button.click( - reload_selections, - inputs=[template, dataset_from_data_dir], - outputs=[template, dataset_from_data_dir, continue_from_model], - )) - - for i in dataset_preview_inputs: - things_that_might_timeout.append( - i.change( - fn=refresh_preview, - inputs=dataset_preview_inputs, - outputs=[ - finetune_dataset_preview, - finetune_dataset_preview_info_message, - dataset_from_text_message, - dataset_from_data_dir_message - ] - ).then( - fn=refresh_dataset_items_count, - inputs=dataset_preview_inputs, - outputs=[ - finetune_dataset_preview_info_message, - dataset_from_text_message, - dataset_from_data_dir_message, - evaluate_data_count, - ] - )) - - finetune_args = [ - max_seq_length, - evaluate_data_count, - micro_batch_size, - gradient_accumulation_steps, - epochs, - learning_rate, - train_on_inputs, - lora_r, - lora_alpha, - lora_dropout, - lora_target_modules, - lora_modules_to_save, - load_in_8bit, - fp16, - bf16, - gradient_checkpointing, - save_steps, - save_total_limit, - logging_steps, - additional_training_arguments, - additional_lora_config, - ] - - things_that_might_timeout.append( - load_params_from_model_btn.click( - fn=handle_load_params_from_model, - inputs=( - [continue_from_model] + - [template, load_dataset_from, dataset_from_data_dir] + - finetune_args + - [lora_target_module_choices, lora_modules_to_save_choices] - ), # type: ignore - outputs=( - [load_params_from_model_message] + - [template, load_dataset_from, dataset_from_data_dir] + - finetune_args + - [lora_target_module_choices, lora_modules_to_save_choices] - ) # type: ignore - ) - ) - - train_status = gr.HTML( - "", - label="Train Output", - elem_id="finetune_training_status") - - with gr.Column(visible=False, elem_id="finetune_loss_plot_container") as loss_plot_container: - loss_plot = gr.Plot( - visible=False, show_label=False, - elem_id="finetune_loss_plot") - - training_indicator = gr.HTML( - "training_indicator", visible=False, elem_id="finetune_training_indicator") - - 
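
The LoRA sliders earlier in this tab (R, Alpha, Dropout, Target Modules) and the batch-size controls map almost directly onto a `peft.LoraConfig` plus a couple of trainer settings. The sketch below shows one plausible mapping, assuming the Hugging Face `peft` package is available; the exact argument names consumed by this project's `do_train` are not visible in this hunk, so treat the mapping as illustrative rather than the app's actual wiring.

```python
# Illustrative mapping of the UI fields onto peft settings
# (assumes the Hugging Face `peft` package; not the project's own code).
from peft import LoraConfig

ui = {
    "lora_r": 8,
    "lora_alpha": 16,
    "lora_dropout": 0.05,
    "lora_target_modules": ["q_proj", "v_proj"],
    "micro_batch_size": 4,
    "gradient_accumulation_steps": 8,
}

lora_config = LoraConfig(
    r=ui["lora_r"],
    lora_alpha=ui["lora_alpha"],
    lora_dropout=ui["lora_dropout"],
    target_modules=ui["lora_target_modules"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Effective batch size seen by the optimizer per update step:
effective_batch = ui["micro_batch_size"] * ui["gradient_accumulation_steps"]
print(lora_config)
print("effective batch size:", effective_batch)
```
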
train_start = train_btn.click( - fn=do_train, - inputs=(dataset_inputs + finetune_args + [ - model_name, - continue_from_model, - continue_from_checkpoint, - ]), - outputs=[train_status, training_indicator, - loss_plot_container, loss_plot] - ) - - # controlled by JS, shows the confirm_abort_button - abort_button.click(None, None, None, None) - confirm_abort_button.click( - fn=do_abort_training, - inputs=None, outputs=None, - cancels=[train_start]) - - training_status_updates = finetune_ui_blocks.load( - fn=render_training_status, - inputs=None, - outputs=[train_status, training_indicator], - every=0.2 - ) - loss_plot_updates = finetune_ui_blocks.load( - fn=render_loss_plot, - inputs=None, - outputs=[loss_plot_container, loss_plot], - every=10 - ) - finetune_ui_blocks.load(_js=relative_read_file(__file__, "script.js")) - - # things_that_might_timeout.append(training_status_updates) - stop_timeoutable_btn = gr.Button( - "stop not-responding elements", - elem_id="inference_stop_timeoutable_btn", - elem_classes="foot_stop_timeoutable_btn") - stop_timeoutable_btn.click( - fn=None, inputs=None, outputs=None, cancels=things_that_might_timeout) diff --git a/spaces/zhang-wei-jian/docker/node_modules/on-finished/README.md b/spaces/zhang-wei-jian/docker/node_modules/on-finished/README.md deleted file mode 100644 index 8973cded6589a6cc5a9e1718e3fb0d709fe6e8d8..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/on-finished/README.md +++ /dev/null @@ -1,162 +0,0 @@ -# on-finished - -[![NPM Version][npm-version-image]][npm-url] -[![NPM Downloads][npm-downloads-image]][npm-url] -[![Node.js Version][node-image]][node-url] -[![Build Status][ci-image]][ci-url] -[![Coverage Status][coveralls-image]][coveralls-url] - -Execute a callback when a HTTP request closes, finishes, or errors. - -## Install - -This is a [Node.js](https://nodejs.org/en/) module available through the -[npm registry](https://www.npmjs.com/). Installation is done using the -[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): - -```sh -$ npm install on-finished -``` - -## API - -```js -var onFinished = require('on-finished') -``` - -### onFinished(res, listener) - -Attach a listener to listen for the response to finish. The listener will -be invoked only once when the response finished. If the response finished -to an error, the first argument will contain the error. If the response -has already finished, the listener will be invoked. - -Listening to the end of a response would be used to close things associated -with the response, like open files. - -Listener is invoked as `listener(err, res)`. - - - -```js -onFinished(res, function (err, res) { - // clean up open fds, etc. - // err contains the error if request error'd -}) -``` - -### onFinished(req, listener) - -Attach a listener to listen for the request to finish. The listener will -be invoked only once when the request finished. If the request finished -to an error, the first argument will contain the error. If the request -has already finished, the listener will be invoked. - -Listening to the end of a request would be used to know when to continue -after reading the data. - -Listener is invoked as `listener(err, req)`. - - - -```js -var data = '' - -req.setEncoding('utf8') -req.on('data', function (str) { - data += str -}) - -onFinished(req, function (err, req) { - // data is read unless there is err -}) -``` - -### onFinished.isFinished(res) - -Determine if `res` is already finished. 
This would be useful to check and -not even start certain operations if the response has already finished. - -### onFinished.isFinished(req) - -Determine if `req` is already finished. This would be useful to check and -not even start certain operations if the request has already finished. - -## Special Node.js requests - -### HTTP CONNECT method - -The meaning of the `CONNECT` method from RFC 7231, section 4.3.6: - -> The CONNECT method requests that the recipient establish a tunnel to -> the destination origin server identified by the request-target and, -> if successful, thereafter restrict its behavior to blind forwarding -> of packets, in both directions, until the tunnel is closed. Tunnels -> are commonly used to create an end-to-end virtual connection, through -> one or more proxies, which can then be secured using TLS (Transport -> Layer Security, [RFC5246]). - -In Node.js, these request objects come from the `'connect'` event on -the HTTP server. - -When this module is used on a HTTP `CONNECT` request, the request is -considered "finished" immediately, **due to limitations in the Node.js -interface**. This means if the `CONNECT` request contains a request entity, -the request will be considered "finished" even before it has been read. - -There is no such thing as a response object to a `CONNECT` request in -Node.js, so there is no support for one. - -### HTTP Upgrade request - -The meaning of the `Upgrade` header from RFC 7230, section 6.1: - -> The "Upgrade" header field is intended to provide a simple mechanism -> for transitioning from HTTP/1.1 to some other protocol on the same -> connection. - -In Node.js, these request objects come from the `'upgrade'` event on -the HTTP server. - -When this module is used on a HTTP request with an `Upgrade` header, the -request is considered "finished" immediately, **due to limitations in the -Node.js interface**. This means if the `Upgrade` request contains a request -entity, the request will be considered "finished" even before it has been -read. - -There is no such thing as a response object to a `Upgrade` request in -Node.js, so there is no support for one. - -## Example - -The following code ensures that file descriptors are always closed -once the response finishes. 
- -```js -var destroy = require('destroy') -var fs = require('fs') -var http = require('http') -var onFinished = require('on-finished') - -http.createServer(function onRequest (req, res) { - var stream = fs.createReadStream('package.json') - stream.pipe(res) - onFinished(res, function () { - destroy(stream) - }) -}) -``` - -## License - -[MIT](LICENSE) - -[ci-image]: https://badgen.net/github/checks/jshttp/on-finished/master?label=ci -[ci-url]: https://github.com/jshttp/on-finished/actions/workflows/ci.yml -[coveralls-image]: https://badgen.net/coveralls/c/github/jshttp/on-finished/master -[coveralls-url]: https://coveralls.io/r/jshttp/on-finished?branch=master -[node-image]: https://badgen.net/npm/node/on-finished -[node-url]: https://nodejs.org/en/download -[npm-downloads-image]: https://badgen.net/npm/dm/on-finished -[npm-url]: https://npmjs.org/package/on-finished -[npm-version-image]: https://badgen.net/npm/v/on-finished diff --git a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/patch.js b/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/patch.js deleted file mode 100644 index 63afca2524fca975831dcbfc13d011fad4ca6be8..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/simple-update-notifier/node_modules/semver/functions/patch.js +++ /dev/null @@ -1,3 +0,0 @@ -const SemVer = require('../classes/semver') -const patch = (a, loose) => new SemVer(a, loose).patch -module.exports = patch diff --git a/spaces/zhangs2022/ChuanhuChatGPT/modules/overwrites.py b/spaces/zhangs2022/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index 035a4a52722d66ee28af1c05231ad1cea3339ef5..0000000000000000000000000000000000000000 --- a/spaces/zhangs2022/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html -from gradio_client import utils as client_utils - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. 
- """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | Tuple | List | None, message_type: str - ) -> str | Dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - filepath = chat_message[0] - mime_type = client_utils.get_mimetype(filepath) - filepath = self.make_temp_copy_if_needed(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - if message_type == "bot": - if not detect_converted_mark(chat_message): - chat_message = convert_mdtext(chat_message) - elif message_type == "user": - if not detect_converted_mark(chat_message): - chat_message = convert_asis(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/zlc99/M4Singer/data_gen/tts/data_gen_utils.py b/spaces/zlc99/M4Singer/data_gen/tts/data_gen_utils.py deleted file mode 100644 index 1d416b78c1e7aa6b03951c1db12bd4fd26d0a708..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/data_gen/tts/data_gen_utils.py +++ /dev/null @@ -1,347 +0,0 @@ -import warnings - -warnings.filterwarnings("ignore") - -import parselmouth -import os -import torch -from skimage.transform import resize -from utils.text_encoder import TokenTextEncoder -from utils.pitch_utils import f0_to_coarse -import struct -import webrtcvad -from scipy.ndimage.morphology import binary_dilation -import librosa -import numpy as np -from utils import audio -import pyloudnorm as pyln -import re -import json -from collections import OrderedDict - -PUNCS = '!,.?;:' - -int16_max = (2 ** 15) - 1 - - -def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12): - """ - Ensures that segments without voice in the waveform remain no longer than a - threshold determined by the VAD parameters in params.py. - :param wav: the raw waveform as a numpy array of floats - :param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have. - :return: the same waveform with silences trimmed away (length <= original wav length) - """ - - ## Voice Activation Detection - # Window size of the VAD. Must be either 10, 20 or 30 milliseconds. 
- # This sets the granularity of the VAD. Should not need to be changed. - sampling_rate = 16000 - wav_raw, sr = librosa.core.load(path, sr=sr) - - if norm: - meter = pyln.Meter(sr) # create BS.1770 meter - loudness = meter.integrated_loudness(wav_raw) - wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0) - if np.abs(wav_raw).max() > 1.0: - wav_raw = wav_raw / np.abs(wav_raw).max() - - wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best') - - vad_window_length = 30 # In milliseconds - # Number of frames to average together when performing the moving average smoothing. - # The larger this value, the larger the VAD variations must be to not get smoothed out. - vad_moving_average_width = 8 - - # Compute the voice detection window size - samples_per_window = (vad_window_length * sampling_rate) // 1000 - - # Trim the end of the audio to have a multiple of the window size - wav = wav[:len(wav) - (len(wav) % samples_per_window)] - - # Convert the float waveform to 16-bit mono PCM - pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16)) - - # Perform voice activation detection - voice_flags = [] - vad = webrtcvad.Vad(mode=3) - for window_start in range(0, len(wav), samples_per_window): - window_end = window_start + samples_per_window - voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2], - sample_rate=sampling_rate)) - voice_flags = np.array(voice_flags) - - # Smooth the voice detection with a moving average - def moving_average(array, width): - array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2))) - ret = np.cumsum(array_padded, dtype=float) - ret[width:] = ret[width:] - ret[:-width] - return ret[width - 1:] / width - - audio_mask = moving_average(voice_flags, vad_moving_average_width) - audio_mask = np.round(audio_mask).astype(np.bool) - - # Dilate the voiced regions - audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1)) - audio_mask = np.repeat(audio_mask, samples_per_window) - audio_mask = resize(audio_mask, (len(wav_raw),)) > 0 - if return_raw_wav: - return wav_raw, audio_mask, sr - return wav_raw[audio_mask], audio_mask, sr - - -def process_utterance(wav_path, - fft_size=1024, - hop_size=256, - win_length=1024, - window="hann", - num_mels=80, - fmin=80, - fmax=7600, - eps=1e-6, - sample_rate=22050, - loud_norm=False, - min_level_db=-100, - return_linear=False, - trim_long_sil=False, vocoder='pwg'): - if isinstance(wav_path, str): - if trim_long_sil: - wav, _, _ = trim_long_silences(wav_path, sample_rate) - else: - wav, _ = librosa.core.load(wav_path, sr=sample_rate) - else: - wav = wav_path - - if loud_norm: - meter = pyln.Meter(sample_rate) # create BS.1770 meter - loudness = meter.integrated_loudness(wav) - wav = pyln.normalize.loudness(wav, loudness, -22.0) - if np.abs(wav).max() > 1: - wav = wav / np.abs(wav).max() - - # get amplitude spectrogram - x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size, - win_length=win_length, window=window, pad_mode="constant") - spc = np.abs(x_stft) # (n_bins, T) - - # get mel basis - fmin = 0 if fmin == -1 else fmin - fmax = sample_rate / 2 if fmax == -1 else fmax - mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax) - mel = mel_basis @ spc - - if vocoder == 'pwg': - mel = np.log10(np.maximum(eps, mel)) # (n_mel_bins, T) - else: - assert False, f'"{vocoder}" is not in ["pwg"].' 
- - l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1) - wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0) - wav = wav[:mel.shape[1] * hop_size] - - if not return_linear: - return wav, mel - else: - spc = audio.amp_to_db(spc) - spc = audio.normalize(spc, {'min_level_db': min_level_db}) - return wav, mel, spc - - -def get_pitch(wav_data, mel, hparams): - """ - - :param wav_data: [T] - :param mel: [T, 80] - :param hparams: - :return: - """ - time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000 - f0_min = 80 - f0_max = 750 - - if hparams['hop_size'] == 128: - pad_size = 4 - elif hparams['hop_size'] == 256: - pad_size = 2 - else: - assert False - - f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - lpad = pad_size * 2 - rpad = len(mel) - len(f0) - lpad - f0 = np.pad(f0, [[lpad, rpad]], mode='constant') - # mel and f0 are extracted by 2 different libraries. we should force them to have the same length. - # Attention: we find that new version of some libraries could cause ``rpad'' to be a negetive value... - # Just to be sure, we recommend users to set up the same environments as them in requirements_auto.txt (by Anaconda) - delta_l = len(mel) - len(f0) - assert np.abs(delta_l) <= 8 - if delta_l > 0: - f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0) - f0 = f0[:len(mel)] - pitch_coarse = f0_to_coarse(f0) - return f0, pitch_coarse - - -def remove_empty_lines(text): - """remove empty lines""" - assert (len(text) > 0) - assert (isinstance(text, list)) - text = [t.strip() for t in text] - if "" in text: - text.remove("") - return text - - -class TextGrid(object): - def __init__(self, text): - text = remove_empty_lines(text) - self.text = text - self.line_count = 0 - self._get_type() - self._get_time_intval() - self._get_size() - self.tier_list = [] - self._get_item_list() - - def _extract_pattern(self, pattern, inc): - """ - Parameters - ---------- - pattern : regex to extract pattern - inc : increment of line count after extraction - Returns - ------- - group : extracted info - """ - try: - group = re.match(pattern, self.text[self.line_count]).group(1) - self.line_count += inc - except AttributeError: - raise ValueError("File format error at line %d:%s" % (self.line_count, self.text[self.line_count])) - return group - - def _get_type(self): - self.file_type = self._extract_pattern(r"File type = \"(.*)\"", 2) - - def _get_time_intval(self): - self.xmin = self._extract_pattern(r"xmin = (.*)", 1) - self.xmax = self._extract_pattern(r"xmax = (.*)", 2) - - def _get_size(self): - self.size = int(self._extract_pattern(r"size = (.*)", 2)) - - def _get_item_list(self): - """Only supports IntervalTier currently""" - for itemIdx in range(1, self.size + 1): - tier = OrderedDict() - item_list = [] - tier_idx = self._extract_pattern(r"item \[(.*)\]:", 1) - tier_class = self._extract_pattern(r"class = \"(.*)\"", 1) - if tier_class != "IntervalTier": - raise NotImplementedError("Only IntervalTier class is supported currently") - tier_name = self._extract_pattern(r"name = \"(.*)\"", 1) - tier_xmin = self._extract_pattern(r"xmin = (.*)", 1) - tier_xmax = self._extract_pattern(r"xmax = (.*)", 1) - tier_size = self._extract_pattern(r"intervals: size = (.*)", 1) - for i in range(int(tier_size)): - item = OrderedDict() - item["idx"] = self._extract_pattern(r"intervals \[(.*)\]", 1) - item["xmin"] = 
self._extract_pattern(r"xmin = (.*)", 1) - item["xmax"] = self._extract_pattern(r"xmax = (.*)", 1) - item["text"] = self._extract_pattern(r"text = \"(.*)\"", 1) - item_list.append(item) - tier["idx"] = tier_idx - tier["class"] = tier_class - tier["name"] = tier_name - tier["xmin"] = tier_xmin - tier["xmax"] = tier_xmax - tier["size"] = tier_size - tier["items"] = item_list - self.tier_list.append(tier) - - def toJson(self): - _json = OrderedDict() - _json["file_type"] = self.file_type - _json["xmin"] = self.xmin - _json["xmax"] = self.xmax - _json["size"] = self.size - _json["tiers"] = self.tier_list - return json.dumps(_json, ensure_ascii=False, indent=2) - - -def get_mel2ph(tg_fn, ph, mel, hparams): - ph_list = ph.split(" ") - with open(tg_fn, "r") as f: - tg = f.readlines() - tg = remove_empty_lines(tg) - tg = TextGrid(tg) - tg = json.loads(tg.toJson()) - split = np.ones(len(ph_list) + 1, np.float) * -1 - tg_idx = 0 - ph_idx = 0 - tg_align = [x for x in tg['tiers'][-1]['items']] - tg_align_ = [] - for x in tg_align: - x['xmin'] = float(x['xmin']) - x['xmax'] = float(x['xmax']) - if x['text'] in ['sil', 'sp', '', 'SIL', 'PUNC']: - x['text'] = '' - if len(tg_align_) > 0 and tg_align_[-1]['text'] == '': - tg_align_[-1]['xmax'] = x['xmax'] - continue - tg_align_.append(x) - tg_align = tg_align_ - tg_len = len([x for x in tg_align if x['text'] != '']) - ph_len = len([x for x in ph_list if not is_sil_phoneme(x)]) - assert tg_len == ph_len, (tg_len, ph_len, tg_align, ph_list, tg_fn) - while tg_idx < len(tg_align) or ph_idx < len(ph_list): - if tg_idx == len(tg_align) and is_sil_phoneme(ph_list[ph_idx]): - split[ph_idx] = 1e8 - ph_idx += 1 - continue - x = tg_align[tg_idx] - if x['text'] == '' and ph_idx == len(ph_list): - tg_idx += 1 - continue - assert ph_idx < len(ph_list), (tg_len, ph_len, tg_align, ph_list, tg_fn) - ph = ph_list[ph_idx] - if x['text'] == '' and not is_sil_phoneme(ph): - assert False, (ph_list, tg_align) - if x['text'] != '' and is_sil_phoneme(ph): - ph_idx += 1 - else: - assert (x['text'] == '' and is_sil_phoneme(ph)) \ - or x['text'].lower() == ph.lower() \ - or x['text'].lower() == 'sil', (x['text'], ph) - split[ph_idx] = x['xmin'] - if ph_idx > 0 and split[ph_idx - 1] == -1 and is_sil_phoneme(ph_list[ph_idx - 1]): - split[ph_idx - 1] = split[ph_idx] - ph_idx += 1 - tg_idx += 1 - assert tg_idx == len(tg_align), (tg_idx, [x['text'] for x in tg_align]) - assert ph_idx >= len(ph_list) - 1, (ph_idx, ph_list, len(ph_list), [x['text'] for x in tg_align], tg_fn) - mel2ph = np.zeros([mel.shape[0]], np.int) - split[0] = 0 - split[-1] = 1e8 - for i in range(len(split) - 1): - assert split[i] != -1 and split[i] <= split[i + 1], (split[:-1],) - split = [int(s * hparams['audio_sample_rate'] / hparams['hop_size'] + 0.5) for s in split] - for ph_idx in range(len(ph_list)): - mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1 - mel2ph_torch = torch.from_numpy(mel2ph) - T_t = len(ph_list) - dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch)) - dur = dur[1:].numpy() - return mel2ph, dur - - -def build_phone_encoder(data_dir): - phone_list_file = os.path.join(data_dir, 'phone_set.json') - phone_list = json.load(open(phone_list_file)) - return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',') - - -def is_sil_phoneme(p): - return not p[0].isalpha() diff --git a/spaces/zomehwh/rvc-models/README.md b/spaces/zomehwh/rvc-models/README.md deleted file mode 100644 index 
0bfeacb91d1f9a54158c61256175c8cae6eab19e..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/rvc-models/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Rvc Models -emoji: 🎤 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zomehwh/vits-models-genshin-bh3/mel_processing.py b/spaces/zomehwh/vits-models-genshin-bh3/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-genshin-bh3/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in 
hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/zxy666/bingo-chatai666/src/lib/utils.ts b/spaces/zxy666/bingo-chatai666/src/lib/utils.ts deleted file mode 100644 index 8de2eba94bf0bc93579d4f489e8b810dbf6ce92a..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/lib/utils.ts +++ /dev/null @@ -1,159 +0,0 @@ -import { clsx, type ClassValue } from 'clsx' -import { customAlphabet } from 'nanoid' -import { twMerge } from 'tailwind-merge' -// @ts-ignore -import randomip from 'random-ip' -import cidr from './cidr.json' - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} - -export const nanoid = customAlphabet( - '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', - 7 -) // 7-character random string - -export function createChunkDecoder() { - const decoder = new TextDecoder() - return function (chunk: Uint8Array | undefined): string { - if (!chunk) return '' - return decoder.decode(chunk, { stream: true }) - } -} - -export function random (start: number, end: number) { - return start + Math.floor(Math.random() * (end - start)) -} - -export function randomIP() { - // return `104.${random(0, 21)}.${random(0, 127)}.${random(1, 255)}` - const [ip, range] = cidr.at(random(0, cidr.length))?.split('/')! - return randomip(ip, range) -} - -export const defaultUID = 'xxx' - -export function parseHeadersFromCurl(content: string) { - const re = /-H '([^:]+):\s*([^']+)/mg - const headers: HeadersInit = {} - content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // 将 cmd curl 转成 bash curl - content.replace(re, (_: string, key: string, value: string) => { - headers[key] = value - return '' - }) - return headers -} - -export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2'] -export function encodeHeadersToCookie(content: string) { - const base64Content = btoa(content) - const contentChunks = base64Content.match(/.{1,4000}/g) || [] - return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`) -} - -export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) { - let base64Content = '' - ChunkKeys.forEach((key) => { - base64Content += (cookies[key] || '') - }) - try { - return atob(base64Content) - } catch(e) { - return '' - } -} - -export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) { - return parseHeadersFromCurl(extraCurlFromCookie(cookies)) -} - -export function formatDate(input: string | number | Date): string { - const date = new Date(input) - return date.toLocaleDateString('en-US', { - month: 'long', - day: 'numeric', - year: 'numeric' - }) -} - -export function parseCookie(cookie: string, cookieName: string) { - const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie - return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? 
cookie.trim() : '' -} - -export function setCookie(key: string, value: string) { - const maxAge = value ? 86400 * 30 : 0 - document.cookie = `${key}=${value || ''}; Path=/; Max-Age=${maxAge}; SameSite=None; Secure` -} - -export function getCookie(cookieName: string) { - const re = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`) - return re.test(document.cookie) ? RegExp.$1 : '' -} - -export function parseCookies(cookie: string, cookieNames: string[]) { - const cookies: { [key: string]: string } = {} - cookieNames.forEach(cookieName => { - cookies[cookieName] = parseCookie(cookie, cookieName) - }) - return cookies -} - -export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0' - -export function parseUA(ua?: string, default_ua = DEFAULT_UA) { - return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua -} - -export function mockUser(cookies: Partial<{ [key: string]: string }>) { - const { - BING_UA = process.env.BING_UA, - BING_IP, - _U = defaultUID, - } = cookies - const ua = parseUA(BING_UA) - - return { - 'x-forwarded-for': BING_IP!, - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', - 'User-Agent': ua!, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.3 OS/Win32', - cookie: `_U=${_U}` || '', - } -} - -export function createHeaders(cookies: Partial<{ [key: string]: string }>, type?: string) { - let { - BING_HEADER = process.env.BING_HEADER, - BING_IP, - IMAGE_ONLY = process.env.IMAGE_ONLY ?? '1', - } = cookies - const imageOnly = /^(1|true|yes)$/.test(String(IMAGE_ONLY)) - if (BING_HEADER) { - if ( - (imageOnly && type === 'image') - || !imageOnly - ) { - const headers = extraHeadersFromCookie({ - BING_HEADER, - ...cookies, - }) || {} - headers['x-forward-for'] = BING_IP! - return headers - } - } - return mockUser(cookies) -} - -export class WatchDog { - private tid = 0 - watch(fn: Function, timeout = 2000) { - clearTimeout(this.tid) - this.tid = setTimeout(fn, timeout + Math.random() * 1000) - } - reset() { - clearTimeout(this.tid) - } -} diff --git a/spaces/zzzzzc/zzcbingAi/Dockerfile b/spaces/zzzzzc/zzcbingAi/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/zzzzzc/zzcbingAi/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start
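
The `encodeHeadersToCookie`/`extraCurlFromCookie` pair in the deleted `utils.ts` above works around per-cookie size limits by base64-encoding the curl command and splitting it into roughly 4000-character chunks stored under `BING_HEADER`, `BING_HEADER1`, and `BING_HEADER2`. The round trip is simple enough to sketch in a few lines of Python; the key names and chunk size below just mirror the TypeScript constants and are not part of the original code.

```python
# Python rendering of the chunked-cookie round trip from utils.ts
# (key names and the 4000-char chunk size mirror the TS constants).
import base64

CHUNK_KEYS = ["BING_HEADER", "BING_HEADER1", "BING_HEADER2"]
CHUNK_SIZE = 4000


def encode_headers_to_cookie(content: str) -> list:
    b64 = base64.b64encode(content.encode("utf-8")).decode("ascii")
    chunks = [b64[i:i + CHUNK_SIZE] for i in range(0, len(b64), CHUNK_SIZE)]
    return [f"{key}={chunks[i] if i < len(chunks) else ''}"
            for i, key in enumerate(CHUNK_KEYS)]


def extract_curl_from_cookie(cookies: dict) -> str:
    b64 = "".join(cookies.get(key, "") for key in CHUNK_KEYS)
    try:
        return base64.b64decode(b64).decode("utf-8")
    except Exception:
        return ""


if __name__ == "__main__":
    curl = "curl 'https://example.test' -H 'User-Agent: demo'"
    pairs = encode_headers_to_cookie(curl)
    cookies = dict(p.split("=", 1) for p in pairs)
    assert extract_curl_from_cookie(cookies) == curl
```
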